Commit

Support logging properties into user defined columns

lucasgulbranson committed Nov 2, 2015
1 parent 8b60f1d commit 5323191
Showing 2 changed files with 38 additions and 6 deletions.
@@ -1,4 +1,5 @@
using System;
using System.Data;
using Serilog.Configuration;
using Serilog.Events;
using Serilog.Sinks.MSSqlServer;
@@ -38,6 +39,7 @@ public static class LoggerConfigurationMSSqlServerExtensions
/// <param name="period">The time to wait between checking for event batches.</param>
/// <param name="formatProvider">Supplies culture-specific formatting information, or null.</param>
/// <param name="storeTimestampInUtc">Store Timestamp In UTC</param>
/// <param name="additionalDataColumns">Additional columns for data storage.</param>
/// <returns>Logger configuration, allowing configuration to continue.</returns>
/// <exception cref="ArgumentNullException">A required parameter is null.</exception>
public static LoggerConfiguration MSSqlServer(
@@ -47,14 +49,15 @@ public static LoggerConfiguration MSSqlServer(
int batchPostingLimit = MSSqlServerSink.DefaultBatchPostingLimit,
TimeSpan? period = null,
IFormatProvider formatProvider = null,
bool storeTimestampInUtc = false)
bool storeTimestampInUtc = false,
DataColumn[] additionalDataColumns = null)
{
if (loggerConfiguration == null) throw new ArgumentNullException("loggerConfiguration");

var defaultedPeriod = period ?? MSSqlServerSink.DefaultPeriod;

return loggerConfiguration.Sink(
new MSSqlServerSink(connectionString, tableName, storeProperties, batchPostingLimit, defaultedPeriod, formatProvider, storeTimestampInUtc),
new MSSqlServerSink(connectionString, tableName, storeProperties, batchPostingLimit, defaultedPeriod, formatProvider, storeTimestampInUtc, additionalDataColumns),
restrictedToMinimumLevel);
}
}
37 changes: 33 additions & 4 deletions src/Serilog.Sinks.MSSqlServer/Sinks/MSSqlServer/MSSqlServerSink.cs
@@ -1,4 +1,4 @@
// Copyright 2013 Serilog Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -49,6 +49,8 @@ public class MSSqlServerSink : PeriodicBatchingSink
readonly CancellationTokenSource _token = new CancellationTokenSource();
readonly bool _storeTimestampInUtc;

private DataColumn[] _additionalDataColumns;

/// <summary>
/// Construct a sink posting to the specified database.
/// </summary>
@@ -59,8 +61,9 @@ public class MSSqlServerSink : PeriodicBatchingSink
/// <param name="period">The time to wait between checking for event batches.</param>
/// <param name="formatProvider">Supplies culture-specific formatting information, or null.</param>
/// <param name="storeTimestampInUtc">Store Timestamp In UTC</param>
/// <param name="additionalDataColumns">Additional columns for data storage.</param>
public MSSqlServerSink(string connectionString, string tableName, bool includeProperties, int batchPostingLimit,
TimeSpan period, IFormatProvider formatProvider, bool storeTimestampInUtc)
TimeSpan period, IFormatProvider formatProvider, bool storeTimestampInUtc, DataColumn[] additionalDataColumns = null )
: base(batchPostingLimit, period)
{
if (string.IsNullOrWhiteSpace(connectionString))
@@ -75,6 +78,7 @@ public MSSqlServerSink(string connectionString, string tableName, bool includePr
_includeProperties = includeProperties;
_formatProvider = formatProvider;
_storeTimestampInUtc = storeTimestampInUtc;
_additionalDataColumns = additionalDataColumns;

// Prepare the data table
_eventsTable = CreateDataTable();
@@ -162,6 +166,10 @@ DataTable CreateDataTable()
};
eventsTable.Columns.Add(props);

if ( _additionalDataColumns != null )
{
eventsTable.Columns.AddRange(_additionalDataColumns);
}

// Create an array for DataColumn objects.
var keys = new DataColumn[1];
@@ -171,7 +179,7 @@ DataTable CreateDataTable()
return eventsTable;
}

void FillDataTable(IEnumerable<LogEvent> events)
{
// Add the new rows to the collection.
foreach (var logEvent in events)
@@ -188,6 +196,10 @@ void FillDataTable(IEnumerable<LogEvent> events)
{
row["Properties"] = ConvertPropertiesToXmlStructure(logEvent.Properties);
}
if ( _additionalDataColumns != null )
{
ConvertPropertiesToColumn( row, logEvent.Properties );
}

_eventsTable.Rows.Add(row);
}
@@ -213,6 +225,23 @@ static string ConvertPropertiesToXmlStructure(
return sb.ToString();
}

/// <summary>
/// Maps values from properties that have a corresponding data column.
/// Matching is done based on column name and property key.
/// </summary>
/// <param name="row"></param>
/// <param name="properties"></param>
private void ConvertPropertiesToColumn(
DataRow row, IReadOnlyDictionary<string, LogEventPropertyValue> properties)
{
foreach (var property in properties)
{
if (row.Table.Columns.Contains(property.Key))
{
row[property.Key] = property.Value.ToString();
}
}
}

/// <summary>
/// Disposes the connection
@@ -223,7 +252,7 @@ protected override void Dispose(bool disposing)
_token.Cancel();

if (_eventsTable != null)
_eventsTable.Dispose();

base.Dispose(disposing);
}

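For reference, a minimal sketch of how calling code might use the new additionalDataColumns parameter. The connection string, table name, and column definitions below are illustrative placeholders, not part of this commit, and the target table is assumed to already contain matching columns.

using System;
using System.Data;
using Serilog;

class LoggingSetupExample
{
    static void Main()
    {
        // Hypothetical user-defined columns; the log table is assumed to
        // already have matching "UserName" and "RequestId" columns.
        var additionalColumns = new[]
        {
            new DataColumn("UserName", typeof(string)),
            new DataColumn("RequestId", typeof(int))
        };

        var log = new LoggerConfiguration()
            .WriteTo.MSSqlServer(
                connectionString: "Server=.;Database=Logs;Trusted_Connection=True;",
                tableName: "LogEvents",
                additionalDataColumns: additionalColumns)
            .CreateLogger();

        // Properties whose names match a user-defined column are written
        // into that column in addition to the normal event data.
        log.Information("User {UserName} started request {RequestId}", "alice", 42);
    }
}

With this commit, FillDataTable copies each matching property into its column via ConvertPropertiesToColumn; properties without a matching column are unaffected.
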
8 comments on commit 5323191

@nblumhardt (Contributor)

Hi @glennl122 - this seems like a good change to make, though it is possible the value type could be mismatched with the column definition.

Perhaps, along with your proposed change, using Convert.ChangeType() on the value plus some further validation could cover it?

Is this something you're interested in sending a PR for?

Cheers!

@lgubranson (Contributor)

The problem I see with just calling Convert.ChangeType is that when the type cannot be converted, we will fail to write anything to the database thereafter.

var columnName = property.Key;
var columnType = row.Table.Columns[columnName].DataType;
object conversion;
// Only scalar values can be converted; guard against structured or missing values.
var scalarValue = property.Value as ScalarValue;
if (scalarValue != null && TryChangeType(scalarValue.Value, columnType, out conversion))
{
    row[columnName] = conversion;
}

with the following method

private static bool TryChangeType(object obj, Type type, out object conversion)
{
    conversion = null;
    try
    {
        conversion = Convert.ChangeType(obj, type);
        return true;
    }
    catch
    {
        return false;
    }
}

This does indeed hide conversion failures, but it allows the other columns to have their data entered successfully.

The remaining problem is a SQL column of XML type: .NET abstracts that layer away and the column's DataType is just string, so if someone tries to add malformed XML, the logger will fail to write anything else to the database thereafter.
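One hypothetical guard (not part of this commit; the type and method names here are invented for illustration) would be to check well-formedness before assigning a value destined for an XML-typed column, so one malformed value cannot poison subsequent batches:

using System.Xml;
using System.Xml.Linq;

static class XmlColumnGuard
{
    // Returns true only if the candidate string parses as well-formed XML.
    // Malformed input is rejected here instead of failing the INSERT later.
    public static bool IsWellFormedXml(string candidate)
    {
        if (string.IsNullOrWhiteSpace(candidate))
            return false;

        try
        {
            XDocument.Parse(candidate);
            return true;
        }
        catch (XmlException)
        {
            return false;
        }
    }
}

The sink (or the caller) could skip the column, or fall back to the Properties field, whenever this returns false.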

@nblumhardt (Contributor)

@lukexggwp yes, it sounds like some careful checks/failure tolerance would be needed. For a start, just enabling the common string/int/double/decimal/bool/datetime cases would perhaps be a nice improvement; falling back to string might also be sane in the case of a conversion problem. Interesting nonetheless (thanks for checking it out @glennl122).
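If it helps, a rough sketch of that idea (an assumed helper, not code from this repository): try the column's declared type first, and fall back to the textual form only when the column is itself a string column, otherwise store NULL so one bad value cannot fail the whole batch:

using System;

static class ColumnValueCoercion
{
    // Attempts Convert.ChangeType to the column's declared type; on failure,
    // falls back to ToString() for string columns and DBNull otherwise.
    public static object Coerce(object value, Type columnType)
    {
        if (value == null)
            return DBNull.Value;

        try
        {
            return Convert.ChangeType(value, columnType);
        }
        catch (InvalidCastException) { }
        catch (FormatException) { }
        catch (OverflowException) { }

        return columnType == typeof(string) ? (object)value.ToString() : DBNull.Value;
    }
}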

@lgubranson (Contributor)

@nblumhardt I'll look into this, and perhaps add some sort of checks/failure tolerance to the DB commit portion. It'd be ideal if we're not forced to swallow the exception without the user knowing something went wrong. This part will take a bit more investigation. Thanks for your input!

@lgubranson (Contributor)

I believe I fixed this with my commit from last week; however, I think I did something to the Readme that makes GitHub report the merge as incompatible. The actual source code fixes seem to work well, as I've been using them in a local project without issue.

@nblumhardt (Contributor)

@lukexggwp when that happens, it generally means you need to git merge the target branch back into your PR branch, fix the conflicts, and then push the changes back to your PR. Not sure where to find good doco or I'd link it here, sorry - but should be googleable. HTH!

@lgubranson (Contributor)

@nblumhardt Thanks! I had to figure out what to google before I could get the proper results on how to handle this with GitHub. Sorry, I'm very new to contributing to a project via GitHub.

@nblumhardt (Contributor)

No worries - all the best with it.
