From 7d629eef6f3edc233f47efd5be5d537c44528b11 Mon Sep 17 00:00:00 2001
From: Yoshifumi Kawai
Date: Wed, 25 May 2016 21:38:02 +0900
Subject: [PATCH 1/4] Update README.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 79c9579..6961b36 100644
--- a/README.md
+++ b/README.md
@@ -79,8 +79,8 @@ ObservableEventListener is simple wrapper of `EventListener` and `TraceEvent(Mic
 | LogToConsole | Output by Console.WriteLine with colored.
 | LogToDebug | Output by Debug.WriteLine.
 | LogToTrace | Output by Trace.WriteLine.
-| LogToFile | Output to flat file by true asynchronous I/O's high performance sink.
-| LogToRollingFile | Output to flat file with file rotate rule by true asynchronous I/O's high performance sink.
+| LogToFile | Output to flat file.
+| LogToRollingFile | Output to flat file with file rotate.
 | LogTo | LogTo is helper for multiple subscribe.
 
 > How to make original Sink? I recommend log to Azure EventHubs, AWS Kinesis, BigQuery Streaming insert directly. Log to file is legacy way! Document is not available yet. Please see [Sinks](https://github.com/neuecc/EtwStream/tree/master/EtwStream.Core/Sinks) codes and please here to me.

From bc54bf3970d46d6f22cff13b55b65ca1dd29a900 Mon Sep 17 00:00:00 2001
From: Yoshifumi Kawai
Date: Wed, 25 May 2016 21:47:17 +0900
Subject: [PATCH 2/4] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 6961b36..391e54d 100644
--- a/README.md
+++ b/README.md
@@ -125,7 +125,7 @@ ObservableEventListener.FromTraceEvent("SampleEventSource")
 // fileNameSelector's DateTime is date of file open time, int is number sequence.
 // timestampPattern's DateTime is write time of message. If pattern is different then roll new file.
 var d1 = xs.LogToRollingFile(
-    fileNameSelector: (dt, i) => $@"{dt.ToString("yyyyMMdd")}Log-{i}.log",
+    fileNameSelector: (dt, i) => $@"{dt.ToString("yyyyMMdd")}_MyLog_{i.ToString("00")}.log",
     timestampPattern: x => x.ToString("yyyyMMdd"),
     rollSizeKB: 10000,
     messageFormatter: x => x.DumpPayloadOrMessage(),

From f3bac8d643204d6c79318fc8442f58217d3432bf Mon Sep 17 00:00:00 2001
From: Yoshifumi Kawai
Date: Wed, 25 May 2016 21:48:25 +0900
Subject: [PATCH 3/4] Update README.md

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 391e54d..15853f0 100644
--- a/README.md
+++ b/README.md
@@ -124,6 +124,7 @@ ObservableEventListener.FromTraceEvent("SampleEventSource")
 // RollingFile:
 // fileNameSelector's DateTime is date of file open time, int is number sequence.
 // timestampPattern's DateTime is write time of message. If pattern is different then roll new file.
+// timestampPattern must be integer at last word.
 var d1 = xs.LogToRollingFile(
     fileNameSelector: (dt, i) => $@"{dt.ToString("yyyyMMdd")}_MyLog_{i.ToString("00")}.log",
     timestampPattern: x => x.ToString("yyyyMMdd"),

From 5c977cbb5f4b0a1251ecf1c3cc1d910346d27a54 Mon Sep 17 00:00:00 2001
From: Yoshifumi Kawai
Date: Wed, 25 May 2016 22:10:37 +0900
Subject: [PATCH 4/4] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 15853f0..29013f6 100644
--- a/README.md
+++ b/README.md
@@ -83,7 +83,7 @@ ObservableEventListener is simple wrapper of `EventListener` and `TraceEvent(Mic
 | LogToRollingFile | Output to flat file with file rotate.
 | LogTo | LogTo is helper for multiple subscribe.
 
-> How to make original Sink? I recommend log to Azure EventHubs, AWS Kinesis, BigQuery Streaming insert directly. Log to file is legacy way! Document is not available yet. Please see [Sinks](https://github.com/neuecc/EtwStream/tree/master/EtwStream.Core/Sinks) codes and please here to me.
+> How to make original Sink? I recommend log to Azure EventHubs, AWS Kinesis, BigQuery Streaming insert directly. Log to file is legacy way! Document is not available yet. Please see [Sinks](https://github.com/neuecc/EtwStream/tree/master/EtwStream/Sinks) codes and please here to me.
 > EtwStream's FileSink is fastest file logger, I'll show benchmark results.
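For reference (not part of the patches above), here is a minimal standalone C# sketch of how the two delegates in the patched `LogToRollingFile` example behave. Only the delegate bodies come from the diffs; the `RollingFileNameDemo` class, `Main` method, and sample date are hypothetical scaffolding, not EtwStream API.

```csharp
using System;

class RollingFileNameDemo
{
    static void Main()
    {
        // fileNameSelector as patched in [PATCH 2/4]:
        // file-open date plus a zero-padded sequence number.
        Func<DateTime, int, string> fileNameSelector =
            (dt, i) => $@"{dt.ToString("yyyyMMdd")}_MyLog_{i.ToString("00")}.log";

        // timestampPattern from the README example: when this formatted value changes
        // between writes, a new file is rolled. [PATCH 3/4] adds the note that the
        // pattern's last word must be an integer.
        Func<DateTime, string> timestampPattern = x => x.ToString("yyyyMMdd");

        var openTime = new DateTime(2016, 5, 25);
        Console.WriteLine(fileNameSelector(openTime, 0)); // 20160525_MyLog_00.log
        Console.WriteLine(fileNameSelector(openTime, 1)); // 20160525_MyLog_01.log (next size roll on the same day)
        Console.WriteLine(timestampPattern(openTime));    // 20160525 -> differs the next day, so a new file is opened
    }
}
```

Running it prints the file names the patched example would produce for the first two size-based rolls on 2016-05-25, and the timestamp string whose change triggers a date-based roll.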