diff --git a/.circleci/config.yml b/.circleci/config.yml index c5deb9e1..4e9170cc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,9 +12,6 @@ workflows: after-install-rubocop: - run: gem install rubocop-performance - build-test-windows - - build-test-linux: - name: Ruby 2.7 - docker-image: cimg/ruby:2.7 - build-test-linux: name: Ruby 3.0 docker-image: cimg/ruby:3.0 diff --git a/.ldrelease/config.yml b/.ldrelease/config.yml index 67733426..89f4f24b 100644 --- a/.ldrelease/config.yml +++ b/.ldrelease/config.yml @@ -18,7 +18,7 @@ publications: jobs: - docker: - image: ruby:2.7-buster + image: ruby:3.0-buster template: name: ruby diff --git a/.rubocop.yml b/.rubocop.yml index cbfb56b3..ce336e24 100644 --- a/.rubocop.yml +++ b/.rubocop.yml @@ -2,7 +2,7 @@ require: - rubocop-performance AllCops: - TargetRubyVersion: 2.7 + TargetRubyVersion: 3.0 Include: - lib/**/*.rb - spec/**/*.rb diff --git a/contract-tests/Gemfile b/contract-tests/Gemfile index 4e343a12..2c68b289 100644 --- a/contract-tests/Gemfile +++ b/contract-tests/Gemfile @@ -6,7 +6,8 @@ gem 'sinatra', '~> 2.1' # Sinatra can work with several server frameworks. In JRuby, we have to use glassfish (which # is only available in JRuby). Otherwise we use thin (which is not available in JRuby). gem 'glassfish', :platforms => :jruby -gem 'thin', :platforms => :ruby +gem 'http', '~> 5.1' gem 'json' gem 'rubocop', '~> 1.37', group: 'development' gem 'rubocop-performance', '~> 1.15', group: 'development' +gem 'thin', :platforms => :ruby diff --git a/contract-tests/client_entity.rb b/contract-tests/client_entity.rb index 99a177ba..e47cf8d0 100644 --- a/contract-tests/client_entity.rb +++ b/contract-tests/client_entity.rb @@ -3,6 +3,7 @@ require 'net/http' require 'launchdarkly-server-sdk' require './big_segment_store_fixture' +require 'http' class ClientEntity def initialize(log, config) @@ -77,12 +78,12 @@ def evaluate(params) response = {} if params[:detail] - detail = @client.variation_detail(params[:flagKey], params[:context] || params[:user], params[:defaultValue]) + detail = @client.variation_detail(params[:flagKey], params[:context], params[:defaultValue]) response[:value] = detail.value response[:variationIndex] = detail.variation_index response[:reason] = detail.reason else - response[:value] = @client.variation(params[:flagKey], params[:context] || params[:user], params[:defaultValue]) + response[:value] = @client.variation(params[:flagKey], params[:context], params[:defaultValue]) end response @@ -94,19 +95,65 @@ def evaluate_all(params) opts[:with_reasons] = params[:withReasons] || false opts[:details_only_for_tracked_flags] = params[:detailsOnlyForTrackedFlags] || false - @client.all_flags_state(params[:context] || params[:user], opts) + @client.all_flags_state(params[:context], opts) + end + + def migration_variation(params) + default_stage = params[:defaultStage] + default_stage = default_stage.to_sym if default_stage.respond_to? :to_sym + stage, _ = @client.migration_variation(params[:key], params[:context], default_stage) + stage + end + + def migration_operation(params) + builder = LaunchDarkly::Migrations::MigratorBuilder.new(@client) + builder.read_execution_order(params[:readExecutionOrder].to_sym) + builder.track_latency(params[:trackLatency]) + builder.track_errors(params[:trackErrors]) + + callback = ->(endpoint) { + ->(payload) { + response = HTTP.post(endpoint, body: payload) + + if response.status.success? 
+ LaunchDarkly::Result.success(response.body.to_s) + else + LaunchDarkly::Result.fail("requested failed with status code #{response.status}") + end + } + } + + consistency = nil + if params[:trackConsistency] + consistency = ->(lhs, rhs) { lhs == rhs } + end + + builder.read(callback.call(params[:oldEndpoint]), callback.call(params[:newEndpoint]), consistency) + builder.write(callback.call(params[:oldEndpoint]), callback.call(params[:newEndpoint])) + + migrator = builder.build + + return migrator if migrator.is_a? String + + if params[:operation] == LaunchDarkly::Migrations::OP_READ.to_s + result = migrator.read(params[:key], params[:context], params[:defaultStage].to_sym, params[:payload]) + result.success? ? result.value : result.error + else + result = migrator.write(params[:key], params[:context], params[:defaultStage].to_sym, params[:payload]) + result.authoritative.success? ? result.authoritative.value : result.authoritative.error + end end def secure_mode_hash(params) - @client.secure_mode_hash(params[:context] || params[:user]) + @client.secure_mode_hash(params[:context]) end def track(params) - @client.track(params[:eventKey], params[:context] || params[:user], params[:data], params[:metricValue]) + @client.track(params[:eventKey], params[:context], params[:data], params[:metricValue]) end def identify(params) - @client.identify(params[:context] || params[:user]) + @client.identify(params[:context]) end def flush_events diff --git a/contract-tests/service.rb b/contract-tests/service.rb index 853a63c1..9e3a610a 100644 --- a/contract-tests/service.rb +++ b/contract-tests/service.rb @@ -32,8 +32,9 @@ 'all-flags-details-only-for-tracked-flags', 'filtering', 'secure-mode-hash', - 'user-type', 'tags', + 'migrations', + 'event-sampling', ], }.to_json end @@ -102,6 +103,12 @@ when "getBigSegmentStoreStatus" status = client.get_big_segment_store_status return [200, nil, status.to_json] + when "migrationVariation" + response = {:result => client.migration_variation(params[:migrationVariation]).to_s} + return [200, nil, response.to_json] + when "migrationOperation" + response = {:result => client.migration_operation(params[:migrationOperation]).to_s} + return [200, nil, response.to_json] end return [400, nil, {:error => "Unknown command requested"}.to_json] diff --git a/launchdarkly-server-sdk.gemspec b/launchdarkly-server-sdk.gemspec index a1d299cc..f9a86856 100644 --- a/launchdarkly-server-sdk.gemspec +++ b/launchdarkly-server-sdk.gemspec @@ -19,7 +19,7 @@ Gem::Specification.new do |spec| spec.files = FileList["lib/**/*", "README.md", "LICENSE.txt"] spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } spec.require_paths = ["lib"] - spec.required_ruby_version = ">= 2.7.0" + spec.required_ruby_version = ">= 3.0.0" spec.add_development_dependency "aws-sdk-dynamodb", "~> 1.57" spec.add_development_dependency "bundler", "2.2.33" diff --git a/lib/ldclient-rb.rb b/lib/ldclient-rb.rb index 80d5adec..81289fe2 100644 --- a/lib/ldclient-rb.rb +++ b/lib/ldclient-rb.rb @@ -9,6 +9,7 @@ module LaunchDarkly require "ldclient-rb/interfaces" require "ldclient-rb/util" require "ldclient-rb/flags_state" +require "ldclient-rb/migrations" require "ldclient-rb/ldclient" require "ldclient-rb/cache_store" require "ldclient-rb/expiring_cache" diff --git a/lib/ldclient-rb/config.rb b/lib/ldclient-rb/config.rb index a9cc347a..052d4ce0 100644 --- a/lib/ldclient-rb/config.rb +++ b/lib/ldclient-rb/config.rb @@ -13,18 +13,6 @@ class Config # # Constructor for creating custom LaunchDarkly configurations. 
# - # `user_keys_capacity` and `user_keys_flush_interval` are deprecated - # configuration options. They exist to maintain backwards compatibility - # with previous configurations. Newer code should prefer their replacement - # options -- `context_keys_capacity` and `context_keys_flush_interval`. - # - # In the event both the user and context variations are provided, the - # context specific configuration option will take precedence. - # - # Similarly, `private_attribute_names` is deprecated. Newer code should - # prefer `private_attributes`. If both are provided, `private_attributes` - # will take precedence. - # # @param opts [Hash] the configuration options # @option opts [Logger] :logger See {#logger}. # @option opts [String] :base_uri ("https://sdk.launchdarkly.com") See {#base_uri}. @@ -42,12 +30,9 @@ class Config # @option opts [Float] :poll_interval (30) See {#poll_interval}. # @option opts [Boolean] :stream (true) See {#stream?}. # @option opts [Boolean] all_attributes_private (false) See {#all_attributes_private}. - # @option opts [Array] :private_attribute_names See {#private_attribute_names}. # @option opts [Array] :private_attributes See {#private_attributes}. # @option opts [Boolean] :send_events (true) See {#send_events}. - # @option opts [Integer] :user_keys_capacity (1000) See {#user_keys_capacity}. # @option opts [Integer] :context_keys_capacity (1000) See {#context_keys_capacity}. - # @option opts [Float] :user_keys_flush_interval (300) See {#user_keys_flush_interval}. # @option opts [Float] :context_keys_flush_interval (300) See {#context_keys_flush_interval}. # @option opts [Object] :data_source See {#data_source}. # @option opts [Boolean] :diagnostic_opt_out (false) See {#diagnostic_opt_out?}. @@ -76,10 +61,10 @@ def initialize(opts = {}) @offline = opts.has_key?(:offline) ? opts[:offline] : Config.default_offline @poll_interval = opts.has_key?(:poll_interval) && opts[:poll_interval] > Config.default_poll_interval ? opts[:poll_interval] : Config.default_poll_interval @all_attributes_private = opts[:all_attributes_private] || false - @private_attributes = opts[:private_attributes] || opts[:private_attribute_names] || [] + @private_attributes = opts[:private_attributes] || [] @send_events = opts.has_key?(:send_events) ? opts[:send_events] : Config.default_send_events - @context_keys_capacity = opts[:context_keys_capacity] || opts[:user_keys_capacity] || Config.default_context_keys_capacity - @context_keys_flush_interval = opts[:context_keys_flush_interval] || opts[:user_keys_flush_interval] || Config.default_user_keys_flush_interval + @context_keys_capacity = opts[:context_keys_capacity] || Config.default_context_keys_capacity + @context_keys_flush_interval = opts[:context_keys_flush_interval] || Config.default_context_keys_flush_interval @data_source = opts[:data_source] @diagnostic_opt_out = opts.has_key?(:diagnostic_opt_out) && opts[:diagnostic_opt_out] @diagnostic_recording_interval = opts.has_key?(:diagnostic_recording_interval) && opts[:diagnostic_recording_interval] > Config.minimum_diagnostic_recording_interval ? @@ -258,14 +243,6 @@ def offline? # attr_reader :private_attributes - # - # @deprecated Backwards compatibility alias for #private_attributes. - # - # @return [Integer] - # @see #private_attributes - # - alias :private_attribute_names :private_attributes - # # Whether to send events back to LaunchDarkly. This differs from {#offline?} in that it affects # only the sending of client-side events, not streaming or polling for events from the server. 
@@ -281,14 +258,6 @@ def offline? # attr_reader :context_keys_capacity - # - # @deprecated Backwards compatibility alias for #context_keys_capacity. - # - # @return [Integer] - # @see #context_keys_flush_interval - # - alias :user_keys_capacity :context_keys_capacity - # # The interval in seconds at which the event processor will reset its set of known context keys. # @return [Float] @@ -296,14 +265,6 @@ def offline? # attr_reader :context_keys_flush_interval - # - # @deprecated Backwards compatibility alias for #context_keys_flush_interval. - # - # @return [Integer] - # @see #context_keys_flush_interval - # - alias :user_keys_flush_interval :context_keys_flush_interval - # # An object that is responsible for receiving feature flag data from LaunchDarkly. By default, # the client uses its standard polling or streaming implementation; this is customizable for @@ -570,18 +531,6 @@ def self.default_context_keys_flush_interval 300 end - class << self - # - # @deprecated Backwards compatibility alias for #default_context_keys_capacity - # - alias :default_user_keys_capacity :default_context_keys_capacity - - # - # @deprecated Backwards compatibility alias for #default_context_keys_flush_interval - # - alias :default_user_keys_flush_interval :default_context_keys_flush_interval - end - # # The default value for {#diagnostic_recording_interval}. # @return [Float] 900 @@ -647,25 +596,11 @@ def initialize(store:, context_cache_size: nil, context_cache_time: nil, status_ # @return [Integer] attr_reader :context_cache_size - # - # @deprecated Backwards compatibility alias for #context_cache_size - # - # @return [Integer] - # - alias :user_cache_size :context_cache_size - # The maximum length of time (in seconds) that the Big Segment state for a context will be cached # by the SDK. # @return [Float] attr_reader :context_cache_time - # - # @deprecated Backwards compatibility alias for #context_cache_time - # - # @return [Float] - # - alias :user_cache_time :context_cache_time - # The interval (in seconds) at which the SDK will poll the Big Segment store to make sure it is # available and to determine how long ago it was updated. # @return [Float] diff --git a/lib/ldclient-rb/context.rb b/lib/ldclient-rb/context.rb index bd5a81c3..b4687818 100644 --- a/lib/ldclient-rb/context.rb +++ b/lib/ldclient-rb/context.rb @@ -324,7 +324,6 @@ def self.with_key(key, kind = KIND_DEFAULT) # def self.create(data) return create_invalid_context(ERR_NOT_HASH) unless data.is_a?(Hash) - return create_legacy_context(data) unless data.has_key?(:kind) kind = data[:kind] if kind == KIND_MULTI @@ -394,52 +393,6 @@ def self.create_multi(contexts) new(nil, nil, nil, nil, false, nil, nil, error) end - # - # @param data [Hash] - # @return [LDContext] - # - private_class_method def self.create_legacy_context(data) - warn("DEPRECATED: legacy user format will be removed in 8.0.0", uplevel: 1) - - key = data[:key] - - # Legacy users are allowed to have "" as a key but they cannot have nil as a key. - return create_invalid_context(ERR_KEY_EMPTY) if key.nil? - - name = data[:name] - name_error = LaunchDarkly::Impl::Context.validate_name(name) - return create_invalid_context(name_error) unless name_error.nil? - - anonymous = data[:anonymous] - anonymous_error = LaunchDarkly::Impl::Context.validate_anonymous(anonymous, true) - return create_invalid_context(anonymous_error) unless anonymous_error.nil? - - custom = data[:custom] - unless custom.nil? 
|| custom.is_a?(Hash) - return create_invalid_context(ERR_CUSTOM_NON_HASH) - end - - # We only need to create an attribute hash if one of these keys exist. - # Everything else is stored in dedicated instance variables. - attributes = custom.clone - data.each do |k, v| - case k - when :ip, :email, :avatar, :firstName, :lastName, :country - attributes ||= {} - attributes[k] = v.clone - else - next - end - end - - private_attributes = data[:privateAttributeNames] - if private_attributes && !private_attributes.is_a?(Array) - return create_invalid_context(ERR_PRIVATE_NON_ARRAY) - end - - new(key.to_s, key.to_s, KIND_DEFAULT, name, anonymous, attributes, private_attributes) - end - # # @param data [Hash] # @param kind [String] diff --git a/lib/ldclient-rb/evaluation_detail.rb b/lib/ldclient-rb/evaluation_detail.rb index 55d9df87..d30ebb32 100644 --- a/lib/ldclient-rb/evaluation_detail.rb +++ b/lib/ldclient-rb/evaluation_detail.rb @@ -1,4 +1,3 @@ - module LaunchDarkly # An object returned by {LDClient#variation_detail}, combining the result of a flag evaluation with # an explanation of how it was calculated. @@ -13,6 +12,7 @@ class EvaluationDetail def initialize(value, variation_index, reason) raise ArgumentError.new("variation_index must be a number") if !variation_index.nil? && !(variation_index.is_a? Numeric) raise ArgumentError.new("reason must be an EvaluationReason") unless reason.is_a? EvaluationReason + @value = value @variation_index = variation_index @reason = reason @@ -100,6 +100,10 @@ class EvaluationReason # a rule specified a nonexistent variation. An error message will always be logged in this case. ERROR_MALFORMED_FLAG = :MALFORMED_FLAG + # Value for {#error_kind} indicating that there was an inconsistency between the expected type of the flag, and the + # actual type of the variation evaluated. + ERROR_WRONG_TYPE = :WRONG_TYPE + # Value for {#error_kind} indicating that the caller passed `nil` for the context parameter, or the # context was invalid. ERROR_USER_NOT_SPECIFIED = :USER_NOT_SPECIFIED diff --git a/lib/ldclient-rb/events.rb b/lib/ldclient-rb/events.rb index 1c44ba59..8339f735 100644 --- a/lib/ldclient-rb/events.rb +++ b/lib/ldclient-rb/events.rb @@ -40,7 +40,9 @@ def record_eval_event( default = nil, track_events = false, debug_until = nil, - prereq_of = nil + prereq_of = nil, + sampling_ratio = nil, + exclude_from_summaries = false ) end @@ -55,6 +57,9 @@ def record_custom_event( ) end + def record_migration_op_event(event) + end + def flush end @@ -153,10 +158,12 @@ def record_eval_event( default = nil, track_events = false, debug_until = nil, - prereq_of = nil + prereq_of = nil, + sampling_ratio = nil, + exclude_from_summaries = false ) post_to_inbox(LaunchDarkly::Impl::EvalEvent.new(timestamp, context, key, version, variation, value, reason, - default, track_events, debug_until, prereq_of)) + default, track_events, debug_until, prereq_of, sampling_ratio, exclude_from_summaries)) end def record_identify_event(context) @@ -167,6 +174,10 @@ def record_custom_event(context, key, data = nil, metric_value = nil) post_to_inbox(LaunchDarkly::Impl::CustomEvent.new(timestamp, context, key, data, metric_value)) end + def record_migration_op_event(event) + post_to_inbox(event) + end + def flush # flush is done asynchronously post_to_inbox(FlushMessage.new) @@ -220,6 +231,7 @@ def initialize(inbox, sdk_key, config, diagnostic_accumulator, event_sender) @config = config @diagnostic_accumulator = config.diagnostic_opt_out? ? 
nil : diagnostic_accumulator @event_sender = event_sender + @sampler = LaunchDarkly::Impl::Sampler.new(Random.new) @context_keys = SimpleLRUCacheSet.new(config.context_keys_capacity) @formatter = EventOutputFormatter.new(config) @@ -292,7 +304,7 @@ def dispatch_event(event, outbox) return if @disabled.value # Always record the event in the summary. - outbox.add_to_summary(event) + outbox.add_to_summary(event) unless event.exclude_from_summaries # Decide whether to add the event to the payload. Feature events may be added twice, once for # the event (if tracked) and once for debugging. @@ -309,12 +321,12 @@ def dispatch_event(event, outbox) # For each context we haven't seen before, we add an index event - unless this is already # an identify event for that context. - if !event.context.nil? && !notice_context(event.context) && !event.is_a?(LaunchDarkly::Impl::IdentifyEvent) + if !event.context.nil? && !notice_context(event.context) && !event.is_a?(LaunchDarkly::Impl::IdentifyEvent) && !event.is_a?(LaunchDarkly::Impl::MigrationOpEvent) outbox.add_event(LaunchDarkly::Impl::IndexEvent.new(event.timestamp, event.context)) end - outbox.add_event(event) if will_add_full_event - outbox.add_event(debug_event) unless debug_event.nil? + outbox.add_event(event) if will_add_full_event && @sampler.sample(event.sampling_ratio.nil? ? 1 : event.sampling_ratio) + outbox.add_event(debug_event) if !debug_event.nil? && @sampler.sample(event.sampling_ratio.nil? ? 1 : event.sampling_ratio) end # @@ -443,6 +455,7 @@ class EventOutputFormatter CUSTOM_KIND = 'custom' INDEX_KIND = 'index' DEBUG_KIND = 'debug' + MIGRATION_OP_KIND = 'migration_op' SUMMARY_KIND = 'summary' def initialize(config) @@ -476,6 +489,64 @@ def make_output_events(events, summary) out[:reason] = event.reason unless event.reason.nil? out + when LaunchDarkly::Impl::MigrationOpEvent + out = { + kind: MIGRATION_OP_KIND, + creationDate: event.timestamp, + contextKeys: event.context.keys, + operation: event.operation.to_s, + evaluation: { + key: event.key, + value: event.evaluation.value, + }, + } + + out[:evaluation][:version] = event.version unless event.version.nil? + out[:evaluation][:default] = event.default unless event.default.nil? + out[:evaluation][:variation] = event.evaluation.variation_index unless event.evaluation.variation_index.nil? + out[:evaluation][:reason] = event.evaluation.reason unless event.evaluation.reason.nil? + out[:samplingRatio] = event.sampling_ratio unless event.sampling_ratio.nil? || event.sampling_ratio == 1 + + measurements = [] + + unless event.invoked.empty? + measurements << { + "key": "invoked", + "values": event.invoked.map { |origin| [origin, true] }.to_h, + } + end + + unless event.consistency_check.nil? + measurement = { + "key": "consistent", + "value": event.consistency_check, + } + + unless event.consistency_check_ratio.nil? || event.consistency_check_ratio == 1 + measurement[:samplingRatio] = event.consistency_check_ratio + end + + measurements << measurement + end + + + unless event.latencies.empty? + measurements << { + "key": "latency_ms", + "values": event.latencies, + } + end + + unless event.errors.empty? + measurements << { + "key": "error", + "values": event.errors.map { |origin| [origin, true] }.to_h, + } + end + out[:measurements] = measurements unless measurements.empty? 
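+
+          # Putting the pieces above together, a fully-populated migration_op event serialized by this
+          # branch looks roughly like the following (the values shown are illustrative placeholders):
+          #
+          #   {
+          #     kind: 'migration_op',
+          #     creationDate: 1700000000000,
+          #     contextKeys: { 'user' => 'user-key' },
+          #     operation: 'read',
+          #     evaluation: { key: 'migration-flag', value: 'live', version: 3, default: 'off', variation: 1 },
+          #     measurements: [
+          #       { key: 'invoked', values: { old: true, new: true } },
+          #       { key: 'consistent', value: true },
+          #       { key: 'latency_ms', values: { old: 12.5, new: 3.4 } },
+          #     ],
+          #   }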
+ + out + when LaunchDarkly::Impl::IdentifyEvent { kind: IDENTIFY_KIND, diff --git a/lib/ldclient-rb/impl/big_segments.rb b/lib/ldclient-rb/impl/big_segments.rb index 77a1b9b1..4a7efccf 100644 --- a/lib/ldclient-rb/impl/big_segments.rb +++ b/lib/ldclient-rb/impl/big_segments.rb @@ -23,7 +23,7 @@ def initialize(big_segments_config, logger) @last_status = nil unless @store.nil? - @cache = ExpiringCache.new(big_segments_config.user_cache_size, big_segments_config.user_cache_time) + @cache = ExpiringCache.new(big_segments_config.context_cache_size, big_segments_config.context_cache_time) @poll_worker = RepeatingTask.new(big_segments_config.status_poll_interval, 0, -> { poll_store_and_update_status }, logger) @poll_worker.start end diff --git a/lib/ldclient-rb/impl/event_types.rb b/lib/ldclient-rb/impl/event_types.rb index 1be03eb8..9745373b 100644 --- a/lib/ldclient-rb/impl/event_types.rb +++ b/lib/ldclient-rb/impl/event_types.rb @@ -1,23 +1,33 @@ +require 'set' + module LaunchDarkly module Impl class Event # @param timestamp [Integer] # @param context [LaunchDarkly::LDContext] - def initialize(timestamp, context) + # @param sampling_ratio [Integer, nil] + # @param exclude_from_summaries [Boolean] + def initialize(timestamp, context, sampling_ratio = nil, exclude_from_summaries = false) @timestamp = timestamp @context = context + @sampling_ratio = sampling_ratio + @exclude_from_summaries = exclude_from_summaries end # @return [Integer] attr_reader :timestamp # @return [LaunchDarkly::LDContext] attr_reader :context + # @return [Integer, nil] + attr_reader :sampling_ratio + # @return [Boolean] + attr_reader :exclude_from_summaries end class EvalEvent < Event def initialize(timestamp, context, key, version = nil, variation = nil, value = nil, reason = nil, default = nil, - track_events = false, debug_until = nil, prereq_of = nil) - super(timestamp, context) + track_events = false, debug_until = nil, prereq_of = nil, sampling_ratio = nil, exclude_from_summaries = false) + super(timestamp, context, sampling_ratio, exclude_from_summaries) @key = key @version = version @variation = variation @@ -41,6 +51,54 @@ def initialize(timestamp, context, key, version = nil, variation = nil, value = attr_reader :prereq_of end + class MigrationOpEvent < Event + # + # A migration op event represents the results of a migration-assisted read or write operation. + # + # The event includes optional measurements reporting on consistency checks, error reporting, and operation latency + # values. + # + # @param timestamp [Integer] + # @param context [LaunchDarkly::LDContext] + # @param key [string] + # @param flag [LaunchDarkly::Impl::Model::FeatureFlag, nil] + # @param operation [Symbol] + # @param default_stage [Symbol] + # @param evaluation [LaunchDarkly::EvaluationDetail] + # @param invoked [Set] + # @param consistency_check [Boolean, nil] + # @param consistency_check_ratio [Integer, nil] + # @param errors [Set] + # @param latencies [Hash] + # + def initialize(timestamp, context, key, flag, operation, default_stage, evaluation, invoked, consistency_check, consistency_check_ratio, errors, latencies) + super(timestamp, context) + @operation = operation + @key = key + @version = flag&.version + @sampling_ratio = flag&.sampling_ratio + @default = default_stage + @evaluation = evaluation + @consistency_check = consistency_check + @consistency_check_ratio = consistency_check.nil? ? 
nil : consistency_check_ratio + @invoked = invoked + @errors = errors + @latencies = latencies + end + + attr_reader :operation + attr_reader :key + attr_reader :version + attr_reader :sampling_ratio + attr_reader :default + attr_reader :evaluation + attr_reader :consistency_check + attr_reader :consistency_check_ratio + attr_reader :invoked + attr_reader :errors + attr_reader :latencies + end + class IdentifyEvent < Event def initialize(timestamp, context) super(timestamp, context) diff --git a/lib/ldclient-rb/impl/migrations/migrator.rb b/lib/ldclient-rb/impl/migrations/migrator.rb new file mode 100644 index 00000000..8fff0448 --- /dev/null +++ b/lib/ldclient-rb/impl/migrations/migrator.rb @@ -0,0 +1,287 @@ +require 'thread' + +module LaunchDarkly + module Impl + module Migrations + + # + # A migration config stores references to callable methods which execute customer defined read or write + # operations on old or new origins of information. For read operations, an optional comparison function also be + # defined. + # + class MigrationConfig + # + # @param old [#call] Refer to {#old} + # @param new [#call] Refer to {#new} + # @param comparison [#call, nil] Refer to {#comparison} + # + def initialize(old, new, comparison) + @old = old + @new = new + @comparison = comparison + end + + # + # Callable which receives a nullable payload parameter and returns an {LaunchDarkly::Result}. + # + # This function call should affect the old migration origin when called. + # + # @return [#call] + # + attr_reader :old + + # + # Callable which receives a nullable payload parameter and returns an {LaunchDarkly::Result}. + # + # This function call should affect the new migration origin when called. + # + # @return [#call] + # + attr_reader :new + + # + # Optional callable which receives two objects of any kind and returns a boolean representing equality. + # + # The result of this comparison can be sent upstream to LaunchDarkly to enhance migration observability. + # + # @return [#call, nil] + # + attr_reader :comparison + end + + # + # An implementation of the [LaunchDarkly::Interfaces::Migrations::Migrator] interface, capable of supporting + # feature-flag backed technology migrations. + # + class Migrator + include LaunchDarkly::Interfaces::Migrations::Migrator + + # + # @param client [LaunchDarkly::LDClient] + # @param read_execution_order [Symbol] + # @param read_config [MigrationConfig] + # @param write_config [MigrationConfig] + # @param measure_latency [Boolean] + # @param measure_errors [Boolean] + # + def initialize(client, read_execution_order, read_config, write_config, measure_latency, measure_errors) + @client = client + @read_execution_order = read_execution_order + @read_config = read_config + @write_config = write_config + @measure_latency = measure_latency + @measure_errors = measure_errors + @sampler = LaunchDarkly::Impl::Sampler.new(Random.new) + end + + # + # Perform the configured read operations against the appropriate old and/or new origins. + # + # @param key [String] The migration-based flag key to use for determining migration stages + # @param context [LaunchDarkly::LDContext] The context to use for evaluating the migration flag + # @param default_stage [Symbol] The stage to fallback to if one could not be determined for the requested flag + # @param payload [String] An optional payload to pass through to the configured read operations. 
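+        #
+        # A rough usage sketch; `migrator` is assumed to come from
+        # {LaunchDarkly::Migrations::MigratorBuilder#build}, and the flag key and context are placeholders:
+        #
+        #   result = migrator.read("migration-flag-key",
+        #                          LaunchDarkly::LDContext.with_key("user-key"),
+        #                          LaunchDarkly::Migrations::STAGE_OFF)
+        #   result.success? ? result.value : result.error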
+ # + # @return [LaunchDarkly::Migrations::OperationResult] + # + def read(key, context, default_stage, payload = nil) + stage, tracker = @client.migration_variation(key, context, default_stage) + tracker.operation(LaunchDarkly::Migrations::OP_READ) + + old = Executor.new(@client.logger, LaunchDarkly::Migrations::ORIGIN_OLD, @read_config.old, tracker, @measure_latency, @measure_errors, payload) + new = Executor.new(@client.logger, LaunchDarkly::Migrations::ORIGIN_NEW, @read_config.new, tracker, @measure_latency, @measure_errors, payload) + + case stage + when LaunchDarkly::Migrations::STAGE_OFF + result = old.run + when LaunchDarkly::Migrations::STAGE_DUALWRITE + result = old.run + when LaunchDarkly::Migrations::STAGE_SHADOW + result = read_both(old, new, @read_config.comparison, @read_execution_order, tracker) + when LaunchDarkly::Migrations::STAGE_LIVE + result = read_both(new, old, @read_config.comparison, @read_execution_order, tracker) + when LaunchDarkly::Migrations::STAGE_RAMPDOWN + result = new.run + when LaunchDarkly::Migrations::STAGE_COMPLETE + result = new.run + else + result = LaunchDarkly::Migrations::OperationResult.new( + LaunchDarkly::Migrations::ORIGIN_OLD, + LaunchDarkly::Result.fail("invalid stage #{stage}; cannot execute read") + ) + end + + @client.track_migration_op(tracker) + + result + end + + # + # Perform the configured write operations against the appropriate old and/or new origins. + # + # @param key [String] The migration-based flag key to use for determining migration stages + # @param context [LaunchDarkly::LDContext] The context to use for evaluating the migration flag + # @param default_stage [Symbol] The stage to fallback to if one could not be determined for the requested flag + # @param payload [String] An optional payload to pass through to the configured write operations. 
+ # + # @return [LaunchDarkly::Migrations::WriteResult] + # + def write(key, context, default_stage, payload = nil) + stage, tracker = @client.migration_variation(key, context, default_stage) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + + old = Executor.new(@client.logger, LaunchDarkly::Migrations::ORIGIN_OLD, @write_config.old, tracker, @measure_latency, @measure_errors, payload) + new = Executor.new(@client.logger, LaunchDarkly::Migrations::ORIGIN_NEW, @write_config.new, tracker, @measure_latency, @measure_errors, payload) + + case stage + when LaunchDarkly::Migrations::STAGE_OFF + result = old.run() + write_result = LaunchDarkly::Migrations::WriteResult.new(result) + when LaunchDarkly::Migrations::STAGE_DUALWRITE + authoritative_result, nonauthoritative_result = write_both(old, new, tracker) + write_result = LaunchDarkly::Migrations::WriteResult.new(authoritative_result, nonauthoritative_result) + when LaunchDarkly::Migrations::STAGE_SHADOW + authoritative_result, nonauthoritative_result = write_both(old, new, tracker) + write_result = LaunchDarkly::Migrations::WriteResult.new(authoritative_result, nonauthoritative_result) + when LaunchDarkly::Migrations::STAGE_LIVE + authoritative_result, nonauthoritative_result = write_both(new, old, tracker) + write_result = LaunchDarkly::Migrations::WriteResult.new(authoritative_result, nonauthoritative_result) + when LaunchDarkly::Migrations::STAGE_RAMPDOWN + authoritative_result, nonauthoritative_result = write_both(new, old, tracker) + write_result = LaunchDarkly::Migrations::WriteResult.new(authoritative_result, nonauthoritative_result) + when LaunchDarkly::Migrations::STAGE_COMPLETE + result = new.run() + write_result = LaunchDarkly::Migrations::WriteResult.new(result) + else + result = LaunchDarkly::Migrations::OperationResult.fail( + LaunchDarkly::Migrations::ORIGIN_OLD, + LaunchDarkly::Result.fail("invalid stage #{stage}; cannot execute write") + ) + write_result = LaunchDarkly::Migrations::WriteResult.new(result) + end + + @client.track_migration_op(tracker) + + write_result + end + + # + # Execute both read methods in accordance with the requested execution order. + # + # This method always returns the {LaunchDarkly::Migrations::OperationResult} from running the authoritative read operation. The + # non-authoritative executor may fail but it will not affect the return value. + # + # @param authoritative [Executor] + # @param nonauthoritative [Executor] + # @param comparison [#call] + # @param execution_order [Symbol] + # @param tracker [LaunchDarkly::Interfaces::Migrations::OpTracker] + # + # @return [LaunchDarkly::Migrations::OperationResult] + # + private def read_both(authoritative, nonauthoritative, comparison, execution_order, tracker) + authoritative_result = nil + nonauthoritative_result = nil + + case execution_order + when LaunchDarkly::Migrations::MigratorBuilder::EXECUTION_PARALLEL + auth_handler = Thread.new { authoritative_result = authoritative.run } + nonauth_handler = Thread.new { nonauthoritative_result = nonauthoritative.run } + + auth_handler.join() + nonauth_handler.join() + when LaunchDarkly::Migrations::MigratorBuilder::EXECUTION_RANDOM && @sampler.sample(2) + nonauthoritative_result = nonauthoritative.run + authoritative_result = authoritative.run + else + authoritative_result = authoritative.run + nonauthoritative_result = nonauthoritative.run + end + + return authoritative_result if comparison.nil? + + if authoritative_result.success? && nonauthoritative_result.success? 
+ tracker.consistent(->{ comparison.call(authoritative_result.value, nonauthoritative_result.value) }) + end + + authoritative_result + end + + # + # Execute both operations sequentially. + # + # If the authoritative executor fails, do not run the non-authoritative one. As a result, this method will + # always return an authoritative {LaunchDarkly::Migrations::OperationResult} as the first value, and optionally the non-authoritative + # {LaunchDarkly::Migrations::OperationResult} as the second value. + # + # @param authoritative [Executor] + # @param nonauthoritative [Executor] + # @param tracker [LaunchDarkly::Interfaces::Migrations::OpTracker] + # + # @return [Array] + # + private def write_both(authoritative, nonauthoritative, tracker) + authoritative_result = authoritative.run() + tracker.invoked(authoritative.origin) + + return authoritative_result, nil unless authoritative_result.success? + + nonauthoritative_result = nonauthoritative.run() + tracker.invoked(nonauthoritative.origin) + + [authoritative_result, nonauthoritative_result] + end + end + + # + # Utility class for executing migration operations while also tracking our built-in migration measurements. + # + class Executor + # + # @return [Symbol] + # + attr_reader :origin + + # + # @param origin [Symbol] + # @param fn [#call] + # @param tracker [LaunchDarkly::Interfaces::Migrations::OpTracker] + # @param measure_latency [Boolean] + # @param measure_errors [Boolean] + # @param payload [Object, nil] + # + def initialize(logger, origin, fn, tracker, measure_latency, measure_errors, payload) + @logger = logger + @origin = origin + @fn = fn + @tracker = tracker + @measure_latency = measure_latency + @measure_errors = measure_errors + @payload = payload + end + + # + # Execute the configured operation and track any available measurements. + # + # @return [LaunchDarkly::Migrations::OperationResult] + # + def run() + start = Time.now + + begin + result = @fn.call(@payload) + rescue => e + LaunchDarkly::Util.log_exception(@logger, "Unexpected error running method for '#{origin}' origin", e) + result = LaunchDarkly::Result.fail("'#{origin}' operation raised an exception", e) + end + + @tracker.latency(@origin, (Time.now - start) * 1_000) if @measure_latency + @tracker.error(@origin) if @measure_errors && !result.success? 
+ @tracker.invoked(@origin) + + LaunchDarkly::Migrations::OperationResult.new(@origin, result) + end + end + end + end +end diff --git a/lib/ldclient-rb/impl/migrations/tracker.rb b/lib/ldclient-rb/impl/migrations/tracker.rb new file mode 100644 index 00000000..546f4552 --- /dev/null +++ b/lib/ldclient-rb/impl/migrations/tracker.rb @@ -0,0 +1,136 @@ +require "set" +require "ldclient-rb/impl/sampler" +require "logger" + +module LaunchDarkly + module Impl + module Migrations + class OpTracker + include LaunchDarkly::Interfaces::Migrations::OpTracker + + # + # @param logger [Logger] logger + # @param key [string] key + # @param flag [LaunchDarkly::Impl::Model::FeatureFlag] flag + # @param context [LaunchDarkly::LDContext] context + # @param detail [LaunchDarkly::EvaluationDetail] detail + # @param default_stage [Symbol] default_stage + # + def initialize(logger, key, flag, context, detail, default_stage) + @logger = logger + @key = key + @flag = flag + @context = context + @detail = detail + @default_stage = default_stage + @sampler = LaunchDarkly::Impl::Sampler.new(Random.new) + + @mutex = Mutex.new + + # @type [Symbol, nil] + @operation = nil + + # @type [Set] + @invoked = Set.new + # @type [Boolean, nil] + @consistent = nil + + # @type [Int] + @consistent_ratio = @flag&.migration_settings&.check_ratio + @consistent_ratio = 1 if @consistent_ratio.nil? + + # @type [Set] + @errors = Set.new + # @type [Hash] + @latencies = Hash.new + end + + def operation(operation) + return unless LaunchDarkly::Migrations::VALID_OPERATIONS.include? operation + + @mutex.synchronize do + @operation = operation + end + end + + def invoked(origin) + return unless LaunchDarkly::Migrations::VALID_ORIGINS.include? origin + + @mutex.synchronize do + @invoked.add(origin) + end + end + + def consistent(is_consistent) + @mutex.synchronize do + if @sampler.sample(@consistent_ratio) + begin + @consistent = is_consistent.call + rescue => e + LaunchDarkly::Util.log_exception(@logger, "Exception raised during consistency check; failed to record measurement", e) + end + end + end + end + + def error(origin) + return unless LaunchDarkly::Migrations::VALID_ORIGINS.include? origin + + @mutex.synchronize do + @errors.add(origin) + end + end + + def latency(origin, duration) + return unless LaunchDarkly::Migrations::VALID_ORIGINS.include? origin + return unless duration.is_a? Numeric + return if duration < 0 + + @mutex.synchronize do + @latencies[origin] = duration + end + end + + def build + @mutex.synchronize do + return "operation cannot contain an empty key" if @key.empty? + return "operation not provided" if @operation.nil? + return "no origins were invoked" if @invoked.empty? + return "provided context was invalid" unless @context.valid? + + result = check_invoked_consistency + return result unless result == true + + LaunchDarkly::Impl::MigrationOpEvent.new( + LaunchDarkly::Impl::Util.current_time_millis, + @context, + @key, + @flag, + @operation, + @default_stage, + @detail, + @invoked, + @consistent, + @consistent_ratio, + @errors, + @latencies + ) + end + end + + private def check_invoked_consistency + LaunchDarkly::Migrations::VALID_ORIGINS.each do |origin| + next if @invoked.include? origin + + return "provided latency for origin '#{origin}' without recording invocation" if @latencies.include? origin + return "provided error for origin '#{origin}' without recording invocation" if @errors.include? origin + end + + return "provided consistency without recording both invocations" if !@consistent.nil? 
&& @invoked.size != 2 + + true + end + end + end + end +end diff --git a/lib/ldclient-rb/impl/model/feature_flag.rb b/lib/ldclient-rb/impl/model/feature_flag.rb index 34dd1bed..1def3d19 100644 --- a/lib/ldclient-rb/impl/model/feature_flag.rb +++ b/lib/ldclient-rb/impl/model/feature_flag.rb @@ -26,6 +26,10 @@ def initialize(data, logger = nil) @version = data[:version] @deleted = !!data[:deleted] return if @deleted + migration_settings = data[:migration] || {} + @migration_settings = MigrationSettings.new(migration_settings[:checkRatio]) + @sampling_ratio = data[:samplingRatio] + @exclude_from_summaries = !!data[:excludeFromSummaries] @variations = data[:variations] || [] @on = !!data[:on] fallthrough = data[:fallthrough] || {} @@ -63,6 +67,12 @@ def initialize(data, logger = nil) attr_reader :version # @return [Boolean] attr_reader :deleted + # @return [MigrationSettings, nil] + attr_reader :migration_settings + # @return [Integer, nil] + attr_reader :sampling_ratio + # @return [Boolean, nil] + attr_reader :exclude_from_summaries # @return [Array] attr_reader :variations # @return [Boolean] @@ -173,6 +183,19 @@ def initialize(data, rule_index, flag, errors_out = nil) attr_reader :variation_or_rollout end + + class MigrationSettings + # + # @param check_ratio [Int, nil] + # + def initialize(check_ratio) + @check_ratio = check_ratio + end + + # @return [Integer, nil] + attr_reader :check_ratio + end + class VariationOrRollout def initialize(variation, rollout_data, flag = nil, errors_out = nil, description = nil) @variation = variation diff --git a/lib/ldclient-rb/impl/sampler.rb b/lib/ldclient-rb/impl/sampler.rb new file mode 100644 index 00000000..b611628b --- /dev/null +++ b/lib/ldclient-rb/impl/sampler.rb @@ -0,0 +1,25 @@ +module LaunchDarkly + module Impl + class Sampler + # + # @param random [Random] + # + def initialize(random) + @random = random + end + + # + # @param ratio [Int] + # + # @return [Boolean] + # + def sample(ratio) + return false unless ratio.is_a? Integer + return false if ratio <= 0 + return true if ratio == 1 + + @random.rand(1.0) < 1.0 / ratio + end + end + end +end diff --git a/lib/ldclient-rb/integrations/test_data/flag_builder.rb b/lib/ldclient-rb/integrations/test_data/flag_builder.rb index 2b8a495d..2c4aa1b0 100644 --- a/lib/ldclient-rb/integrations/test_data/flag_builder.rb +++ b/lib/ldclient-rb/integrations/test_data/flag_builder.rb @@ -43,6 +43,47 @@ def on(on) self end + # + # Set the migration related settings for this feature flag. + # + # The settings hash should be built using the {FlagMigrationSettingsBuilder}. + # + # @param settings [Hash] + # @return [FlagBuilder] the builder + # + def migration_settings(settings) + @migration_settings = settings + self + end + + # + # Set the sampling ratio for this flag. This ratio is used to control the emission rate of feature, debug, and + # migration op events. + # + # General usage should not require interacting with this method. + # + # @param ratio [Integer] + # @return [FlagBuilder] + # + def sampling_ratio(ratio) + @sampling_ratio = ratio + self + end + + # + # Set the option to exclude this flag from summary events. This is used to control the size of the summary event + # in the event certain flag payloads are large. + # + # General usage should not require interacting with this method. + # + # @param exclude [Boolean] + # @return [FlagBuilder] + # + def exclude_from_summaries(exclude) + @exclude_from_summaries = exclude + self + end + # # Specifies the fallthrough variation. 
The fallthrough is the value # that is returned if targeting is on and the context was not matched by a more specific @@ -128,11 +169,6 @@ def variation_for_all(variation) end end - # - # @deprecated Backwards compatibility alias for #variation_for_all - # - alias_method :variation_for_all_users, :variation_for_all - # # Sets the flag to always return the specified variation value for all context. # @@ -148,11 +184,6 @@ def value_for_all(value) variations(value).variation_for_all(0) end - # - # @deprecated Backwards compatibility alias for #value_for_all - # - alias_method :value_for_all_users, :value_for_all - # # Sets the flag to return the specified variation for a specific context key when targeting # is on. @@ -315,11 +346,6 @@ def clear_targets self end - # - # @deprecated Backwards compatibility alias for #clear_targets - # - alias_method :clear_user_targets, :clear_targets - # # Removes any existing rules from the flag. # This undoes the effect of methods like {#if_match} @@ -376,6 +402,18 @@ def build(version) res[:fallthrough] = { variation: @fallthrough_variation } end + unless @migration_settings.nil? + res[:migration] = @migration_settings + end + + unless @sampling_ratio.nil? || @sampling_ratio == 1 + res[:samplingRatio] = @sampling_ratio + end + + unless @exclude_from_summaries.nil? || !@exclude_from_summaries + res[:excludeFromSummaries] = @exclude_from_summaries + end + unless @targets.nil? targets = [] context_targets = [] @@ -403,6 +441,37 @@ def build(version) res end + # + # A builder for feature flag migration settings to be used with {FlagBuilder}. + # + # In the LaunchDarkly model, a flag can be a standard feature flag, or it can be a migration-related flag, in + # which case it has migration-specified related settings. These settings control things like the rate at which + # reads are tested for consistency between origins. + # + class FlagMigrationSettingsBuilder + def initialize() + @check_ratio = nil + end + + # + # @param ratio [Integer] + # @return [FlagMigrationSettingsBuilder] + # + def check_ratio(ratio) + return unless ratio.is_a? Integer + @check_ratio = ratio + self + end + + def build + return nil if @check_ratio.nil? || @check_ratio == 1 + + { + "checkRatio": @check_ratio, + } + end + end + # # A builder for feature flag rules to be used with {FlagBuilder}. # diff --git a/lib/ldclient-rb/interfaces.rb b/lib/ldclient-rb/interfaces.rb index c3a6ac15..d42f924e 100644 --- a/lib/ldclient-rb/interfaces.rb +++ b/lib/ldclient-rb/interfaces.rb @@ -788,5 +788,102 @@ def initialize(kind, status_code, message, time) end end end + + # + # Namespace for feature-flag based technology migration support. + # + module Migrations + # + # A migrator is the interface through which migration support is executed. A migrator is configured through the + # {LaunchDarkly::Migrations::MigratorBuilder} class. + # + module Migrator + # + # Uses the provided flag key and context to execute a migration-backed read operation. + # + # @param key [String] + # @param context [LaunchDarkly::LDContext] + # @param default_stage [Symbol] + # @param payload [Object, nil] + # + # @return [LaunchDarkly::Migrations::OperationResult] + # + def read(key, context, default_stage, payload = nil) end + + # + # Uses the provided flag key and context to execute a migration-backed write operation. 
+ # + # @param key [String] + # @param context [LaunchDarkly::LDContext] + # @param default_stage [Symbol] + # @param payload [Object, nil] + # + # @return [LaunchDarkly::Migrations::WriteResult] + # + def write(key, context, default_stage, payload = nil) end + end + + # + # An OpTracker is responsible for managing the collection of measurements that which a user might wish to record + # throughout a migration-assisted operation. + # + # Example measurements include latency, errors, and consistency. + # + # This data can be provided to the {LaunchDarkly::LDClient.track_migration_op} method to relay this metric + # information upstream to LaunchDarkly services. + # + module OpTracker + # + # Sets the migration related operation associated with these tracking measurements. + # + # @param [Symbol] op The read or write operation symbol. + # + def operation(op) end + + # + # Allows recording which origins were called during a migration. + # + # @param [Symbol] origin Designation for the old or new origin. + # + def invoked(origin) end + + # + # Allows recording the results of a consistency check. + # + # This method accepts a callable which should take no parameters and return a single boolean to represent the + # consistency check results for a read operation. + # + # A callable is provided in case sampling rules do not require consistency checking to run. In this case, we can + # avoid the overhead of a function by not using the callable. + # + # @param [#call] is_consistent closure to return result of comparison check + # + def consistent(is_consistent) end + + # + # Allows recording whether an error occurred during the operation. + # + # @param [Symbol] origin Designation for the old or new origin. + # + def error(origin) end + + # + # Allows tracking the recorded latency for an individual operation. + # + # @param [Symbol] origin Designation for the old or new origin. + # @param [Float] duration Duration measurement in milliseconds (ms). + # + def latency(origin, duration) end + + # + # Creates an instance of {LaunchDarkly::Impl::MigrationOpEventData}. + # + # @return [LaunchDarkly::Impl::MigrationOpEvent, String] A migration op event or a string describing the error. + # failure. + # + def build + end + end + end end end diff --git a/lib/ldclient-rb/ldclient.rb b/lib/ldclient-rb/ldclient.rb index 2afba39c..e5bdae69 100644 --- a/lib/ldclient-rb/ldclient.rb +++ b/lib/ldclient-rb/ldclient.rb @@ -6,8 +6,10 @@ require "ldclient-rb/impl/evaluator" require "ldclient-rb/impl/flag_tracker" require "ldclient-rb/impl/store_client_wrapper" +require "ldclient-rb/impl/migrations/tracker" require "concurrent/atomics" require "digest/sha1" +require "forwardable" require "logger" require "benchmark" require "json" @@ -20,6 +22,10 @@ module LaunchDarkly # class LDClient include Impl + extend Forwardable + + def_delegators :@config, :logger + # # Creates a new client instance that connects to LaunchDarkly. A custom # configuration parameter can also supplied to specify advanced options, @@ -148,7 +154,7 @@ def flush # @return [String, nil] a hash string or nil if the provided context was invalid # def secure_mode_hash(context) - context = Impl::Context::make_context(context) + context = Impl::Context.make_context(context) unless context.valid? @config.logger.warn("secure_mode_hash called with invalid context: #{context.error}") return nil @@ -188,10 +194,11 @@ def initialized? 
# @param default the default value of the flag; this is used if there is an error # condition making it impossible to find or evaluate the flag # - # @return the variation for the provided context, or the default value if there's an an error + # @return the variation for the provided context, or the default value if there's an error # def variation(key, context, default) - evaluate_internal(key, context, default, false).value + detail, _, _, = variation_with_flag(key, context, default) + detail.value end # @@ -218,7 +225,43 @@ def variation(key, context, default) # @return [EvaluationDetail] an object describing the result # def variation_detail(key, context, default) - evaluate_internal(key, context, default, true) + detail, _, _ = evaluate_internal(key, context, default, true) + detail + end + + # + # This method returns the migration stage of the migration feature flag for the given evaluation context. + # + # This method returns the default stage if there is an error or the flag does not exist. If the default stage is not + # a valid stage, then a default stage of 'off' will be used instead. + # + # @param key [String] + # @param context [LDContext] + # @param default_stage [Symbol] + # + # @return [Array] + # + def migration_variation(key, context, default_stage) + unless Migrations::VALID_STAGES.include? default_stage + @config.logger.error { "[LDClient] default_stage #{default_stage} is not a valid stage; continuing with 'off' as default" } + default_stage = Migrations::STAGE_OFF + end + + context = Impl::Context::make_context(context) + detail, flag, _ = variation_with_flag(key, context, default_stage.to_s) + + stage = detail.value + stage = stage.to_sym if stage.respond_to? :to_sym + + if Migrations::VALID_STAGES.include?(stage) + tracker = Impl::Migrations::OpTracker.new(@config.logger, key, flag, context, detail, default_stage) + return stage, tracker + end + + detail = LaunchDarkly::Impl::Evaluator.error_result(LaunchDarkly::EvaluationReason::ERROR_WRONG_TYPE, default_stage.to_s) + tracker = Impl::Migrations::OpTracker.new(@config.logger, key, flag, context, detail, default_stage) + + [default_stage, tracker] end # @@ -281,6 +324,32 @@ def track(event_name, context, data = nil, metric_value = nil) @event_processor.record_custom_event(context, event_name, data, metric_value) end + # + # Tracks the results of a migrations operation. This event includes measurements which can be used to enhance the + # observability of a migration within the LaunchDarkly UI. + # + # This event should be generated through {Interfaces::Migrations::OpTracker}. If you are using the + # {Interfaces::Migrations::Migrator} to handle migrations, this event will be created and emitted + # automatically. + # + # @param tracker [LaunchDarkly::Interfaces::Migrations::OpTracker] + # + def track_migration_op(tracker) + unless tracker.is_a? LaunchDarkly::Interfaces::Migrations::OpTracker + @config.logger.error { "invalid op tracker received in track_migration_op" } + return + end + + event = tracker.build + if event.is_a? String + @config.logger.error { "[LDClient] Error occurred generating migration op event; #{event}" } + return + end + + + @event_processor.record_migration_op_event(event) + end + # # Returns a {FeatureFlagsState} object that encapsulates the state of all feature flags for a given context, # including the flag values and also metadata that can be used on the front end. 
This method does not @@ -430,24 +499,41 @@ def create_default_data_source(sdk_key, config, diagnostic_accumulator) end end + # + # @param key [String] # @param context [Hash, LDContext] - # @return [EvaluationDetail] + # @param default [Object] + # + # @return [Array] + # + def variation_with_flag(key, context, default) + evaluate_internal(key, context, default, false) + end + + # + # @param key [String] + # @param context [Hash, LDContext] + # @param default [Object] + # @param with_reasons [Boolean] + # + # @return [Array] + # def evaluate_internal(key, context, default, with_reasons) if @config.offline? - return Evaluator.error_result(EvaluationReason::ERROR_CLIENT_NOT_READY, default) + return Evaluator.error_result(EvaluationReason::ERROR_CLIENT_NOT_READY, default), nil, nil end if context.nil? @config.logger.error { "[LDClient] Must specify context" } detail = Evaluator.error_result(EvaluationReason::ERROR_USER_NOT_SPECIFIED, default) - return detail + return detail, nil, "no context provided" end context = Impl::Context::make_context(context) unless context.valid? @config.logger.error { "[LDClient] Context was invalid for evaluation of flag '#{key}' (#{context.error}); returning default value" } detail = Evaluator.error_result(EvaluationReason::ERROR_USER_NOT_SPECIFIED, default) - return detail + return detail, nil, context.error end unless initialized? @@ -457,7 +543,7 @@ def evaluate_internal(key, context, default, with_reasons) @config.logger.error { "[LDClient] Client has not finished initializing; feature store unavailable, returning default value" } detail = Evaluator.error_result(EvaluationReason::ERROR_CLIENT_NOT_READY, default) record_unknown_flag_eval(key, context, default, detail.reason, with_reasons) - return detail + return detail, nil, "client not initialized" end end @@ -471,7 +557,7 @@ def evaluate_internal(key, context, default, with_reasons) @config.logger.info { "[LDClient] Unknown feature flag \"#{key}\". Returning default value" } detail = Evaluator.error_result(EvaluationReason::ERROR_FLAG_NOT_FOUND, default) record_unknown_flag_eval(key, context, default, detail.reason, with_reasons) - return detail + return detail, nil, "feature flag not found" end begin @@ -486,12 +572,12 @@ def evaluate_internal(key, context, default, with_reasons) detail = EvaluationDetail.new(default, nil, detail.reason) end record_flag_eval(feature, context, detail, default, with_reasons) - detail + [detail, feature, nil] rescue => exn Util.log_exception(@config.logger, "Error evaluating feature flag \"#{key}\"", exn) detail = Evaluator.error_result(EvaluationReason::ERROR_EXCEPTION, default) record_flag_eval_error(feature, context, default, detail.reason, with_reasons) - detail + [detail, feature, exn.to_s] end end @@ -507,7 +593,9 @@ def evaluate_internal(key, context, default, with_reasons) default, add_experiment_data || flag[:trackEvents] || false, flag[:debugEventsUntilDate], - nil + nil, + flag[:samplingRatio], + !!flag[:excludeFromSummaries] ) end @@ -523,13 +611,15 @@ def evaluate_internal(key, context, default, with_reasons) nil, add_experiment_data || prereq_flag[:trackEvents] || false, prereq_flag[:debugEventsUntilDate], - prereq_of_flag[:key] + prereq_of_flag[:key], + prereq_flag[:samplingRatio], + !!prereq_flag[:excludeFromSummaries] ) end private def record_flag_eval_error(flag, context, default, reason, with_reasons) @event_processor.record_eval_event(context, flag[:key], flag[:version], nil, default, with_reasons ? 
reason : nil, default, - flag[:trackEvents], flag[:debugEventsUntilDate], nil) + flag[:trackEvents], flag[:debugEventsUntilDate], nil, flag[:samplingRatio], !!flag[:excludeFromSummaries]) end # @@ -541,7 +631,7 @@ def evaluate_internal(key, context, default, with_reasons) # private def record_unknown_flag_eval(flag_key, context, default, reason, with_reasons) @event_processor.record_eval_event(context, flag_key, nil, nil, default, with_reasons ? reason : nil, default, - false, nil, nil) + false, nil, nil, 1, false) end private def experiment?(flag, reason) diff --git a/lib/ldclient-rb/migrations.rb b/lib/ldclient-rb/migrations.rb new file mode 100644 index 00000000..dafd62e3 --- /dev/null +++ b/lib/ldclient-rb/migrations.rb @@ -0,0 +1,230 @@ +require 'ldclient-rb/impl/migrations/migrator' + +module LaunchDarkly + # + # Namespace for feature-flag based technology migration support. + # + module Migrations + # Symbol representing the old origin, or the old technology source you are migrating away from. + ORIGIN_OLD = :old + # Symbol representing the new origin, or the new technology source you are migrating towards. + ORIGIN_NEW = :new + + # Symbol defining a read-related operation + OP_READ = :read + # Symbol defining a write-related operation + OP_WRITE = :write + + STAGE_OFF = :off + STAGE_DUALWRITE = :dualwrite + STAGE_SHADOW = :shadow + STAGE_LIVE = :live + STAGE_RAMPDOWN = :rampdown + STAGE_COMPLETE = :complete + + VALID_OPERATIONS = [ + OP_READ, + OP_WRITE, + ] + + VALID_ORIGINS = [ + ORIGIN_OLD, + ORIGIN_NEW, + ] + + VALID_STAGES = [ + STAGE_OFF, + STAGE_DUALWRITE, + STAGE_SHADOW, + STAGE_LIVE, + STAGE_RAMPDOWN, + STAGE_COMPLETE, + ] + + # + # The OperationResult wraps the {LaunchDarkly::Result} class to tie an operation origin to a result. + # + class OperationResult + extend Forwardable + def_delegators :@result, :value, :error, :exception, :success? + + # + # @param origin [Symbol] + # @param result [LaunchDarkly::Result] + # + def initialize(origin, result) + @origin = origin + @result = result + end + + # + # @return [Symbol] The origin this result is associated with. + # + attr_reader :origin + end + + # + # A write result contains the operation results against both the authoritative and non-authoritative origins. + # + # Authoritative writes are always executed first. In the event of a failure, the non-authoritative write will not + # be executed, resulting in a nil value in the final WriteResult. + # + class WriteResult + # + # @param authoritative [OperationResult] + # @param nonauthoritative [OperationResult, nil] + # + def initialize(authoritative, nonauthoritative = nil) + @authoritative = authoritative + @nonauthoritative = nonauthoritative + end + + # + # Returns the operation result for the authoritative origin. + # + # @return [OperationResult] + # + attr_reader :authoritative + + # + # Returns the operation result for the non-authoritative origin. + # + # This result might be nil as the non-authoritative write does not execute in every stage, and will not execute + # if the authoritative write failed. + # + # @return [OperationResult, nil] + # + attr_reader :nonauthoritative + end + + + # + # The migration builder is used to configure and construct an instance of a + # {LaunchDarkly::Interfaces::Migrations::Migrator}. This migrator can be used to perform LaunchDarkly assisted + # technology migrations through the use of migration-based feature flags. 
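+    #
+    # An illustrative configuration sketch (`client`, `old_db`, and `new_db` are placeholders):
+    #
+    #   builder = LaunchDarkly::Migrations::MigratorBuilder.new(client)
+    #   builder.read_execution_order(MigratorBuilder::EXECUTION_SERIAL)
+    #   builder.track_latency(true)
+    #   builder.track_errors(true)
+    #   builder.read(->(payload) { LaunchDarkly::Result.success(old_db.get(payload)) },
+    #                ->(payload) { LaunchDarkly::Result.success(new_db.get(payload)) },
+    #                ->(old, new) { old == new })
+    #   builder.write(->(payload) { LaunchDarkly::Result.success(old_db.put(payload)) },
+    #                 ->(payload) { LaunchDarkly::Result.success(new_db.put(payload)) })
+    #   migrator = builder.build
+    #
+    # Note that the read and write lambdas must take exactly one argument, and that build returns a
+    # String describing the problem instead of a migrator when the configuration is incomplete.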
+ # + class MigratorBuilder + EXECUTION_SERIAL = :serial + EXECUTION_RANDOM = :random + EXECUTION_PARALLEL = :parallel + + VALID_EXECUTION_ORDERS = [EXECUTION_SERIAL, EXECUTION_RANDOM, EXECUTION_PARALLEL] + private_constant :VALID_EXECUTION_ORDERS + + # + # @param client [LaunchDarkly::LDClient] + # + def initialize(client) + @client = client + + # Default settings as required by the spec + @read_execution_order = EXECUTION_PARALLEL + @measure_latency = true + @measure_errors = true + + @read_config = nil # @type [LaunchDarkly::Impl::Migrations::MigrationConfig, nil] + @write_config = nil # @type [LaunchDarkly::Impl::Migrations::MigrationConfig, nil] + end + + # + # The read execution order influences the parallelism and execution order for read operations involving multiple + # origins. + # + # @param order [Symbol] + # + def read_execution_order(order) + return unless VALID_EXECUTION_ORDERS.include? order + + @read_execution_order = order + end + + # + # Enable or disable latency tracking for migration operations. This latency information can be sent upstream to + # LaunchDarkly to enhance migration visibility. + # + # @param enabled [Boolean] + # + def track_latency(enabled) + @measure_latency = !!enabled + end + + # + # Enable or disable error tracking for migration operations. This error information can be sent upstream to + # LaunchDarkly to enhance migration visibility. + # + # @param enabled [Boolean] + # + def track_errors(enabled) + @measure_errors = !!enabled + end + + # + # Read can be used to configure the migration-read behavior of the resulting + # {LaunchDarkly::Interfaces::Migrations::Migrator} instance. + # + # Users are required to provide two different read methods -- one to read from the old migration origin, and one + # to read from the new origin. Additionally, customers can opt-in to consistency tracking by providing a + # comparison function. + # + # Depending on the migration stage, one or both of these read methods may be called. + # + # The read methods should accept a single nullable parameter. This parameter is a payload passed through the + # {LaunchDarkly::Interfaces::Migrations::Migrator#read} method. This method should return a {LaunchDarkly::Result} + # instance. + # + # The consistency method should accept 2 parameters of any type. These parameters are the results of executing the + # read operation against the old and new origins. If both operations were successful, the consistency method will + # be invoked. This method should return true if the two parameters are equal, or false otherwise. + # + # @param old_read [#call] + # @param new_read [#call] + # @param comparison [#call, nil] + # + def read(old_read, new_read, comparison = nil) + return unless old_read.respond_to?(:call) && old_read.arity == 1 + return unless new_read.respond_to?(:call) && new_read.arity == 1 + return unless comparison.nil? || (comparison.respond_to?(:call) && comparison.arity == 2) + + @read_config = LaunchDarkly::Impl::Migrations::MigrationConfig.new(old_read, new_read, comparison) + end + + # + # Write can be used to configure the migration-write behavior of the resulting + # {LaunchDarkly::Interfaces::Migrations::Migrator} instance. + # + # Users are required to provide two different write methods -- one to write to the old migration origin, and one + # to write to the new origin. + # + # Depending on the migration stage, one or both of these write methods may be called. + # + # The write methods should accept a single nullable parameter. 
This parameter is a payload passed through the + # {LaunchDarkly::Interfaces::Migrations::Migrator#write} method. This method should return a {LaunchDarkly::Result} + # instance. + # + # @param old_write [#call] + # @param new_write [#call] + # + def write(old_write, new_write) + return unless old_write.respond_to?(:call) && old_write.arity == 1 + return unless new_write.respond_to?(:call) && new_write.arity == 1 + + @write_config = LaunchDarkly::Impl::Migrations::MigrationConfig.new(old_write, new_write, nil) + end + + # + # Build constructs a {LaunchDarkly::Interfaces::Migrations::Migrator} instance to support migration-based reads + # and writes. A string describing any failure conditions will be returned if the build fails. + # + # @return [LaunchDarkly::Interfaces::Migrations::Migrator, String] + # + def build + return "client not provided" if @client.nil? + return "read configuration not provided" if @read_config.nil? + return "write configuration not provided" if @write_config.nil? + + LaunchDarkly::Impl::Migrations::Migrator.new(@client, @read_execution_order, @read_config, @write_config, @measure_latency, @measure_errors) + end + end + + end +end diff --git a/lib/ldclient-rb/util.rb b/lib/ldclient-rb/util.rb index 5fcc23d4..a7d2cac0 100644 --- a/lib/ldclient-rb/util.rb +++ b/lib/ldclient-rb/util.rb @@ -2,6 +2,69 @@ require "http" module LaunchDarkly + # + # A Result is used to reflect the outcome of any operation. + # + # Results can either be considered a success or a failure. + # + # In the event of success, the Result will contain an optional, nullable value to hold any success value back to the + # calling function. + # + # If the operation fails, the Result will contain an error describing the failure. + # + class Result + # + # Create a successful result with the provided value. + # + # @param value [Object, nil] + # @return [Result] + # + def self.success(value) + Result.new(value) + end + + # + # Create a failed result with the provided error description. + # + # @param error [String] + # @param exception [Exception, nil] + # @return [Result] + # + def self.fail(error, exception = nil) + Result.new(nil, error, exception) + end + + # + # Was this result successful or did it encounter an error? + # + # @return [Boolean] + # + def success? + @error.nil? + end + + # + # @return [Object, nil] The value returned from the operation if it was successful; nil otherwise.
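For orientation, a brief sketch of how the Result helpers above are meant to be used; the fetch_record method, the database object, and the error text are hypothetical and not part of this changeset.

    # Hypothetical operation reporting its outcome through LaunchDarkly::Result.
    def fetch_record(id)
      record = database.find(id) # "database" is assumed to exist
      record.nil? ? LaunchDarkly::Result.fail("record #{id} not found") : LaunchDarkly::Result.success(record)
    rescue => e
      LaunchDarkly::Result.fail("lookup raised an exception", e)
    end

    result = fetch_record(42)
    if result.success?
      puts result.value
    else
      puts result.error
      raise result.exception unless result.exception.nil?
    end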
+ # + attr_reader :value + + # + # @return [String, nil] An error description of the failure; nil otherwise + # + attr_reader :error + + # + # @return [Exception, nil] An optional exception which caused the failure + # + attr_reader :exception + + private def initialize(value, error = nil, exception = nil) + @value = value + @error = error + @exception = exception + end + end + # @private module Util # diff --git a/spec/config_spec.rb b/spec/config_spec.rb index 5a1c2522..8c6c28ac 100644 --- a/spec/config_spec.rb +++ b/spec/config_spec.rb @@ -94,31 +94,5 @@ module LaunchDarkly end end end - - describe "context and user aliases" do - it "default values are aliased correctly" do - expect(Config.default_context_keys_capacity).to eq Config.default_user_keys_capacity - expect(Config.default_context_keys_flush_interval).to eq Config.default_user_keys_flush_interval - end - - it "context options are reflected in user options" do - config = subject.new(context_keys_capacity: 50, context_keys_flush_interval: 25) - expect(config.context_keys_capacity).to eq config.user_keys_capacity - expect(config.context_keys_flush_interval).to eq config.user_keys_flush_interval - end - - it "context options can be set by user options" do - config = subject.new(user_keys_capacity: 50, user_keys_flush_interval: 25) - expect(config.context_keys_capacity).to eq config.user_keys_capacity - expect(config.context_keys_flush_interval).to eq config.user_keys_flush_interval - end - - it "context options take precedence" do - config = subject.new(context_keys_capacity: 100, user_keys_capacity: 50, context_keys_flush_interval: 100, user_keys_flush_interval: 50) - - expect(config.context_keys_capacity).to eq 100 - expect(config.context_keys_flush_interval).to eq 100 - end - end end end diff --git a/spec/context_spec.rb b/spec/context_spec.rb index 7ac83d6a..c9d9792c 100644 --- a/spec/context_spec.rb +++ b/spec/context_spec.rb @@ -19,73 +19,6 @@ module LaunchDarkly end describe "context construction" do - describe "legacy users contexts" do - it "can be created using the legacy user format" do - context = { - key: "user-key", - custom: { - address: { - street: "123 Main St.", - city: "Every City", - state: "XX", - }, - }, - } - result = subject.create(context) - expect(result).to be_a(LDContext) - expect(result.key).to eq("user-key") - expect(result.kind).to eq("user") - expect(result.valid?).to be true - end - - it "allows an empty string for a key, but it cannot be missing or nil" do - expect(subject.create({ key: "" }).valid?).to be true - expect(subject.create({ key: nil }).valid?).to be false - expect(subject.create({}).valid?).to be false - end - - it "anonymous is required to be a boolean or nil" do - expect(subject.create({ key: "" }).valid?).to be true - expect(subject.create({ key: "", anonymous: true }).valid?).to be true - expect(subject.create({ key: "", anonymous: false }).valid?).to be true - expect(subject.create({ key: "", anonymous: 0 }).valid?).to be false - end - - it "name is required to be a string or nil" do - expect(subject.create({ key: "" }).valid?).to be true - expect(subject.create({ key: "", name: "My Name" }).valid?).to be true - expect(subject.create({ key: "", name: 0 }).valid?).to be false - end - - it "creates the correct fully qualified key" do - expect(subject.create({ key: "user-key" }).fully_qualified_key).to eq("user-key") - end - - it "requires privateAttributeNames to be an array" do - context = { - key: "user-key", - privateAttributeNames: "not an array", - } - 
expect(subject.create(context).valid?).to be false - end - - it "overwrite custom properties with built-ins when collisions occur" do - context = { - key: "user-key", - ip: "192.168.1.1", - avatar: "avatar", - custom: { - ip: "127.0.0.1", - avatar: "custom avatar", - }, - } - - result = subject.create(context) - expect(result.get_value(:ip)).to eq("192.168.1.1") - expect(result.get_value(:avatar)).to eq("avatar") - end - end - describe "single kind contexts" do it "can be created using the new format" do context = { @@ -151,7 +84,7 @@ module LaunchDarkly describe "multi-kind contexts" do it "can be created from single kind contexts" do - user_context = subject.create({ key: "user-key" }) + user_context = subject.create({ key: "user-key", kind: "user" }) org_context = subject.create({ key: "org-key", kind: "org" }) multi_context = subject.create_multi([user_context, org_context]) @@ -172,7 +105,7 @@ module LaunchDarkly end it "will return the single kind context if only one is provided" do - user_context = subject.create({ key: "user-key" }) + user_context = subject.create({ kind: 'user', key: "user-key" }) multi_context = subject.create_multi([user_context]) expect(multi_context).to be_a(LDContext) @@ -205,7 +138,7 @@ module LaunchDarkly end it "creates the correct fully qualified key" do - user_context = subject.create({ key: "a-user-key" }) + user_context = subject.create({ key: "a-user-key", kind: 'user' }) org_context = subject.create({ key: "b-org-key", kind: "org" }) user_first = subject.create_multi([user_context, org_context]) org_first = subject.create_multi([org_context, user_context]) @@ -305,7 +238,7 @@ module LaunchDarkly describe "supports retrieval" do it "with only support kind for multi-kind contexts" do - user_context = subject.create({ key: 'user', name: 'Ruby', anonymous: true }) + user_context = subject.create({ key: 'user', kind: 'user', name: 'Ruby', anonymous: true }) org_context = subject.create({ key: 'ld', kind: 'org', name: 'LaunchDarkly', anonymous: false }) multi_context = subject.create_multi([user_context, org_context]) @@ -321,7 +254,6 @@ module LaunchDarkly end it "with basic attributes" do - legacy_user = subject.create({ key: 'user', name: 'Ruby', privateAttributeNames: ['name'] }) org_context = subject.create({ key: 'ld', kind: 'org', name: 'LaunchDarkly', anonymous: true, _meta: { privateAttributes: ['name'] } }) [ @@ -336,7 +268,6 @@ module LaunchDarkly ['privateAttributes', be_nil, be_nil], ].each do |(reference, user_matcher, org_matcher)| ref = Reference.create(reference) - expect(legacy_user.get_value_for_reference(ref)).to user_matcher expect(org_context.get_value_for_reference(ref)).to org_matcher end end @@ -346,7 +277,6 @@ module LaunchDarkly tags = ["LaunchDarkly", "Feature Flags"] nested = { upper: { middle: { name: "Middle Level", inner: { levels: [0, 1, 2] } }, name: "Upper Level" } } - legacy_user = subject.create({ key: 'user', name: 'Ruby', custom: { address: address, tags: tags, nested: nested }}) org_context = subject.create({ key: 'ld', kind: 'org', name: 'LaunchDarkly', anonymous: true, address: address, tags: tags, nested: nested }) [ @@ -361,7 +291,6 @@ module LaunchDarkly ['/nested/upper/middle/inner/levels', eq([0, 1, 2])], ].each do |(reference, matcher)| ref = Reference.create(reference) - expect(legacy_user.get_value_for_reference(ref)).to matcher expect(org_context.get_value_for_reference(ref)).to matcher end end diff --git a/spec/events_spec.rb b/spec/events_spec.rb index d01c2714..6ae859e1 100644 --- a/spec/events_spec.rb 
+++ b/spec/events_spec.rb @@ -14,26 +14,8 @@ module LaunchDarkly let(:default_config) { Config.new(default_config_opts) } let(:context) { LDContext.create({ kind: "user", key: "userkey", name: "Red" }) } - def with_processor_and_sender(config) - sender = FakeEventSender.new - timestamp = starting_timestamp - ep = subject.new("sdk_key", config, nil, nil, { - event_sender: sender, - timestamp_fn: proc { - t = timestamp - timestamp += 1 - t - }, - }) - begin - yield ep, sender - ensure - ep.stop - end - end - it "queues identify event" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| ep.record_identify_event(context) output = flush_and_get_events(ep, sender) @@ -43,7 +25,7 @@ def with_processor_and_sender(config) it "filters context in identify event" do config = Config.new(default_config_opts.merge(all_attributes_private: true)) - with_processor_and_sender(config) do |ep, sender| + with_processor_and_sender(config, starting_timestamp) do |ep, sender| ep.record_identify_event(context) output = flush_and_get_events(ep, sender) @@ -52,7 +34,7 @@ def with_processor_and_sender(config) end it "queues individual feature event with index event" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| flag = { key: "flagkey", version: 11 } ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, true) @@ -65,9 +47,36 @@ def with_processor_and_sender(config) end end + it "does not queue feature events with a sampling ratio of 0" do + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| + sampling_ratio = 0 + ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, true, 0, nil, sampling_ratio) + + output = flush_and_get_events(ep, sender) + expect(output).to contain_exactly( + eq(index_event(default_config, context)), + include(:kind => "summary") + ) + end + end + + it "can exclude feature event from summaries" do + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| + exclude_from_summaries = true + flag = { key: "flagkey", version: 11 } + ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, true, 0, nil, 1, exclude_from_summaries) + + output = flush_and_get_events(ep, sender) + expect(output).to contain_exactly( + eq(index_event(default_config, context)), + eq(feature_event(flag, context, 1, 'value')) + ) + end + end + it "filters context in index event" do config = Config.new(default_config_opts.merge(all_attributes_private: true)) - with_processor_and_sender(config) do |ep, sender| + with_processor_and_sender(config, starting_timestamp) do |ep, sender| flag = { key: "flagkey", version: 11 } ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, true) @@ -82,7 +91,7 @@ def with_processor_and_sender(config) it "filters context in feature event" do config = Config.new(default_config_opts.merge(all_attributes_private: true)) - with_processor_and_sender(config) do |ep, sender| + with_processor_and_sender(config, starting_timestamp) do |ep, sender| flag = { key: "flagkey", version: 11 } ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, true) @@ -96,7 +105,7 @@ def with_processor_and_sender(config) end it "sets event kind to debug if flag is temporarily in debug mode" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| flag = 
{ key: "flagkey", version: 11 } future_time = (Time.now.to_f * 1000).to_i + 1000000 ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, false, future_time) @@ -110,8 +119,22 @@ def with_processor_and_sender(config) end end + it "can disable debug events with 0 sampling ratio" do + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| + flag = { key: "flagkey", version: 11 } + future_time = (Time.now.to_f * 1000).to_i + 1000000 + ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, false, future_time, nil, 0) + + output = flush_and_get_events(ep, sender) + expect(output).to contain_exactly( + eq(index_event(default_config, context)), + include(:kind => "summary") + ) + end + end + it "can be both debugging and tracking an event" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| flag = { key: "flagkey", version: 11 } future_time = (Time.now.to_f * 1000).to_i + 1000000 ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, true, future_time) @@ -127,7 +150,7 @@ def with_processor_and_sender(config) end it "ends debug mode based on client time if client time is later than server time" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| # Pick a server time that is somewhat behind the client time server_time = Time.now - 20 @@ -151,7 +174,7 @@ def with_processor_and_sender(config) end it "ends debug mode based on server time if server time is later than client time" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| # Pick a server time that is somewhat ahead of the client time server_time = Time.now + 20 @@ -174,7 +197,7 @@ def with_processor_and_sender(config) end it "generates only one index event for multiple events with same context" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| flag1 = { key: "flagkey1", version: 11 } flag2 = { key: "flagkey2", version: 22 } @@ -192,7 +215,7 @@ def with_processor_and_sender(config) end it "summarizes non-tracked events" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| ep.record_eval_event(context, 'flagkey1', 11, 1, 'value1', nil, 'default1', false) ep.record_eval_event(context, 'flagkey2', 22, 2, 'value2', nil, 'default2', false) @@ -225,7 +248,7 @@ def with_processor_and_sender(config) end it "queues custom event with context" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| ep.record_custom_event(context, 'eventkey', { thing: 'stuff' }, 1.5) output = flush_and_get_events(ep, sender) @@ -238,7 +261,7 @@ def with_processor_and_sender(config) it "filters context in custom event" do config = Config.new(default_config_opts.merge(all_attributes_private: true)) - with_processor_and_sender(config) do |ep, sender| + with_processor_and_sender(config, starting_timestamp) do |ep, sender| ep.record_custom_event(context, 'eventkey') output = flush_and_get_events(ep, sender) @@ -250,7 +273,7 @@ def with_processor_and_sender(config) end it "treats nil value for custom the same as an empty hash" do - with_processor_and_sender(default_config) do |ep, 
sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| user_with_nil_custom = LDContext.create({ key: "userkey", custom: nil }) ep.record_identify_event(user_with_nil_custom) @@ -260,7 +283,7 @@ def with_processor_and_sender(config) end it "does a final flush when shutting down" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| ep.record_identify_event(context) ep.stop @@ -271,7 +294,7 @@ def with_processor_and_sender(config) end it "sends nothing if there are no events" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| ep.flush ep.wait_until_inactive expect(sender.analytics_payloads.empty?).to be true @@ -279,7 +302,7 @@ def with_processor_and_sender(config) end it "stops posting events after unrecoverable error" do - with_processor_and_sender(default_config) do |ep, sender| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| sender.result = Impl::EventSenderResult.new(false, true, nil) ep.record_identify_event(context) flush_and_get_events(ep, sender) @@ -291,6 +314,205 @@ def with_processor_and_sender(config) end end + describe "migration op events" do + it "have basic required fields" do + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| + reason = EvaluationReason.off + evaluation = EvaluationDetail.new(true, 0, reason) + flag = LaunchDarkly::Impl::Model::FeatureFlag.new({key: "flagkey", variations: [true, false]}) + + event = LaunchDarkly::Impl::MigrationOpEvent.new( + starting_timestamp+1, + context, + flag.key, + flag, + LaunchDarkly::Migrations::OP_READ, + LaunchDarkly::Migrations::STAGE_OFF, + evaluation, + Set.new, + nil, + nil, + Set.new, + {} + ) + ep.record_migration_op_event(event) + + output = flush_and_get_events(ep, sender) + expect(output).to contain_exactly( + eq(migration_op_event(flag, context, 0, true, LaunchDarkly::Migrations::STAGE_OFF, reason, starting_timestamp+1)) + ) + end + end + + it "can be disabled with sampling ratio of 0" do + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| + reason = EvaluationReason.off + evaluation = EvaluationDetail.new(true, 0, reason) + flag = LaunchDarkly::Impl::Model::FeatureFlag.new({key: "flagkey", variations: [true, false], samplingRatio: 0}) + + event = LaunchDarkly::Impl::MigrationOpEvent.new( + starting_timestamp+1, + context, + flag.key, + flag, + LaunchDarkly::Migrations::OP_READ, + LaunchDarkly::Migrations::STAGE_OFF, + evaluation, + Set.new, + nil, + nil, + Set.new, + {} + ) + ep.record_migration_op_event(event) + + # We have to send some event so we know events are actually being processed + ep.record_identify_event(context) + output = flush_and_get_events(ep, sender) + expect(output).to contain_exactly(eq(identify_event(default_config, context))) + end + end + + it "reports invoked" do + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| + event = LaunchDarkly::Impl::MigrationOpEvent.new( + starting_timestamp+1, + context, + "flagkey", + LaunchDarkly::Impl::Model::FeatureFlag.new({key: "flagkey", variations: [true, false]}), + LaunchDarkly::Migrations::OP_READ, + LaunchDarkly::Migrations::STAGE_OFF, + EvaluationDetail.new(true, 0, EvaluationReason.off), + Set[LaunchDarkly::Migrations::ORIGIN_OLD, LaunchDarkly::Migrations::ORIGIN_NEW], + nil, + nil, + Set.new, + {} + ) + 
ep.record_migration_op_event(event) + + output = flush_and_get_events(ep, sender) + measurement = output[0][:measurements][0] + + expect(measurement[:key]).to eq("invoked") + expect(measurement[:values]).to include({"old": true}) + expect(measurement[:values]).to include({"new": true}) + end + end + + it "reports latency" do + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| + event = LaunchDarkly::Impl::MigrationOpEvent.new( + starting_timestamp+1, + context, + "flagkey", + LaunchDarkly::Impl::Model::FeatureFlag.new({key: "flagkey", variations: [true, false]}), + LaunchDarkly::Migrations::OP_READ, + LaunchDarkly::Migrations::STAGE_OFF, + EvaluationDetail.new(true, 0, EvaluationReason.off), + Set.new, + nil, + nil, + Set.new, + {LaunchDarkly::Migrations::ORIGIN_OLD => 12.3, LaunchDarkly::Migrations::ORIGIN_NEW => 10.8} + ) + ep.record_migration_op_event(event) + + output = flush_and_get_events(ep, sender) + measurement = output[0][:measurements][0] + + expect(measurement[:key]).to eq("latency_ms") + expect(measurement[:values][:old]).to eq(12.3) + expect(measurement[:values][:new]).to eq(10.8) + end + end + + it "reports errors" do + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| + event = LaunchDarkly::Impl::MigrationOpEvent.new( + starting_timestamp+1, + context, + "flagkey", + LaunchDarkly::Impl::Model::FeatureFlag.new({key: "flagkey", variations: [true, false]}), + LaunchDarkly::Migrations::OP_READ, + LaunchDarkly::Migrations::STAGE_OFF, + EvaluationDetail.new(true, 0, EvaluationReason.off), + Set.new, + nil, + nil, + Set[LaunchDarkly::Migrations::ORIGIN_OLD, LaunchDarkly::Migrations::ORIGIN_NEW], + {} + ) + ep.record_migration_op_event(event) + + output = flush_and_get_events(ep, sender) + measurement = output[0][:measurements][0] + + expect(measurement[:key]).to eq("error") + expect(measurement[:values]).to include({"old": true}) + expect(measurement[:values]).to include({"new": true}) + end + end + + describe "reports consistency" do + it "when the check is true or false" do + [true, false].each do |is_consistent| + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| + event = LaunchDarkly::Impl::MigrationOpEvent.new( + starting_timestamp+1, + context, + "flagkey", + LaunchDarkly::Impl::Model::FeatureFlag.new({key: "flagkey", variations: [true, false]}), + LaunchDarkly::Migrations::OP_READ, + LaunchDarkly::Migrations::STAGE_OFF, + EvaluationDetail.new(true, 0, EvaluationReason.off), + Set.new, + is_consistent, + nil, + Set.new, + {} + ) + ep.record_migration_op_event(event) + + output = flush_and_get_events(ep, sender) + measurement = output[0][:measurements][0] + + expect(measurement[:key]).to eq("consistent") + expect(measurement[:value]).to be(is_consistent) + expect(measurement[:samplingRatio]).to be_nil + end + end + end + + it "sampling ratio when non-nil / non-one" do + with_processor_and_sender(default_config, starting_timestamp) do |ep, sender| + event = LaunchDarkly::Impl::MigrationOpEvent.new( + starting_timestamp+1, + context, + "flagkey", + LaunchDarkly::Impl::Model::FeatureFlag.new({key: "flagkey", variations: [true, false]}), + LaunchDarkly::Migrations::OP_READ, + LaunchDarkly::Migrations::STAGE_OFF, + EvaluationDetail.new(true, 0, EvaluationReason.off), + Set.new, + true, + 3, + Set.new, + {} + ) + ep.record_migration_op_event(event) + + output = flush_and_get_events(ep, sender) + measurement = output[0][:measurements][0] + + expect(measurement[:key]).to eq("consistent") + 
expect(measurement[:value]).to be(true) + expect(measurement[:samplingRatio]).to be(3) + end + end + end + end + describe "diagnostic events" do let(:default_id) { Impl::DiagnosticAccumulator.create_diagnostic_id('sdk_key') } let(:diagnostic_config) { Config.new(diagnostic_opt_out: false, logger: $null_log) } @@ -422,6 +644,33 @@ def feature_event(flag, context, variation, value, timestamp = starting_timestam JSON.parse(out.to_json, symbolize_names: true) end + # + # @param flag [LaunchDarkly::Impl::Models::FeatureFlag] + # @param context [LDContext] + # @param variation [Integer] + # @param value [any] + # @param default [Symbol] + # @param reason [LaunchDarkly::Impl::Models::EvaluationReason] + # @param timestamp [Integer] + # @return [Hash] + # + def migration_op_event(flag, context, variation, value, default, reason, timestamp = starting_timestamp) + out = { + kind: 'migration_op', + operation: 'read', + creationDate: timestamp, + contextKeys: context.keys, + evaluation: { + default: default.to_s, + key: flag.key, + value: value, + variation: variation, + reason: reason, + }, + } + JSON.parse(out.to_json, symbolize_names: true) + end + # # @param config [Config] # @param flag [Hash] @@ -470,22 +719,5 @@ def flush_and_get_events(ep, sender) ep.wait_until_inactive sender.analytics_payloads.pop end - - class FakeEventSender - attr_accessor :result - attr_reader :analytics_payloads - attr_reader :diagnostic_payloads - - def initialize - @result = Impl::EventSenderResult.new(true, false, nil) - @analytics_payloads = Queue.new - @diagnostic_payloads = Queue.new - end - - def send_event_data(data, description, is_diagnostic) - (is_diagnostic ? @diagnostic_payloads : @analytics_payloads).push(JSON.parse(data, symbolize_names: true)) - @result - end - end end end diff --git a/spec/events_test_util.rb b/spec/events_test_util.rb index 672360b3..b1a7c376 100644 --- a/spec/events_test_util.rb +++ b/spec/events_test_util.rb @@ -13,3 +13,49 @@ def make_identify_event(timestamp, context) def make_custom_event(timestamp, context, key, data = nil, metric_value = nil) LaunchDarkly::Impl::CustomEvent.new(timestamp, context, key, data, metric_value) end + +def with_processor_and_sender(config, starting_timestamp) + sender = FakeEventSender.new + timestamp = starting_timestamp + ep = LaunchDarkly::EventProcessor.new("sdk_key", config, nil, nil, { + event_sender: sender, + timestamp_fn: proc { + t = timestamp + timestamp += 1 + t + }, + }) + + begin + yield ep, sender + ensure + ep.stop + end +end + +class FakeEventSender + attr_accessor :result + attr_reader :analytics_payloads + attr_reader :diagnostic_payloads + + def initialize + @result = LaunchDarkly::Impl::EventSenderResult.new(true, false, nil) + @analytics_payloads = Queue.new + @diagnostic_payloads = Queue.new + end + + def send_event_data(data, description, is_diagnostic) + (is_diagnostic ? @diagnostic_payloads : @analytics_payloads).push(JSON.parse(data, symbolize_names: true)) + @result + end +end + +# +# Overwrites the client's event process with an instance which captures events into the FakeEventSender. 
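With these helpers extracted from events_spec.rb into the shared events_test_util.rb, a spec elsewhere in the suite can drive the event pipeline roughly like this; the config options, context key, and assertion are illustrative rather than taken from the diff.

    # Illustrative spec snippet built on the shared helpers above.
    it "captures an identify event through the fake sender" do
      config = LaunchDarkly::Config.new(diagnostic_opt_out: true, logger: $null_log)
      context = LaunchDarkly::LDContext.create({ kind: "user", key: "example-user" })

      with_processor_and_sender(config, 1_000) do |ep, sender|
        ep.record_identify_event(context)
        ep.flush
        ep.wait_until_inactive

        events = sender.analytics_payloads.pop
        expect(events.map { |e| e[:kind] }).to include("identify")
      end
    end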
+# +# @param client [LaunchDarkly::LDClient] +# @param ep [LaunchDarkly::EventProcessor] +# +def override_client_event_processor(client, ep) + client.instance_variable_set(:@event_processor, ep) +end diff --git a/spec/expiring_cache_spec.rb b/spec/expiring_cache_spec.rb index 0270fd25..9ce6f671 100644 --- a/spec/expiring_cache_spec.rb +++ b/spec/expiring_cache_spec.rb @@ -1,4 +1,5 @@ require 'timecop' +require "ldclient-rb/expiring_cache" module LaunchDarkly describe ExpiringCache do diff --git a/spec/impl/evaluator_bucketing_spec.rb b/spec/impl/evaluator_bucketing_spec.rb index 2c761d67..37f1ba86 100644 --- a/spec/impl/evaluator_bucketing_spec.rb +++ b/spec/impl/evaluator_bucketing_spec.rb @@ -10,21 +10,21 @@ module Impl describe "seed exists" do let(:seed) { 61 } it "returns the expected bucket values for seed" do - context = LaunchDarkly::LDContext.create({ key: "userKeyA" }) + context = LaunchDarkly::LDContext.create({ key: "userKeyA", kind: "user" }) bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", seed) expect(bucket).to be_within(0.0000001).of(0.09801207) - context = LaunchDarkly::LDContext.create({ key: "userKeyB" }) + context = LaunchDarkly::LDContext.create({ key: "userKeyB", kind: "user" }) bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", seed) expect(bucket).to be_within(0.0000001).of(0.14483777) - context = LaunchDarkly::LDContext.create({ key: "userKeyC" }) + context = LaunchDarkly::LDContext.create({ key: "userKeyC", kind: "user" }) bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", seed) expect(bucket).to be_within(0.0000001).of(0.9242641) end it "returns the same bucket regardless of hashKey and salt" do - context = LaunchDarkly::LDContext.create({ key: "userKeyA" }) + context = LaunchDarkly::LDContext.create({ key: "userKeyA", kind: "user" }) bucket1 = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", seed) bucket2 = subject.bucket_context(context, context.kind, "hashKey1", "key", "saltyB", seed) bucket3 = subject.bucket_context(context, context.kind, "hashKey2", "key", "saltyC", seed) @@ -33,15 +33,15 @@ module Impl end it "returns a different bucket if the seed is not the same" do - context = LaunchDarkly::LDContext.create({ key: "userKeyA" }) + context = LaunchDarkly::LDContext.create({ key: "userKeyA", kind: "user" }) bucket1 = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", seed) bucket2 = subject.bucket_context(context, context.kind, "hashKey1", "key", "saltyB", seed + 1) expect(bucket1).to_not eq(bucket2) end it "returns a different bucket if the context is not the same" do - context1 = LaunchDarkly::LDContext.create({ key: "userKeyA" }) - context2 = LaunchDarkly::LDContext.create({ key: "userKeyB" }) + context1 = LaunchDarkly::LDContext.create({ key: "userKeyA", kind: "user" }) + context2 = LaunchDarkly::LDContext.create({ key: "userKeyB", kind: "user" }) bucket1 = subject.bucket_context(context1, context1.kind, "hashKey", "key", "saltyA", seed) bucket2 = subject.bucket_context(context2, context2.kind, "hashKey1", "key", "saltyB", seed) expect(bucket1).to_not eq(bucket2) @@ -49,15 +49,15 @@ module Impl end it "gets expected bucket values for specific keys" do - context = LaunchDarkly::LDContext.create({ key: "userKeyA" }) + context = LaunchDarkly::LDContext.create({ key: "userKeyA", kind: "user" }) bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", nil) expect(bucket).to 
be_within(0.0000001).of(0.42157587) - context = LaunchDarkly::LDContext.create({ key: "userKeyB" }) + context = LaunchDarkly::LDContext.create({ key: "userKeyB", kind: "user" }) bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", nil) expect(bucket).to be_within(0.0000001).of(0.6708485) - context = LaunchDarkly::LDContext.create({ key: "userKeyC" }) + context = LaunchDarkly::LDContext.create({ key: "userKeyC", kind: "user" }) bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", nil) expect(bucket).to be_within(0.0000001).of(0.10343106) end @@ -74,10 +74,9 @@ module Impl it "can bucket by int value (equivalent to string)" do context = LaunchDarkly::LDContext.create({ key: "userkey", - custom: { - stringAttr: "33333", - intAttr: 33333, - }, + kind: "user", + stringAttr: "33333", + intAttr: 33333, }) string_result = subject.bucket_context(context, context.kind, "hashKey", "stringAttr", "saltyA", nil) int_result = subject.bucket_context(context, context.kind, "hashKey", "intAttr", "saltyA", nil) @@ -89,9 +88,8 @@ module Impl it "cannot bucket by float value" do context = LaunchDarkly::LDContext.create({ key: "userkey", - custom: { - floatAttr: 33.5, - }, + kind: "user", + floatAttr: 33.5, }) result = subject.bucket_context(context, context.kind, "hashKey", "floatAttr", "saltyA", nil) expect(result).to eq(0.0) @@ -100,9 +98,8 @@ module Impl it "cannot bucket by bool value" do context = LaunchDarkly::LDContext.create({ key: "userkey", - custom: { - boolAttr: true, - }, + kind: "user", + boolAttr: true, }) result = subject.bucket_context(context, context.kind, "hashKey", "boolAttr", "saltyA", nil) expect(result).to eq(0.0) @@ -112,7 +109,7 @@ module Impl describe "variation_index_for_context" do context "rollout is not an experiment" do it "matches bucket" do - context = LaunchDarkly::LDContext.create({ key: "userkey" }) + context = LaunchDarkly::LDContext.create({ key: "userkey", kind: "user" }) flag_key = "flagkey" salt = "salt" @@ -143,7 +140,7 @@ module Impl end it "uses last bucket if bucket value is equal to total weight" do - context = LaunchDarkly::LDContext.create({ key: "userkey" }) + context = LaunchDarkly::LDContext.create({ key: "userkey", kind: "user" }) flag_key = "flagkey" salt = "salt" @@ -167,9 +164,9 @@ module Impl context "rollout is an experiment" do it "returns whether context is in the experiment or not" do - context1 = LaunchDarkly::LDContext.create({ key: "userKeyA" }) - context2 = LaunchDarkly::LDContext.create({ key: "userKeyB" }) - context3 = LaunchDarkly::LDContext.create({ key: "userKeyC" }) + context1 = LaunchDarkly::LDContext.create({ key: "userKeyA", kind: "user" }) + context2 = LaunchDarkly::LDContext.create({ key: "userKeyB", kind: "user" }) + context3 = LaunchDarkly::LDContext.create({ key: "userKeyC", kind: "user" }) flag_key = "flagkey" salt = "salt" seed = 61 @@ -198,7 +195,7 @@ module Impl end it "uses last bucket if bucket value is equal to total weight" do - context = LaunchDarkly::LDContext.create({ key: "userkey" }) + context = LaunchDarkly::LDContext.create({ key: "userkey", kind: "user" }) flag_key = "flagkey" salt = "salt" seed = 61 diff --git a/spec/impl/evaluator_clause_spec.rb b/spec/impl/evaluator_clause_spec.rb index e6245c7e..de323793 100644 --- a/spec/impl/evaluator_clause_spec.rb +++ b/spec/impl/evaluator_clause_spec.rb @@ -6,35 +6,35 @@ module Impl describe "Evaluator (clauses)" do describe "evaluate", :evaluator_spec_base => true do it "can match built-in attribute" do - context = 
LDContext.create({ key: 'x', name: 'Bob' }) + context = LDContext.create({ key: 'x', kind: 'user', name: 'Bob' }) clause = { attribute: 'name', op: 'in', values: ['Bob'] } flag = Flags.boolean_flag_with_clauses(clause) expect(basic_evaluator.evaluate(flag, context).detail.value).to be true end it "can match custom attribute" do - context = LDContext.create({ key: 'x', name: 'Bob', custom: { legs: 4 } }) + context = LDContext.create({ key: 'x', kind: 'user', name: 'Bob', legs: 4 }) clause = { attribute: 'legs', op: 'in', values: [4] } flag = Flags.boolean_flag_with_clauses(clause) expect(basic_evaluator.evaluate(flag, context).detail.value).to be true end it "returns false for missing attribute" do - context = LDContext.create({ key: 'x', name: 'Bob' }) + context = LDContext.create({ key: 'x', kind: 'user', name: 'Bob' }) clause = { attribute: 'legs', op: 'in', values: [4] } flag = Flags.boolean_flag_with_clauses(clause) expect(basic_evaluator.evaluate(flag, context).detail.value).to be false end it "returns false for unknown operator" do - context = LDContext.create({ key: 'x', name: 'Bob' }) + context = LDContext.create({ key: 'x', kind: 'user', name: 'Bob' }) clause = { attribute: 'name', op: 'unknown', values: [4] } flag = Flags.boolean_flag_with_clauses(clause) expect(basic_evaluator.evaluate(flag, context).detail.value).to be false end it "does not stop evaluating rules after clause with unknown operator" do - context = LDContext.create({ key: 'x', name: 'Bob' }) + context = LDContext.create({ key: 'x', kind: 'user', name: 'Bob' }) clause0 = { attribute: 'name', op: 'unknown', values: [4] } rule0 = { clauses: [ clause0 ], variation: 1 } clause1 = { attribute: 'name', op: 'in', values: ['Bob'] } @@ -44,7 +44,7 @@ module Impl end it "can be negated" do - context = LDContext.create({ key: 'x', name: 'Bob' }) + context = LDContext.create({ key: 'x', kind: 'user', name: 'Bob' }) clause = { attribute: 'name', op: 'in', values: ['Bob'], negate: true } flag = Flags.boolean_flag_with_clauses(clause) expect(basic_evaluator.evaluate(flag, context).detail.value).to be false @@ -67,7 +67,7 @@ module Impl it "clause match by kind attribute" do clause = { attribute: 'kind', op: 'startsWith', values: ['a'] } - context1 = LDContext.create({ key: 'key' }) + context1 = LDContext.create({ key: 'key', kind: 'user' }) context2 = LDContext.create({ key: 'key', kind: 'ab' }) context3 = LDContext.create_multi( [ diff --git a/spec/impl/evaluator_rule_spec.rb b/spec/impl/evaluator_rule_spec.rb index a41558e6..985b47a7 100644 --- a/spec/impl/evaluator_rule_spec.rb +++ b/spec/impl/evaluator_rule_spec.rb @@ -8,7 +8,7 @@ module Impl it "matches context from rules" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], variation: 1 } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(true, 1, EvaluationReason::rule_match(0, 'ruleid')) result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) @@ -18,7 +18,7 @@ module Impl it "reuses rule match result detail instances" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], variation: 1 } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) result1 = basic_evaluator.evaluate(flag, context) result2 = basic_evaluator.evaluate(flag, context) 
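A quick aside on the pattern running through the spec updates in this area: with the legacy user format dropped by this change, single-kind contexts must declare their kind explicitly, which is why kind: 'user' is added throughout. A minimal sketch of the new-format construction (keys and attributes are illustrative):

    # New-format contexts: the kind is stated explicitly.
    user_context = LaunchDarkly::LDContext.create({ kind: "user", key: "user-key", name: "Sandy" })
    org_context  = LaunchDarkly::LDContext.create({ kind: "org", key: "org-key" })

    user_context.valid? # => true
    user_context.kind   # => "user"

    multi_context = LaunchDarkly::LDContext.create_multi([user_context, org_context])
    multi_context.valid? # => true; the multi-kind context carries both parts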
expect(result1.detail.reason.rule_id).to eq 'ruleid' @@ -28,7 +28,7 @@ module Impl it "returns an error if rule variation is too high" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], variation: 999 } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) result = basic_evaluator.evaluate(flag, context) @@ -39,7 +39,7 @@ module Impl it "returns an error if rule variation is negative" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], variation: -1 } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) result = basic_evaluator.evaluate(flag, context) @@ -50,7 +50,7 @@ module Impl it "returns an error if rule has neither variation nor rollout" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }] } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) result = basic_evaluator.evaluate(flag, context) @@ -62,7 +62,7 @@ module Impl rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { variations: [] } } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) result = basic_evaluator.evaluate(flag, context) @@ -70,20 +70,12 @@ module Impl expect(result.prereq_evals).to eq(nil) end - it "coerces context key to a string for evaluation" do - clause = { attribute: 'key', op: 'in', values: ['999'] } - flag = Flags.boolean_flag_with_clauses(clause) - context = LDContext.create({ key: 999 }) - result = basic_evaluator.evaluate(flag, context) - expect(result.detail.value).to eq(true) - end - describe "rule experiment/rollout behavior" do it "evaluates rollout for rule" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { variations: [ { weight: 100000, variation: 1, untracked: false } ] } } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(true, 1, EvaluationReason::rule_match(0, 'ruleid')) result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) @@ -94,7 +86,7 @@ module Impl rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { variations: [ { weight: 100000, variation: 1, untracked: false } ] } } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(true, 1, EvaluationReason::rule_match(0, 'ruleid')) result1 = basic_evaluator.evaluate(flag, context) result2 = basic_evaluator.evaluate(flag, context) @@ -106,7 +98,7 @@ module Impl rule = { id: 'ruleid', clauses: 
[{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { kind: 'experiment', variations: [ { weight: 100000, variation: 1, untracked: false } ] } } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: "userkey" }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(true) @@ -116,7 +108,7 @@ module Impl rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { kind: 'rollout', variations: [ { weight: 100000, variation: 1, untracked: false } ] } } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: "userkey" }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to_not include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(nil) @@ -126,7 +118,7 @@ module Impl rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { kind: 'experiment', variations: [ { weight: 100000, variation: 1, untracked: true } ] } } flag = Flags.boolean_flag_with_rules(rule) - context = LDContext.create({ key: "userkey" }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to_not include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(nil) diff --git a/spec/impl/evaluator_spec.rb b/spec/impl/evaluator_spec.rb index 0828e957..23bdf8de 100644 --- a/spec/impl/evaluator_spec.rb +++ b/spec/impl/evaluator_spec.rb @@ -131,7 +131,7 @@ module Impl offVariation: 1, variations: %w[a b c], }) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) @@ -146,7 +146,7 @@ module Impl offVariation: 1, variations: %w[a b c], }) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) @@ -161,7 +161,7 @@ module Impl offVariation: 1, variations: %w[a b c], }) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) @@ -176,7 +176,7 @@ module Impl offVariation: 1, variations: %w[a b c], }) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) @@ -194,7 +194,7 @@ module Impl offVariation: 1, variations: %w[a b c], }) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new('c', 2, EvaluationReason::target_match) result = 
basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) @@ -212,7 +212,7 @@ module Impl offVariation: 1, variations: %w[a b c], }) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) detail = EvaluationDetail.new('c', 2, EvaluationReason::target_match) result1 = basic_evaluator.evaluate(flag, context) result2 = basic_evaluator.evaluate(flag, context) @@ -262,7 +262,7 @@ module Impl offVariation: 1, variations: %w[a b c], }) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(true) @@ -276,7 +276,7 @@ module Impl offVariation: 1, variations: %w[a b c], }) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to_not include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(nil) @@ -290,7 +290,7 @@ module Impl offVariation: 1, variations: %w[a b c], }) - context = LDContext.create({ key: 'userkey' }) + context = LDContext.create({ key: 'userkey', kind: 'user' }) result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to_not include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(nil) diff --git a/spec/impl/evaluator_spec_base.rb b/spec/impl/evaluator_spec_base.rb index 0ae1747a..4e041f94 100644 --- a/spec/impl/evaluator_spec_base.rb +++ b/spec/impl/evaluator_spec_base.rb @@ -80,6 +80,7 @@ module EvaluatorSpecBase def user_context LDContext::create({ key: "userkey", + kind: "user", email: "test@example.com", name: "Bob", }) diff --git a/spec/impl/event_summarizer_spec.rb b/spec/impl/event_summarizer_spec.rb index 6bfa9574..1aedb757 100644 --- a/spec/impl/event_summarizer_spec.rb +++ b/spec/impl/event_summarizer_spec.rb @@ -9,7 +9,7 @@ module Impl describe EventSummarizer do subject { EventSummarizer } - let(:context) { LaunchDarkly::LDContext.create({ key: "key" }) } + let(:context) { LaunchDarkly::LDContext.create({ key: "key", kind: "user" }) } it "does not add identify event to summary" do es = subject.new diff --git a/spec/impl/migrations/migrator_spec.rb b/spec/impl/migrations/migrator_spec.rb new file mode 100644 index 00000000..a8bb2595 --- /dev/null +++ b/spec/impl/migrations/migrator_spec.rb @@ -0,0 +1,705 @@ +require 'ldclient-rb/interfaces' +require 'ldclient-rb/impl/migrations/migrator' + +require "events_test_util" +require "mock_components" +require "model_builders" + +module LaunchDarkly + module Impl + module Migrations + describe Migrator do + subject { Migrator } + let(:default_config) { LaunchDarkly::Config.new({ diagnostic_opt_out: true, logger: $null_log }) } + let(:data_source) { + td = LaunchDarkly::Integrations::TestData.data_source + + [ + LaunchDarkly::Migrations::STAGE_OFF, + LaunchDarkly::Migrations::STAGE_DUALWRITE, + LaunchDarkly::Migrations::STAGE_SHADOW, + LaunchDarkly::Migrations::STAGE_LIVE, + LaunchDarkly::Migrations::STAGE_RAMPDOWN, + LaunchDarkly::Migrations::STAGE_COMPLETE, + ].each do |stage| + td.update(td.flag(stage.to_s).variations(stage.to_s).variation_for_all(0)) + end + + td + } + + def default_builder(client) + builder = LaunchDarkly::Migrations::MigratorBuilder.new(client) + builder.track_latency(false) + 
builder.track_errors(false) + + builder.read(->(_) {}, ->(_) {}) + builder.write(->(_) {}, ->(_) {}) + + builder + end + + describe "both operations" do + + describe "pass payload through" do + [ + LaunchDarkly::Migrations::OP_READ, + LaunchDarkly::Migrations::OP_WRITE, + ].each do |op| + it "for #{op}" do + with_client(test_config(data_source: data_source)) do |client| + builder = default_builder(client) + + payload_old = nil + payload_new = nil + + old_callable = ->(payload) { + payload_old = payload + LaunchDarkly::Result.success(nil) + } + + new_callable = ->(payload) { + payload_new = payload + LaunchDarkly::Result.success(nil) + } + + builder.read(old_callable, new_callable) + builder.write(old_callable, new_callable) + + migrator = builder.build + + if op == LaunchDarkly::Migrations::OP_READ + migrator.read(LaunchDarkly::Migrations::STAGE_LIVE.to_s, basic_context, LaunchDarkly::Migrations::STAGE_OFF, "example payload") + else + migrator.write(LaunchDarkly::Migrations::STAGE_LIVE.to_s, basic_context, LaunchDarkly::Migrations::STAGE_OFF, "example payload") + end + + expect(payload_old).to eq("example payload") + expect(payload_new).to eq("example payload") + end + end + end + end + + describe "track invoked" do + [ + {label: "read off", stage: LaunchDarkly::Migrations::STAGE_OFF, op: LaunchDarkly::Migrations::OP_READ, expected: ["old"]}, + {label: "read dual write", stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, op: LaunchDarkly::Migrations::OP_READ, expected: ["old"]}, + {label: "read shadow", stage: LaunchDarkly::Migrations::STAGE_SHADOW, op: LaunchDarkly::Migrations::OP_READ, expected: %w[old new]}, + {label: "read live", stage: LaunchDarkly::Migrations::STAGE_LIVE, op: LaunchDarkly::Migrations::OP_READ, expected: %w[old new]}, + {label: "read ramp down", stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, op: LaunchDarkly::Migrations::OP_READ, expected: ["new"]}, + {label: "read complete", stage: LaunchDarkly::Migrations::STAGE_COMPLETE, op: LaunchDarkly::Migrations::OP_READ, expected: ["new"]}, + + {label: "write off", stage: LaunchDarkly::Migrations::STAGE_OFF, op: LaunchDarkly::Migrations::OP_WRITE, expected: ["old"]}, + {label: "write dual write", stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, op: LaunchDarkly::Migrations::OP_WRITE, expected: %w[old new]}, + {label: "write shadow", stage: LaunchDarkly::Migrations::STAGE_SHADOW, op: LaunchDarkly::Migrations::OP_WRITE, expected: %w[old new]}, + {label: "write live", stage: LaunchDarkly::Migrations::STAGE_LIVE, op: LaunchDarkly::Migrations::OP_WRITE, expected: %w[old new]}, + {label: "write ramp down", stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, op: LaunchDarkly::Migrations::OP_WRITE, expected: %w[old new]}, + {label: "write complete", stage: LaunchDarkly::Migrations::STAGE_COMPLETE, op: LaunchDarkly::Migrations::OP_WRITE, expected: ["new"]}, + ].each do |test_param| + it "for #{test_param[:label]}" do + with_client(test_config(data_source: data_source)) do |client| + with_processor_and_sender(default_config, 0) do |ep, sender| + override_client_event_processor(client, ep) + + builder = default_builder(client) + old_callable = ->(_) { LaunchDarkly::Result.success(nil) } + new_callable = ->(_) { LaunchDarkly::Result.success(nil) } + + builder.read(old_callable, new_callable) + builder.write(old_callable, new_callable) + migrator = builder.build + + if test_param[:op] == LaunchDarkly::Migrations::OP_READ + migrator.read(test_param[:stage], basic_context, LaunchDarkly::Migrations::STAGE_OFF, "example payload") + else + 
migrator.write(test_param[:stage], basic_context, LaunchDarkly::Migrations::STAGE_OFF, "example payload") + end + + ep.flush + ep.wait_until_inactive + events = sender.analytics_payloads.pop + + expect(events.size).to be(3) # Index, migration op, and summary + + op_event = events[1] + invocations = op_event[:measurements][0] + + expect(invocations[:key]).to eq("invoked") + test_param[:expected].each { |ev| expect(invocations[:values]).to include({ev.to_sym => true}) } + end + end + end + end + end + + describe "track latency" do + [ + {label: "read off", stage: LaunchDarkly::Migrations::STAGE_OFF, op: LaunchDarkly::Migrations::OP_READ, expected: [:old]}, + {label: "read dual write", stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, op: LaunchDarkly::Migrations::OP_READ, expected: [:old]}, + {label: "read shadow", stage: LaunchDarkly::Migrations::STAGE_SHADOW, op: LaunchDarkly::Migrations::OP_READ, expected: [:old, :new]}, + {label: "read live", stage: LaunchDarkly::Migrations::STAGE_LIVE, op: LaunchDarkly::Migrations::OP_READ, expected: [:old, :new]}, + {label: "read ramp down", stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, op: LaunchDarkly::Migrations::OP_READ, expected: [:new]}, + {label: "read complete", stage: LaunchDarkly::Migrations::STAGE_COMPLETE, op: LaunchDarkly::Migrations::OP_READ, expected: [:new]}, + + {label: "write off", stage: LaunchDarkly::Migrations::STAGE_OFF, op: LaunchDarkly::Migrations::OP_WRITE, expected: [:old]}, + {label: "write dual write", stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, op: LaunchDarkly::Migrations::OP_WRITE, expected: [:old, :new]}, + {label: "write shadow", stage: LaunchDarkly::Migrations::STAGE_SHADOW, op: LaunchDarkly::Migrations::OP_WRITE, expected: [:old, :new]}, + {label: "write live", stage: LaunchDarkly::Migrations::STAGE_LIVE, op: LaunchDarkly::Migrations::OP_WRITE, expected: [:old, :new]}, + {label: "write ramp down", stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, op: LaunchDarkly::Migrations::OP_WRITE, expected: [:old, :new]}, + {label: "write complete", stage: LaunchDarkly::Migrations::STAGE_COMPLETE, op: LaunchDarkly::Migrations::OP_WRITE, expected: [:new]}, + ].each do |test_param| + it "for #{test_param[:label]}" do + with_client(test_config(data_source: data_source)) do |client| + with_processor_and_sender(default_config, 0) do |ep, sender| + override_client_event_processor(client, ep) + + builder = default_builder(client) + builder.track_latency(true) + old_callable = ->(_) { sleep(0.1) && LaunchDarkly::Result.success(nil) } + new_callable = ->(_) { sleep(0.1) && LaunchDarkly::Result.success(nil) } + + builder.read(old_callable, new_callable) + builder.write(old_callable, new_callable) + migrator = builder.build + + if test_param[:op] == LaunchDarkly::Migrations::OP_READ + migrator.read(test_param[:stage], basic_context, LaunchDarkly::Migrations::STAGE_OFF, "example payload") + else + migrator.write(test_param[:stage], basic_context, LaunchDarkly::Migrations::STAGE_OFF, "example payload") + end + + ep.flush + ep.wait_until_inactive + events = sender.analytics_payloads.pop + + expect(events.size).to be(3) # Index, migration op, and summary + + op_event = events[1] + latencies = op_event[:measurements][1] # First measurement is invoked + + expect(latencies[:key]).to eq("latency_ms") + test_param[:expected].each { |ev| expect(latencies[:values][ev]).to be >= 0.1 } + end + end + end + end + end + end + + describe "read operations" do + describe "correct origin is run" do + [ + { stage: LaunchDarkly::Migrations::STAGE_OFF, 
old: true, new: false }, + { stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, old: true, new: false }, + { stage: LaunchDarkly::Migrations::STAGE_SHADOW, old: true, new: true }, + { stage: LaunchDarkly::Migrations::STAGE_LIVE, old: true, new: true }, + { stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, old: false, new: true }, + { stage: LaunchDarkly::Migrations::STAGE_COMPLETE, old: false, new: true }, + ].each do |params| + it "for #{params[:stage]} stage" do + with_client(test_config(data_source: data_source)) do |client| + builder = default_builder(client) + + called_old = false + called_new = false + + builder.read( + ->(_) { + called_old = true + LaunchDarkly::Result.success(nil) + }, + ->(_) { + called_new = true + LaunchDarkly::Result.success(nil) + } + ) + + migrator = builder.build + migrator.read(params[:stage].to_s, basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + expect(called_old).to eq(params[:old]) + expect(called_new).to eq(params[:new]) + end + end + end + end + + describe "handles exceptions from origin methods" do + [ + { stage: LaunchDarkly::Migrations::STAGE_OFF, origin: "old" }, + { stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, origin: "old" }, + { stage: LaunchDarkly::Migrations::STAGE_SHADOW, origin: "old" }, + { stage: LaunchDarkly::Migrations::STAGE_LIVE, origin: "new" }, + { stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, origin: "new" }, + { stage: LaunchDarkly::Migrations::STAGE_COMPLETE, origin: "new" }, + ].each do |params| + it "for #{params[:stage]} stage" do + with_client(test_config(data_source: data_source)) do |client| + builder = default_builder(client) + + builder.read(->(_) { raise "old error" }, ->(_) { raise "new error" }) + + migrator = builder.build + result = migrator.read(params[:stage].to_s, basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + expect(result.success?).to be false + expect(result.error).to eq("'#{params[:origin]}' operation raised an exception") + expect(result.exception.to_s).to eq("#{params[:origin]} error") + end + end + end + end + + describe "support execution order" do + it "parallel" do + with_client(test_config(data_source: data_source)) do |client| + builder = default_builder(client) + + builder.read( + ->(_) { + sleep(0.5) + LaunchDarkly::Result.success(nil) + }, + ->(_) { + sleep(0.5) + LaunchDarkly::Result.success(nil) + } + ) + + migrator = builder.build + + start = Time.now + migrator.read(LaunchDarkly::Migrations::STAGE_SHADOW.to_s, basic_context, LaunchDarkly::Migrations::STAGE_OFF) + duration = Time.now - start + + expect(duration).to be < 1 + end + end + + it "serial" do + with_client(test_config(data_source: data_source)) do |client| + builder = default_builder(client) + builder.read_execution_order(LaunchDarkly::Migrations::MigratorBuilder::EXECUTION_SERIAL) + + builder.read( + ->(_) { + sleep(0.5) + LaunchDarkly::Result.success(nil) + }, + ->(_) { + sleep(0.5) + LaunchDarkly::Result.success(nil) + } + ) + + migrator = builder.build + + start = Time.now + migrator.read(LaunchDarkly::Migrations::STAGE_SHADOW.to_s, basic_context, LaunchDarkly::Migrations::STAGE_OFF) + duration = Time.now - start + + expect(duration).to be >= 1 + end + end + + it "random" do + with_client(test_config(data_source: data_source)) do |client| + builder = default_builder(client) + builder.read_execution_order(LaunchDarkly::Migrations::MigratorBuilder::EXECUTION_RANDOM) + + builder.read( + ->(_) { + sleep(0.5) + LaunchDarkly::Result.success(nil) + }, + ->(_) { + sleep(0.5) + LaunchDarkly::Result.success(nil) + } + ) + 
+ migrator = builder.build + + start = Time.now + migrator.read(LaunchDarkly::Migrations::STAGE_SHADOW.to_s, basic_context, LaunchDarkly::Migrations::STAGE_OFF) + duration = Time.now - start + + # Since it is random, we don't know which decision it would make, so the best we can do is make sure it + # wasn't run in parallel. + expect(duration).to be >= 1 + end + end + end + + describe "tracks consistency results" do + [ + {label: "shadow when same", stage: LaunchDarkly::Migrations::STAGE_SHADOW, old_return: "same", new_return: "same", expected: true}, + {label: "shadow when different", stage: LaunchDarkly::Migrations::STAGE_SHADOW, old_return: "same", new_return: "different", expected: false}, + + {label: "live when same", stage: LaunchDarkly::Migrations::STAGE_LIVE, old_return: "same", new_return: "same", expected: true}, + {label: "live when different", stage: LaunchDarkly::Migrations::STAGE_LIVE, old_return: "same", new_return: "different", expected: false}, + ].each do |test_param| + it "for #{test_param[:label]}" do + with_client(test_config(data_source: data_source)) do |client| + with_processor_and_sender(default_config, 0) do |ep, sender| + override_client_event_processor(client, ep) + + builder = default_builder(client) + old_callable = ->(_) { LaunchDarkly::Result.success(test_param[:old_return]) } + new_callable = ->(_) { LaunchDarkly::Result.success(test_param[:new_return]) } + compare_callable = ->(lhs, rhs) { lhs == rhs } + + builder.read(old_callable, new_callable, compare_callable) + builder.write(old_callable, new_callable) + migrator = builder.build + + migrator.read(test_param[:stage], basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + ep.flush + ep.wait_until_inactive + events = sender.analytics_payloads.pop + + expect(events.size).to be(3) # Index, migration op, and summary + + op_event = events[1] + consistent = op_event[:measurements][1] # First measurement is invoked + + expect(consistent[:key]).to eq("consistent") + expect(consistent[:value]).to eq(test_param[:expected]) + end + end + end + end + + [ + {label: "shadow when same", stage: LaunchDarkly::Migrations::STAGE_SHADOW}, + {label: "shadow when different", stage: LaunchDarkly::Migrations::STAGE_SHADOW}, + + {label: "live when same", stage: LaunchDarkly::Migrations::STAGE_LIVE}, + {label: "live when different", stage: LaunchDarkly::Migrations::STAGE_LIVE}, + ].each do |test_param| + it "for #{test_param[:label]} unless there is an error" do + with_client(test_config(data_source: data_source)) do |client| + with_processor_and_sender(default_config, 0) do |ep, sender| + override_client_event_processor(client, ep) + + builder = default_builder(client) + old_callable = ->(_) { LaunchDarkly::Result.fail("old fail") } + new_callable = ->(_) { LaunchDarkly::Result.fail("new fail") } + compare_callable = ->(lhs, rhs) { lhs == rhs } + + builder.read(old_callable, new_callable, compare_callable) + builder.write(old_callable, new_callable) + migrator = builder.build + + migrator.read(test_param[:stage], basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + ep.flush + ep.wait_until_inactive + events = sender.analytics_payloads.pop + + expect(events.size).to be(3) # Index, migration op, and summary + + op_event = events[1] + + expect(op_event[:measurements].size).to eq(1) + invoked = op_event[:measurements][0] # First measurement is invoked + + expect(invoked[:key]).to eq("invoked") + end + end + end + end + + [ + {label: "shadow", stage: LaunchDarkly::Migrations::STAGE_SHADOW}, + {label: "live", stage: 
LaunchDarkly::Migrations::STAGE_LIVE}, + ].each do |test_param| + it "for #{test_param[:label]} when an exception occurs" do + with_client(test_config(data_source: data_source)) do |client| + with_processor_and_sender(default_config, 0) do |ep, sender| + override_client_event_processor(client, ep) + + builder = default_builder(client) + old_callable = ->(_) { LaunchDarkly::Result.success(nil) } + new_callable = ->(_) { LaunchDarkly::Result.success(nil) } + compare_callable = ->(lhs, rhs) { raise "consistency check exception" } + + builder.read(old_callable, new_callable, compare_callable) + builder.write(old_callable, new_callable) + migrator = builder.build + + migrator.read(test_param[:stage], basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + ep.flush + ep.wait_until_inactive + events = sender.analytics_payloads.pop + + expect(events.size).to be(3) # Index, migration op, and summary + + op_event = events[1] + + expect(op_event[:measurements].size).to eq(1) + invoked = op_event[:measurements][0] # First measurement is invoked + + expect(invoked[:key]).to eq("invoked") + end + end + end + end + end + + describe "track errors" do + let(:default_config) { LaunchDarkly::Config.new({ diagnostic_opt_out: true, logger: $null_log }) } + + [ + {label: "off", stage: LaunchDarkly::Migrations::STAGE_OFF, expected: ["old"]}, + {label: "dual write", stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, expected: ["old"]}, + {label: "shadow", stage: LaunchDarkly::Migrations::STAGE_SHADOW, expected: %w[old new]}, + {label: "live", stage: LaunchDarkly::Migrations::STAGE_LIVE, expected: %w[old new]}, + {label: "ramp down", stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, expected: ["new"]}, + {label: "complete", stage: LaunchDarkly::Migrations::STAGE_COMPLETE, expected: ["new"]}, + ].each do |test_param| + it "for #{test_param[:label]}" do + with_client(test_config(data_source: data_source)) do |client| + with_processor_and_sender(default_config, 0) do |ep, sender| + override_client_event_processor(client, ep) + + builder = default_builder(client) + builder.track_errors(true) + old_callable = ->(_) { LaunchDarkly::Result.fail("old") } + new_callable = ->(_) { LaunchDarkly::Result.fail("new") } + + builder.read(old_callable, new_callable) + builder.write(old_callable, new_callable) + migrator = builder.build + + migrator.read(test_param[:stage], basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + ep.flush + ep.wait_until_inactive + events = sender.analytics_payloads.pop + + expect(events.size).to be(3) # Index, migration op, and summary + + op_event = events[1] + errors = op_event[:measurements][1] # First measurement is invoked + + expect(errors[:key]).to eq("error") + test_param[:expected].each { |ev| expect(errors[:values]).to include({ev.to_sym => true}) } + end + end + end + end + end + end + + describe "write operations" do + describe "correct origin is run" do + [ + { stage: LaunchDarkly::Migrations::STAGE_OFF, old: true, new: false }, + { stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, old: true, new: true }, + { stage: LaunchDarkly::Migrations::STAGE_SHADOW, old: true, new: true }, + { stage: LaunchDarkly::Migrations::STAGE_LIVE, old: true, new: true }, + { stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, old: true, new: true }, + { stage: LaunchDarkly::Migrations::STAGE_COMPLETE, old: false, new: true }, + ].each do |params| + it "for #{params[:stage]}" do + with_client(test_config(data_source: data_source)) do |client| + builder = default_builder(client) + + called_old = false + 
called_new = false + + builder.write( + ->(_) { + called_old = true + LaunchDarkly::Result.success(nil) + }, + ->(_) { + called_new = true + LaunchDarkly::Result.success(nil) + } + ) + + migrator = builder.build + migrator.write(params[:stage].to_s, basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + expect(called_old).to eq(params[:old]) + expect(called_new).to eq(params[:new]) + end + end + end + end + + describe "handles exceptions from origin methods" do + [ + { stage: LaunchDarkly::Migrations::STAGE_OFF, origin: "old" }, + { stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, origin: "old" }, + { stage: LaunchDarkly::Migrations::STAGE_SHADOW, origin: "old" }, + { stage: LaunchDarkly::Migrations::STAGE_LIVE, origin: "new" }, + { stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, origin: "new" }, + { stage: LaunchDarkly::Migrations::STAGE_COMPLETE, origin: "new" }, + ].each do |params| + it "for #{params[:stage]} stage" do + with_client(test_config(data_source: data_source)) do |client| + builder = default_builder(client) + + builder.write(->(_) { raise "old error" }, ->(_) { raise "new error" }) + + migrator = builder.build + result = migrator.write(params[:stage].to_s, basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + expect(result.authoritative.success?).to be false + expect(result.authoritative.error).to eq("'#{params[:origin]}' operation raised an exception") + expect(result.authoritative.exception.to_s).to eq("#{params[:origin]} error") + end + end + end + end + + describe "stop if authoritative write fails" do + [ + # LaunchDarkly::Migrations::STAGE_OFF doesn't run both so we can ignore it. + # + # Old is authoritative, so new shouldn't be called + { stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, old: true, new: false }, + { stage: LaunchDarkly::Migrations::STAGE_SHADOW, old: true, new: false }, + + # New is authoritative, so old shouldn't be called + { stage: LaunchDarkly::Migrations::STAGE_LIVE, old: false, new: true }, + { stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, old: false, new: true }, + + # LaunchDarkly::Migrations::STAGE_COMPLETE doesn't run both so we can ignore it. 
+ ].each do |params| + it "for #{params[:stage]} stage" do + with_client(test_config(data_source: data_source)) do |client| + builder = default_builder(client) + + called_old = false + called_new = false + + builder.write( + ->(_) { + called_old = true + LaunchDarkly::Result.fail("failed old") + }, + ->(_) { + called_new = true + LaunchDarkly::Result.fail("failed new") + } + ) + + migrator = builder.build + migrator.write(params[:stage].to_s, basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + expect(called_old).to eq(params[:old]) + expect(called_new).to eq(params[:new]) + end + end + end + end + + describe "track errors" do + describe "correctly if authoritative fails first" do + [ + {label: "write off", stage: LaunchDarkly::Migrations::STAGE_OFF, expected: "old"}, + {label: "write dual write", stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, expected: "old"}, + {label: "write shadow", stage: LaunchDarkly::Migrations::STAGE_SHADOW, expected: "old"}, + {label: "write live", stage: LaunchDarkly::Migrations::STAGE_LIVE, expected: "new"}, + {label: "write ramp down", stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, expected: "new"}, + {label: "write complete", stage: LaunchDarkly::Migrations::STAGE_COMPLETE, expected: "new"}, + ].each do |test_param| + it test_param[:label] do + with_client(test_config(data_source: data_source)) do |client| + with_processor_and_sender(default_config, 0) do |ep, sender| + override_client_event_processor(client, ep) + + builder = default_builder(client) + builder.track_errors(true) + old_callable = ->(_) { LaunchDarkly::Result.fail("old") } + new_callable = ->(_) { LaunchDarkly::Result.fail("new") } + + builder.read(old_callable, new_callable) + builder.write(old_callable, new_callable) + migrator = builder.build + + migrator.write(test_param[:stage], basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + ep.flush + ep.wait_until_inactive + events = sender.analytics_payloads.pop + + expect(events.size).to be(3) # Index, migration op, and summary + + op_event = events[1] + errors = op_event[:measurements][1] # First measurement is invoked + + expect(errors[:key]).to eq("error") + expect(errors[:values]).to include({test_param[:expected].to_sym => true}) + end + end + end + end + end + + describe "correctly if authoritative does not fail" do + [ + # OFF and COMPLETE do not run both origins, so there is nothing to test. 
+ {label: "dual write", stage: LaunchDarkly::Migrations::STAGE_DUALWRITE, old_fail: false, new_fail: true, expected: "new"}, + {label: "shadow", stage: LaunchDarkly::Migrations::STAGE_SHADOW, old_fail: false, new_fail: true, expected: "new"}, + {label: "live", stage: LaunchDarkly::Migrations::STAGE_LIVE, old_fail: true, new_fail: false, expected: "old"}, + {label: "ramp down", stage: LaunchDarkly::Migrations::STAGE_RAMPDOWN, old_fail: true, new_fail: false, expected: "old"}, + ].each do |test_param| + it "for #{test_param[:label]}" do + with_client(test_config(data_source: data_source)) do |client| + with_processor_and_sender(default_config, 0) do |ep, sender| + override_client_event_processor(client, ep) + + builder = default_builder(client) + builder.track_errors(true) + + old_callable = ->(_) { + if test_param[:old_fail] + LaunchDarkly::Result.fail("old") + else + LaunchDarkly::Result.success(nil) + end + } + new_callable = ->(_) { + if test_param[:new_fail] + LaunchDarkly::Result.fail("new") + else + LaunchDarkly::Result.success(nil) + end + } + + builder.read(old_callable, new_callable) + builder.write(old_callable, new_callable) + migrator = builder.build + + migrator.write(test_param[:stage], basic_context, LaunchDarkly::Migrations::STAGE_OFF) + + ep.flush + ep.wait_until_inactive + events = sender.analytics_payloads.pop + + expect(events.size).to be(3) # Index, migration op, and summary + + op_event = events[1] + errors = op_event[:measurements][1] # First measurement is invoked + + expect(errors[:key]).to eq("error") + expect(errors[:values]).to include({test_param[:expected].to_sym => true}) + end + end + end + end + end + end + end + end + end + end +end diff --git a/spec/impl/migrations/tracker_spec.rb b/spec/impl/migrations/tracker_spec.rb new file mode 100644 index 00000000..28f760d4 --- /dev/null +++ b/spec/impl/migrations/tracker_spec.rb @@ -0,0 +1,331 @@ +require 'ldclient-rb/interfaces' +require "ldclient-rb" + +module LaunchDarkly + module Impl + module Migrations + describe OpTracker do + let(:flag_data) { LaunchDarkly::Integrations::TestData::FlagBuilder.new("feature").build(1) } + let(:flag) { LaunchDarkly::Impl::Model::FeatureFlag.new(flag_data) } + let(:context) { LaunchDarkly::LDContext.with_key("user-key") } + let(:detail) { LaunchDarkly::EvaluationDetail.new(true, 0, LaunchDarkly::EvaluationReason.fallthrough) } + + def minimal_tracker() + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_NEW) + + tracker + end + + it "can build successfully" do + event = minimal_tracker.build + expect(event).to be_instance_of(LaunchDarkly::Impl::MigrationOpEvent) + end + + it "can build without a flag" do + tracker = OpTracker.new($null_logger, "feature", nil, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + event = tracker.build + expect(event).to be_instance_of(LaunchDarkly::Impl::MigrationOpEvent) + end + + describe "can track invocations" do + it "individually" do + [LaunchDarkly::Migrations::ORIGIN_OLD, LaunchDarkly::Migrations::ORIGIN_NEW].each do |origin| + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + 
tracker.invoked(origin) + + event = tracker.build + expect(event.invoked.length).to eq(1) + expect(event.invoked.include?(origin)).to be true + end + end + + it "together" do + event = minimal_tracker.build + expect(event.invoked.length).to be(2) + end + + it "will ignore invalid origins" do + tracker = minimal_tracker + tracker.invoked(:invalid_origin) + tracker.invoked(:another_invalid_origin) + + event = tracker.build + expect(event.invoked.length).to be(2) + expect(event.invoked.include?(LaunchDarkly::Migrations::ORIGIN_OLD)).to be true + expect(event.invoked.include?(LaunchDarkly::Migrations::ORIGIN_NEW)).to be true + end + end + + describe "can track consistency" do + it "with no sampling ratio" do + [true, false].each do |expected_consistent| + tracker = minimal_tracker + tracker.consistent(-> { expected_consistent }) + event = tracker.build + + expect(event.consistency_check).to be expected_consistent + expect(event.consistency_check_ratio).to eq(1) + end + end + + it "with explicit sampling ratio of 1" do + settings = LaunchDarkly::Integrations::TestData::FlagBuilder::FlagMigrationSettingsBuilder.new + settings.check_ratio(1) + + builder = LaunchDarkly::Integrations::TestData::FlagBuilder.new("feature") + builder.migration_settings(settings.build) + flag = LaunchDarkly::Impl::Model::FeatureFlag.new(builder.build(1)) + context = LaunchDarkly::LDContext.with_key("user-key") + detail = LaunchDarkly::EvaluationDetail.new(true, 0, LaunchDarkly::EvaluationReason.fallthrough) + + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_NEW) + + [true, false].each do |expected_consistent| + tracker.consistent(-> { expected_consistent }) + event = tracker.build + + expect(event.consistency_check).to be expected_consistent + expect(event.consistency_check_ratio).to eq(1) + end + end + + it "unless disabled with sampling ratio of 0" do + settings = LaunchDarkly::Integrations::TestData::FlagBuilder::FlagMigrationSettingsBuilder.new + settings.check_ratio(0) + + builder = LaunchDarkly::Integrations::TestData::FlagBuilder.new("feature") + builder.migration_settings(settings.build) + flag = LaunchDarkly::Impl::Model::FeatureFlag.new(builder.build(1)) + context = LaunchDarkly::LDContext.with_key("user-key") + detail = LaunchDarkly::EvaluationDetail.new(true, 0, LaunchDarkly::EvaluationReason.fallthrough) + + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + + [true, false].each do |expected_consistent| + tracker.consistent(-> { expected_consistent }) + event = tracker.build + + expect(event.consistency_check).to be_nil + expect(event.consistency_check_ratio).to be_nil + end + end + + it "when supplied a non-trivial sampling ratio" do + settings = LaunchDarkly::Integrations::TestData::FlagBuilder::FlagMigrationSettingsBuilder.new + settings.check_ratio(10) + + builder = LaunchDarkly::Integrations::TestData::FlagBuilder.new("feature") + builder.migration_settings(settings.build) + flag = LaunchDarkly::Impl::Model::FeatureFlag.new(builder.build(1)) + context = LaunchDarkly::LDContext.with_key("user-key") + detail = LaunchDarkly::EvaluationDetail.new(true, 0, LaunchDarkly::EvaluationReason.fallthrough) + + sampler = 
LaunchDarkly::Impl::Sampler.new(Random.new(0)) + + + count = 0 + 1_000.times do |_| + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.instance_variable_set(:@sampler, sampler) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_NEW) + + tracker.consistent(-> { true }) + event = tracker.build + + unless event.consistency_check.nil? + count += 1 + expect(event.consistency_check_ratio).to eq(10) + end + end + + expect(count).to eq(98) + end + end + + describe "can track errors" do + it "individually" do + [LaunchDarkly::Migrations::ORIGIN_OLD, LaunchDarkly::Migrations::ORIGIN_NEW].each do |origin| + tracker = minimal_tracker + tracker.error(origin) + + event = tracker.build + expect(event.errors.length).to eq(1) + expect(event.errors.include?(origin)).to be true + end + end + + it "together" do + tracker = minimal_tracker + tracker.error(LaunchDarkly::Migrations::ORIGIN_OLD) + tracker.error(LaunchDarkly::Migrations::ORIGIN_NEW) + + event = tracker.build + expect(event.errors.length).to be(2) + end + + it "will ignore invalid origins" do + tracker = minimal_tracker + tracker.error(:invalid_origin) + tracker.error(:another_invalid_origin) + + event = tracker.build + expect(event.errors.length).to be(0) + end + end + + describe "can track latencies" do + it "individually" do + [LaunchDarkly::Migrations::ORIGIN_OLD, LaunchDarkly::Migrations::ORIGIN_NEW].each do |origin| + tracker = minimal_tracker + tracker.latency(origin, 5.4) + + event = tracker.build + expect(event.latencies.length).to eq(1) + expect(event.latencies[origin]).to eq(5.4) + end + end + + it "together" do + tracker = minimal_tracker + tracker.latency(LaunchDarkly::Migrations::ORIGIN_OLD, 2) + tracker.latency(LaunchDarkly::Migrations::ORIGIN_NEW, 3) + + event = tracker.build + expect(event.latencies.length).to be(2) + expect(event.latencies[LaunchDarkly::Migrations::ORIGIN_OLD]).to eq(2) + expect(event.latencies[LaunchDarkly::Migrations::ORIGIN_NEW]).to eq(3) + end + + it "will ignore invalid origins" do + tracker = minimal_tracker + tracker.latency(:invalid_origin, 3) + tracker.latency(:another_invalid_origin, 10) + + event = tracker.build + expect(event.latencies.length).to be(0) + end + + it "will ignore invalid durations" do + tracker = minimal_tracker + tracker.latency(LaunchDarkly::Migrations::ORIGIN_OLD, -1) + tracker.latency(LaunchDarkly::Migrations::ORIGIN_NEW, nil) + + event = tracker.build + expect(event.latencies.length).to be(0) + end + end + + describe "can handle build failures" do + it "without providing a key" do + tracker = OpTracker.new($null_logger, "", nil, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + + event = tracker.build + expect(event).to eq("operation cannot contain an empty key") + end + + it "without operation" do + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + event = tracker.build + expect(event).to eq("operation not provided") + end + + it "with invalid operation" do + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation("nonsense") + event = tracker.build + expect(event).to eq("operation not provided") + end + + it "without calling invoked" do + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, 
LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + + event = tracker.build + expect(event).to eq("no origins were invoked") + end + + it "with invalid context " do + invalid = LaunchDarkly::LDContext.create({kind: 'multi', key: 'invalid'}) + tracker = OpTracker.new($null_logger, flag.key, flag, invalid, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + + event = tracker.build + expect(event).to eq("provided context was invalid") + end + + describe "detects when invoked doesn't align with" do + it "latency" do + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_NEW) + tracker.latency(LaunchDarkly::Migrations::ORIGIN_OLD, 10) + + event = tracker.build + expect(event).to eq("provided latency for origin 'old' without recording invocation") + + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + tracker.latency(LaunchDarkly::Migrations::ORIGIN_NEW, 10) + + event = tracker.build + expect(event).to eq("provided latency for origin 'new' without recording invocation") + + end + + it "errors" do + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_NEW) + tracker.error(LaunchDarkly::Migrations::ORIGIN_OLD) + + event = tracker.build + expect(event).to eq("provided error for origin 'old' without recording invocation") + + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_WRITE) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + tracker.error(LaunchDarkly::Migrations::ORIGIN_NEW) + + event = tracker.build + expect(event).to eq("provided error for origin 'new' without recording invocation") + end + + it "consistent" do + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_READ) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + tracker.consistent(->{ true }) + + event = tracker.build + expect(event).to eq("provided consistency without recording both invocations") + + tracker = OpTracker.new($null_logger, flag.key, flag, context, detail, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_READ) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_NEW) + tracker.consistent(->{ true }) + + event = tracker.build + expect(event).to eq("provided consistency without recording both invocations") + end + end + end + end + end + end +end diff --git a/spec/impl/sampler_spec.rb b/spec/impl/sampler_spec.rb new file mode 100644 index 00000000..13171ce6 --- /dev/null +++ b/spec/impl/sampler_spec.rb @@ -0,0 +1,33 @@ +require "ldclient-rb/impl/sampler" + +module LaunchDarkly + module Impl + describe Sampler do + it "samples false for non-integer values" do + sampler = Sampler.new(Random.new) + ["not an int", true, 3.0].each do |value| + expect(sampler.sample(value)).to be(false) + end + end + + it "non-positive ints are 
considered false" do + sampler = Sampler.new(Random.new) + (-10..0).each do |value| + expect(sampler.sample(value)).to be(false) + end + end + + it "one is true" do + expect(Sampler.new(Random.new).sample(1)).to be(true) + end + + it "can control sampling ratio" do + count = 0 + sampler = Sampler.new(Random.new(0)) + sampled = 1_000.times.select { |_| sampler.sample(10) } + + expect(sampled.size).to eq(98) + end + end + end +end diff --git a/spec/integrations/file_data_source_spec.rb b/spec/integrations/file_data_source_spec.rb index 97704036..8b22e902 100644 --- a/spec/integrations/file_data_source_spec.rb +++ b/spec/integrations/file_data_source_spec.rb @@ -290,7 +290,7 @@ def test_auto_reload(options) client = LaunchDarkly::LDClient.new('sdkKey', config) begin - value = client.variation(flag_value_1_key, { key: 'user' }, '') + value = client.variation(flag_value_1_key, { key: 'user', kind: 'user' }, '') expect(value).to eq(flag_value_1) ensure client.close @@ -304,7 +304,7 @@ def test_auto_reload(options) client = LaunchDarkly::LDClient.new('sdkKey', config) begin - value = client.variation(full_flag_1_key, { key: 'user' }, '') + value = client.variation(full_flag_1_key, { key: 'user', kind: 'user' }, '') expect(value).to eq(full_flag_1_value) ensure client.close diff --git a/spec/ldclient_evaluation_spec.rb b/spec/ldclient_evaluation_spec.rb index 144b63b6..6656237f 100644 --- a/spec/ldclient_evaluation_spec.rb +++ b/spec/ldclient_evaluation_spec.rb @@ -154,7 +154,7 @@ module LaunchDarkly it "returns flags state" do with_client(test_config(data_source: test_data)) do |client| - state = client.all_flags_state({ key: 'userkey' }) + state = client.all_flags_state({ key: 'userkey', kind: 'user' }) expect(state.valid?).to be true values = state.values_map @@ -189,7 +189,7 @@ module LaunchDarkly td.use_preconfigured_flag({ key: "client-side-2", offVariation: 0, variations: [ 'value2' ], clientSide: true }) with_client(test_config(data_source: td)) do |client| - state = client.all_flags_state({ key: 'userkey' }, client_side_only: true) + state = client.all_flags_state({ key: 'userkey', kind: 'user' }, client_side_only: true) expect(state.valid?).to be true values = state.values_map @@ -205,7 +205,7 @@ module LaunchDarkly td.use_preconfigured_flag({ key: "key3", version: 300, offVariation: 1, variations: %w[x value3], debugEventsUntilDate: future_time }) with_client(test_config(data_source: td)) do |client| - state = client.all_flags_state({ key: 'userkey' }, { details_only_for_tracked_flags: true }) + state = client.all_flags_state({ key: 'userkey', kind: 'user' }, { details_only_for_tracked_flags: true }) expect(state.valid?).to be true values = state.values_map diff --git a/spec/ldclient_events_spec.rb b/spec/ldclient_events_spec.rb index e02981c2..9c1d7f07 100644 --- a/spec/ldclient_events_spec.rb +++ b/spec/ldclient_events_spec.rb @@ -22,7 +22,7 @@ def event_processor(client) with_client(test_config) do |client| context = basic_context expect(event_processor(client)).to receive(:record_eval_event).with( - context, 'badkey', nil, nil, 'default', nil, 'default', false, nil, nil + context, 'badkey', nil, nil, 'default', nil, 'default', false, nil, nil, 1, false ) client.variation("badkey", context, "default") end @@ -35,7 +35,7 @@ def event_processor(client) context = basic_context with_client(test_config(data_source: td)) do |client| expect(event_processor(client)).to receive(:record_eval_event).with( - context, 'flagkey', 1, 0, 'value', nil, 'default', false, nil, nil + context, 'flagkey', 
1, 0, 'value', nil, 'default', false, nil, nil, nil, false ) client.variation("flagkey", context, "default") end @@ -81,7 +81,7 @@ def event_processor(client) with_client(test_config(data_source: td)) do |client| expect(event_processor(client)).to receive(:record_eval_event).with( context, 'flagkey', 100, 0, 'value', LaunchDarkly::EvaluationReason::rule_match(0, 'id'), - 'default', true, nil, nil + 'default', true, nil, nil, nil, false ) client.variation("flagkey", context, "default") end @@ -98,7 +98,7 @@ def event_processor(client) with_client(test_config(data_source: td)) do |client| expect(event_processor(client)).to receive(:record_eval_event).with( context, 'flagkey', 100, 0, 'value', LaunchDarkly::EvaluationReason::fallthrough, - 'default', true, nil, nil + 'default', true, nil, nil, nil, false ) client.variation("flagkey", context, "default") end @@ -112,7 +112,7 @@ def event_processor(client) expect(event_processor(client)).to receive(:record_eval_event).with( context, 'badkey', nil, nil, 'default', LaunchDarkly::EvaluationReason::error(LaunchDarkly::EvaluationReason::ERROR_FLAG_NOT_FOUND), - 'default', false, nil, nil + 'default', false, nil, nil, 1, false ) client.variation_detail("badkey", context, "default") end @@ -126,7 +126,7 @@ def event_processor(client) with_client(test_config(data_source: td)) do |client| expect(event_processor(client)).to receive(:record_eval_event).with( context, 'flagkey', 1, 0, 'value', LaunchDarkly::EvaluationReason::off, - 'default', false, nil, nil + 'default', false, nil, nil, nil, false ) client.variation_detail("flagkey", context, "default") end diff --git a/spec/ldclient_migration_variation_spec.rb b/spec/ldclient_migration_variation_spec.rb new file mode 100644 index 00000000..b1dc2c6d --- /dev/null +++ b/spec/ldclient_migration_variation_spec.rb @@ -0,0 +1,81 @@ +require "ldclient-rb" +require "mock_components" +require "model_builders" + +module LaunchDarkly + describe "LDClient migration variation tests" do + it "returns off if default stage is invalid" do + td = Integrations::TestData.data_source + + with_client(test_config(data_source: td)) do |client| + result, tracker = client.migration_variation("flagkey", basic_context, "invalid stage should default to off") + + expect(result).to eq(LaunchDarkly::Migrations::STAGE_OFF) + expect(tracker).not_to be_nil + end + end + + it "returns error if flag isn't found" do + td = Integrations::TestData.data_source + + with_client(test_config(data_source: td)) do |client| + result, tracker = client.migration_variation("flagkey", basic_context, LaunchDarkly::Migrations::STAGE_LIVE) + + expect(result).to eq(LaunchDarkly::Migrations::STAGE_LIVE) + expect(tracker).not_to be_nil + end + end + + it "flag doesn't return a valid stage" do + td = Integrations::TestData.data_source + td.update(td.flag("flagkey").variations("value").variation_for_all(0)) + + with_client(test_config(data_source: td)) do |client| + result, tracker = client.migration_variation("flagkey", basic_context, LaunchDarkly::Migrations::STAGE_LIVE) + tracker.operation(LaunchDarkly::Migrations::OP_READ) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + + expect(result).to eq(LaunchDarkly::Migrations::STAGE_LIVE) + expect(tracker).not_to be_nil + + event = tracker.build + expect(event.evaluation.value).to eq(LaunchDarkly::Migrations::STAGE_LIVE.to_s) + expect(event.evaluation.variation_index).to be_nil + expect(event.evaluation.reason.error_kind).to eq(LaunchDarkly::EvaluationReason::ERROR_WRONG_TYPE) + end + end + + it "flag 
doesn't return a valid stage and default is invalid" do + td = Integrations::TestData.data_source + td.update(td.flag("flagkey").variations("value").variation_for_all(0)) + + with_client(test_config(data_source: td)) do |client| + result, tracker = client.migration_variation("flagkey", basic_context, "invalid stage") + tracker.operation(LaunchDarkly::Migrations::OP_READ) + tracker.invoked(LaunchDarkly::Migrations::ORIGIN_OLD) + + expect(result).to eq(LaunchDarkly::Migrations::STAGE_OFF) + expect(tracker).not_to be_nil + + event = tracker.build + expect(event.evaluation.value).to eq(LaunchDarkly::Migrations::STAGE_OFF.to_s) + expect(event.evaluation.variation_index).to be_nil + expect(event.evaluation.reason.error_kind).to eq(LaunchDarkly::EvaluationReason::ERROR_WRONG_TYPE) + end + end + + it "can determine correct stage from flag" do + LaunchDarkly::Migrations::VALID_STAGES.each do |stage| + td = Integrations::TestData.data_source + td.update(td.flag("flagkey").variations(stage).variation_for_all(0)) + + with_client(test_config(data_source: td)) do |client| + result, tracker = client.migration_variation("flagkey", basic_context, LaunchDarkly::Migrations::STAGE_LIVE) + + expect(result).to eq(stage) + expect(tracker).not_to be_nil + end + end + end + end +end diff --git a/spec/ldclient_spec.rb b/spec/ldclient_spec.rb index ce993b41..61117210 100644 --- a/spec/ldclient_spec.rb +++ b/spec/ldclient_spec.rb @@ -48,7 +48,7 @@ module LaunchDarkly context "secure_mode_hash" do it "will return the expected value for a known message and secret" do ensure_close(subject.new("secret", test_config)) do |client| - result = client.secure_mode_hash({key: :Message}) + result = client.secure_mode_hash(LDContext.create({key: 'Message', kind: 'user'})) expect(result).to eq "aa747c502a898200f9e4fa21bac68136f886a0e27aec70ba06daf2e2a5cb5597" end end diff --git a/spec/migrator_builder_spec.rb b/spec/migrator_builder_spec.rb new file mode 100644 index 00000000..00113f2b --- /dev/null +++ b/spec/migrator_builder_spec.rb @@ -0,0 +1,94 @@ +require 'ldclient-rb/interfaces' +require "mock_components" + +module LaunchDarkly + module Migrations + describe MigratorBuilder do + subject { MigratorBuilder } + + describe "can build" do + it "when properly configured" do + with_client(test_config) do |client| + builder = subject.new(client) + builder.read(->(_) { return true }, ->(_) { return true }) + builder.write(->(_) { return true }, ->(_) { return true }) + migrator = builder.build + + expect(migrator).to be_a LaunchDarkly::Interfaces::Migrations::Migrator + end + end + + it "can modify execution order" do + [MigratorBuilder::EXECUTION_PARALLEL, MigratorBuilder::EXECUTION_RANDOM, MigratorBuilder::EXECUTION_SERIAL].each do |order| + with_client(test_config) do |client| + builder = subject.new(client) + builder.read_execution_order(order) + builder.read(->(_) { return true }, ->(_) { return true }) + builder.write(->(_) { return true }, ->(_) { return true }) + migrator = builder.build + + expect(migrator).to be_a LaunchDarkly::Interfaces::Migrations::Migrator + end + end + end + end + + describe "will fail to build" do + it "if no client is provided" do + error = subject.new(nil).build + + expect(error).to eq("client not provided") + end + + it "if read config isn't provided" do + with_client(test_config) do |client| + error = subject.new(client).build + + expect(error).to eq("read configuration not provided") + end + end + + it "if read config has wrong arity" do + with_client(test_config) do |client| + builder = 
subject.new(client) + builder.read(-> { return true }, -> { return true }) + error = builder.build + + expect(error).to eq("read configuration not provided") + end + end + + it "if read comparison has wrong arity" do + with_client(test_config) do |client| + builder = subject.new(client) + builder.read(->(_) { return true }, ->(_) { return true }, ->(_) { return true }) + error = builder.build + + expect(error).to eq("read configuration not provided") + end + end + + it "if write config isn't provided" do + with_client(test_config) do |client| + builder = subject.new(client) + builder.read(->(_) { return true }, ->(_) { return true }) + + error = builder.build + expect(error).to eq("write configuration not provided") + end + end + + it "if write config has wrong arity" do + with_client(test_config) do |client| + builder = subject.new(client) + builder.read(->(_) { return true }, ->(_) { return true }) + builder.write(-> { return true }, -> { return true }) + error = builder.build + + expect(error).to eq("write configuration not provided") + end + end + end + end + end +end diff --git a/spec/mock_components.rb b/spec/mock_components.rb index e0d2572e..3866b9f3 100644 --- a/spec/mock_components.rb +++ b/spec/mock_components.rb @@ -35,7 +35,7 @@ def with_client(config) end def basic_context - LaunchDarkly::LDContext::create({ "key": "user-key" }) + LaunchDarkly::LDContext::create({ "key": "user-key", kind: "user" }) end module LaunchDarkly