From fd75faee8d8a0bb7cdd5bbc563f45b3680a2e4e3 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Wed, 8 Jan 2020 15:23:48 -0600 Subject: [PATCH 1/5] Fix lint issue C0301 Relates #838 --- esrally/client.py | 3 ++- esrally/driver/runner.py | 13 +++++++------ esrally/mechanic/provisioner.py | 7 ++++--- esrally/metrics.py | 7 ++++--- esrally/rally.py | 8 +++++--- esrally/reporter.py | 21 ++++++++++++--------- esrally/telemetry.py | 3 ++- esrally/track/__init__.py | 4 +++- esrally/utils/io.py | 1 + tests/client_test.py | 12 ++++++------ tests/driver/runner_test.py | 6 +++--- tests/mechanic/launcher_test.py | 2 +- tests/racecontrol_test.py | 3 ++- tests/telemetry_test.py | 29 +++++++++++++++-------------- tests/utils/opts_test.py | 4 +++- 15 files changed, 70 insertions(+), 53 deletions(-) diff --git a/esrally/client.py b/esrally/client.py index e39395c96..e65faf68f 100644 --- a/esrally/client.py +++ b/esrally/client.py @@ -49,7 +49,8 @@ def __init__(self, hosts, client_options): # ssl.Purpose.CLIENT_AUTH allows presenting client certs and can only be enabled during instantiation # but can be disabled via the verify_mode property later on. - self.ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=self.client_options.pop("ca_certs", certifi.where())) + self.ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, + cafile=self.client_options.pop("ca_certs", certifi.where())) if not self.client_options.pop("verify_certs", True): self.logger.info("SSL certificate verification: off") diff --git a/esrally/driver/runner.py b/esrally/driver/runner.py index 8423b36d1..133ce6a5a 100644 --- a/esrally/driver/runner.py +++ b/esrally/driver/runner.py @@ -315,11 +315,11 @@ def __call__(self, es, params): If ``detailed-results`` is ``True`` the following meta data are returned in addition: * ``ops``: A hash with the operation name as key (e.g. index, update, delete) and various counts as values. ``item-count`` contains - the total number of items for this key. Additionally, we return a separate counter each result (indicating e.g. the number of created - items, the number of deleted items etc.). - * ``shards_histogram``: An array of hashes where each hash has two keys: ``item-count`` contains the number of items to which a shard - distribution applies and ``shards`` contains another hash with the actual distribution of ``total``, ``successful`` and ``failed`` - shards (see examples below). + the total number of items for this key. Additionally, we return a separate counter each result (indicating e.g. the number of + created items, the number of deleted items etc.). + * ``shards_histogram``: An array of hashes where each hash has two keys: ``item-count`` contains the number of items to which a + shard distribution applies and ``shards`` contains another hash with the actual distribution of ``total``, ``successful`` and + ``failed`` shards (see examples below). * ``bulk-request-size-bytes``: Total size of the bulk request body in bytes. * ``total-document-size-bytes``: Total size of all documents within the bulk request body in bytes. @@ -919,7 +919,8 @@ def __repr__(self, *args, **kwargs): class DeleteIndexTemplate(Runner): """ - Execute the `delete index template API `_. + Execute the `delete index template API + `_. 
""" def __call__(self, es, params): diff --git a/esrally/mechanic/provisioner.py b/esrally/mechanic/provisioner.py index 5d1c66825..5e73d3ad8 100644 --- a/esrally/mechanic/provisioner.py +++ b/esrally/mechanic/provisioner.py @@ -34,7 +34,7 @@ def local(cfg, car, plugins, cluster_settings, ip, http_port, all_node_ips, all_ node_root_dir = os.path.join(target_root, node_name) _, java_home = java_resolver.java_home(car.mandatory_var("runtime.jdk"), cfg) - + es_installer = ElasticsearchInstaller(car, java_home, node_name, node_root_dir, all_node_ips, all_node_names, ip, http_port) plugin_installers = [PluginInstaller(plugin, java_home) for plugin in plugins] @@ -227,12 +227,13 @@ def _provisioner_variables(self): provisioner_vars.update(self.es_installer.variables) provisioner_vars.update(plugin_variables) provisioner_vars["cluster_settings"] = cluster_settings - + return provisioner_vars class ElasticsearchInstaller: - def __init__(self, car, java_home, node_name, node_root_dir, all_node_ips, all_node_names, ip, http_port, hook_handler_class=team.BootstrapHookHandler): + def __init__(self, car, java_home, node_name, node_root_dir, all_node_ips, all_node_names, ip, http_port, + hook_handler_class=team.BootstrapHookHandler): self.car = car self.java_home = java_home self.node_name = node_name diff --git a/esrally/metrics.py b/esrally/metrics.py index 2835b19fe..87520e188 100644 --- a/esrally/metrics.py +++ b/esrally/metrics.py @@ -1196,12 +1196,13 @@ def format_dict(d): races = [] for race in race_store(cfg).list(): - races.append([race.race_id, time.to_iso8601(race.race_timestamp), race.track, format_dict(race.track_params), race.challenge_name, race.car_name, - format_dict(race.user_tags), race.track_revision, race.team_revision]) + races.append([race.race_id, time.to_iso8601(race.race_timestamp), race.track, format_dict(race.track_params), race.challenge_name, + race.car_name, format_dict(race.user_tags), race.track_revision, race.team_revision]) if len(races) > 0: console.println("\nRecent races:\n") - console.println(tabulate.tabulate(races, headers=["Race ID", "Race Timestamp", "Track", "Track Parameters", "Challenge", "Car", "User Tags", "Track Revision", "Team Revision"])) + console.println(tabulate.tabulate(races, headers=["Race ID", "Race Timestamp", "Track", "Track Parameters", "Challenge", "Car", + "User Tags", "Track Revision", "Team Revision"])) else: console.println("") console.println("No recent races found.") diff --git a/esrally/rally.py b/esrally/rally.py index 6acf4c960..92ad4f095 100644 --- a/esrally/rally.py +++ b/esrally/rally.py @@ -454,7 +454,7 @@ def positive_number(v): "--distribution-repository", help="Define the repository from where the Elasticsearch distribution should be downloaded (default: release).", default="release") - + task_filter_group = p.add_mutually_exclusive_group() task_filter_group.add_argument( "--include-tasks", @@ -658,7 +658,8 @@ def with_actor_system(runnable, cfg): logger.warning("User interrupted shutdown of internal actor system.") console.info("Please wait a moment for Rally's internal components to shutdown.") if not shutdown_complete and times_interrupted > 0: - logger.warning("Terminating after user has interrupted actor system shutdown explicitly for [%d] times.", times_interrupted) + logger.warning("Terminating after user has interrupted actor system shutdown explicitly for [%d] times.", + times_interrupted) console.println("") console.warn("Terminating now at the risk of leaving child processes behind.") console.println("") @@ -667,7 
+668,8 @@ def with_actor_system(runnable, cfg): console.println(SKULL) console.println("") elif not shutdown_complete: - console.warn("Could not terminate all internal processes within timeout. Please check and force-terminate all Rally processes.") + console.warn("Could not terminate all internal processes within timeout. Please check and force-terminate " + "all Rally processes.") def generate(cfg): diff --git a/esrally/reporter.py b/esrally/reporter.py index ba9df412b..1b0a339bc 100644 --- a/esrally/reporter.py +++ b/esrally/reporter.py @@ -205,9 +205,12 @@ def report_total_time(self, name, total_time): def report_total_time_per_shard(self, name, total_time_per_shard): unit = "min" return self.join( - self.line("Min cumulative {} across primary shards".format(name), "", total_time_per_shard.get("min"), unit, convert.ms_to_minutes), - self.line("Median cumulative {} across primary shards".format(name), "", total_time_per_shard.get("median"), unit, convert.ms_to_minutes), - self.line("Max cumulative {} across primary shards".format(name), "", total_time_per_shard.get("max"), unit, convert.ms_to_minutes), + self.line("Min cumulative {} across primary shards".format(name), "", total_time_per_shard.get("min"), unit, + convert.ms_to_minutes), + self.line("Median cumulative {} across primary shards".format(name), "", total_time_per_shard.get("median"), + unit, convert.ms_to_minutes), + self.line("Max cumulative {} across primary shards".format(name), "", total_time_per_shard.get("max"), unit, + convert.ms_to_minutes), ) def report_total_count(self, name, total_count): @@ -474,12 +477,12 @@ def report_total_time(self, name, baseline_total, contender_total): def report_total_time_per_shard(self, name, baseline_per_shard, contender_per_shard): unit = "min" return self.join( - self.line("Min cumulative {} across primary shard".format(name), baseline_per_shard.get("min"), contender_per_shard.get("min"), "", unit, - treat_increase_as_improvement=False, formatter=convert.ms_to_minutes), - self.line("Median cumulative {} across primary shard".format(name), baseline_per_shard.get("median"), contender_per_shard.get("median"), "", unit, - treat_increase_as_improvement=False, formatter=convert.ms_to_minutes), - self.line("Max cumulative {} across primary shard".format(name), baseline_per_shard.get("max"), contender_per_shard.get("max"), "", unit, - treat_increase_as_improvement=False, formatter=convert.ms_to_minutes), + self.line("Min cumulative {} across primary shard".format(name), baseline_per_shard.get("min"), + contender_per_shard.get("min"), "", unit, treat_increase_as_improvement=False, formatter=convert.ms_to_minutes), + self.line("Median cumulative {} across primary shard".format(name), baseline_per_shard.get("median"), + contender_per_shard.get("median"), "", unit, treat_increase_as_improvement=False, formatter=convert.ms_to_minutes), + self.line("Max cumulative {} across primary shard".format(name), baseline_per_shard.get("max"), contender_per_shard.get("max"), + "", unit, treat_increase_as_improvement=False, formatter=convert.ms_to_minutes), ) def report_total_count(self, name, baseline_total, contender_total): diff --git a/esrally/telemetry.py b/esrally/telemetry.py index 358f9a60d..3da628a95 100644 --- a/esrally/telemetry.py +++ b/esrally/telemetry.py @@ -1049,7 +1049,8 @@ def on_benchmark_stop(self): self.metrics_store.put_doc(doc=ct, level=metrics.MetaInfoScope.cluster) self.add_metrics(self.extract_value(p, ["segments", "doc_values_memory_in_bytes"]), 
"segments_doc_values_memory_in_bytes", "byte") - self.add_metrics(self.extract_value(p, ["segments", "stored_fields_memory_in_bytes"]), "segments_stored_fields_memory_in_bytes", "byte") + self.add_metrics(self.extract_value(p, ["segments", "stored_fields_memory_in_bytes"]), "segments_stored_fields_memory_in_bytes", + "byte") self.add_metrics(self.extract_value(p, ["segments", "terms_memory_in_bytes"]), "segments_terms_memory_in_bytes", "byte") self.add_metrics(self.extract_value(p, ["segments", "norms_memory_in_bytes"]), "segments_norms_memory_in_bytes", "byte") self.add_metrics(self.extract_value(p, ["segments", "points_memory_in_bytes"]), "segments_points_memory_in_bytes", "byte") diff --git a/esrally/track/__init__.py b/esrally/track/__init__.py index 84c8e6aa5..5accba045 100644 --- a/esrally/track/__init__.py +++ b/esrally/track/__init__.py @@ -15,7 +15,9 @@ # specific language governing permissions and limitations # under the License. -from .loader import list_tracks, track_info, load_track, load_track_plugins, track_repo, prepare_track, operation_parameters, set_absolute_data_path +from .loader import ( + list_tracks, track_info, load_track, load_track_plugins, track_repo, prepare_track, operation_parameters, set_absolute_data_path +) # expose the complete track API from .track import * diff --git a/esrally/utils/io.py b/esrally/utils/io.py index e0b815b04..79407f376 100644 --- a/esrally/utils/io.py +++ b/esrally/utils/io.py @@ -428,6 +428,7 @@ def guess_java_home(major_version=8, fallback=None, runner=_run, read_symlink=_r m = debian_jdk_pattern.match(j) if m: return m.group(1) + # pylint: disable=line-too-long # Red Hat based distributions # # ls -l /etc/alternatives/jre_1.[789].0 diff --git a/tests/client_test.py b/tests/client_test.py index 298b51033..89a8ce717 100644 --- a/tests/client_test.py +++ b/tests/client_test.py @@ -64,8 +64,8 @@ def test_create_https_connection_verify_server(self, mocked_load_cert_chain): mock.call("SSL client authentication: off") ]) - assert not mocked_load_cert_chain.called, "ssl_context.load_cert_chain should not have been called as we have not supplied client " \ - "certs" + assert not mocked_load_cert_chain.called, "ssl_context.load_cert_chain should not have been called as we have not supplied " \ + "client certs" self.assertEqual(hosts, f.hosts) self.assertTrue(f.ssl_context.check_hostname) @@ -142,8 +142,8 @@ def test_create_https_connection_only_verify_self_signed_server_certificate(self mock.call("SSL client authentication: off") ]) - assert not mocked_load_cert_chain.called, "ssl_context.load_cert_chain should not have been called as we have not supplied client " \ - "certs" + assert not mocked_load_cert_chain.called, "ssl_context.load_cert_chain should not have been called as we have not supplied " \ + "client certs" self.assertEqual(hosts, f.hosts) self.assertTrue(f.ssl_context.check_hostname) self.assertEqual(ssl.CERT_REQUIRED, f.ssl_context.verify_mode) @@ -217,8 +217,8 @@ def test_create_https_connection_unverified_certificate(self, mocked_load_cert_c mock.call("SSL client authentication: off") ]) - assert not mocked_load_cert_chain.called, "ssl_context.load_cert_chain should not have been called as we have not supplied client " \ - "certs" + assert not mocked_load_cert_chain.called, "ssl_context.load_cert_chain should not have been called as we have not supplied " \ + "client certs" self.assertEqual(hosts, f.hosts) self.assertFalse(f.ssl_context.check_hostname) diff --git a/tests/driver/runner_test.py b/tests/driver/runner_test.py 
index fa7d8ad99..322282b08 100644 --- a/tests/driver/runner_test.py +++ b/tests/driver/runner_test.py @@ -328,7 +328,7 @@ def test_bulk_index_error_no_shards(self, es): "_type": "doc", "_id": "1", "status": 429, - "error": "EsRejectedExecutionException[rejected execution (queue capacity 50) on org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$1@1]" + "error": "EsRejectedExecutionException[rejected execution (queue capacity 50) on org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$1@1]" # pylint: disable=line-too-long } }, { @@ -337,7 +337,7 @@ def test_bulk_index_error_no_shards(self, es): "_type": "doc", "_id": "2", "status": 429, - "error": "EsRejectedExecutionException[rejected execution (queue capacity 50) on org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$1@2]" + "error": "EsRejectedExecutionException[rejected execution (queue capacity 50) on org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$1@2]" # pylint: disable=line-too-long } }, { @@ -346,7 +346,7 @@ def test_bulk_index_error_no_shards(self, es): "_type": "doc", "_id": "3", "status": 429, - "error": "EsRejectedExecutionException[rejected execution (queue capacity 50) on org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$1@3]" + "error": "EsRejectedExecutionException[rejected execution (queue capacity 50) on org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$PrimaryPhase$1@3]" # pylint: disable=line-too-long } } ] diff --git a/tests/mechanic/launcher_test.py b/tests/mechanic/launcher_test.py index a5247f68a..088d226c4 100644 --- a/tests/mechanic/launcher_test.py +++ b/tests/mechanic/launcher_test.py @@ -192,7 +192,7 @@ def test_env_options_order(self): self.assertEqual("/java_home/bin" + os.pathsep + os.environ["PATH"], env["PATH"]) self.assertEqual("-XX:+ExitOnOutOfMemoryError -XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints " "-XX:+UnlockCommercialFeatures -XX:+FlightRecorder " - "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true,dumponexitpath=/tmp/telemetry/profile.jfr " + "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true,dumponexitpath=/tmp/telemetry/profile.jfr " # pylint: disable=line-too-long "-XX:StartFlightRecording=defaultrecording=true", env["ES_JAVA_OPTS"]) diff --git a/tests/racecontrol_test.py b/tests/racecontrol_test.py index 58256de1e..48124bf3b 100644 --- a/tests/racecontrol_test.py +++ b/tests/racecontrol_test.py @@ -70,7 +70,8 @@ def test_fails_without_benchmark_only_pipeline_in_docker(self): self.assertEqual( "Only the [benchmark-only] pipeline is supported by the Rally Docker image.\n" "Add --pipeline=benchmark-only in your Rally arguments and try again.\n" - "For more details read the docs for the benchmark-only pipeline in https://esrally.readthedocs.io/en/latest/pipelines.html#benchmark-only\n", + "For more details read the docs for the benchmark-only pipeline in " + "https://esrally.readthedocs.io/en/latest/pipelines.html#benchmark-only\n", ctx.exception.args[0]) del racecontrol.pipelines[test_pipeline_name] diff --git a/tests/telemetry_test.py b/tests/telemetry_test.py index d3421b3ff..a2439670e 100644 --- a/tests/telemetry_test.py +++ b/tests/telemetry_test.py @@ -162,8 +162,8 @@ class JfrTests(TestCase): def test_sets_options_for_pre_java_9_default_recording_template(self): jfr 
= telemetry.FlightRecorder(telemetry_params={}, log_root="/var/log", java_major_version=random.randint(0, 8)) java_opts = jfr.java_opts("/var/log/test-recording.jfr") - self.assertEqual(["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", "-XX:+FlightRecorder", - "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true," + self.assertEqual(["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", + "-XX:+FlightRecorder", "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true," "dumponexitpath=/var/log/test-recording.jfr", "-XX:StartFlightRecording=defaultrecording=true"], java_opts) def test_sets_options_for_java_9_or_10_default_recording_template(self): @@ -185,9 +185,10 @@ def test_sets_options_for_pre_java_9_custom_recording_template(self): log_root="/var/log", java_major_version=random.randint(0, 8)) java_opts = jfr.java_opts("/var/log/test-recording.jfr") - self.assertEqual(["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", "-XX:+FlightRecorder", - "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true," - "dumponexitpath=/var/log/test-recording.jfr", "-XX:StartFlightRecording=defaultrecording=true,settings=profile"], java_opts) + self.assertEqual(["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", + "-XX:+FlightRecorder", "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true," + "dumponexitpath=/var/log/test-recording.jfr", + "-XX:StartFlightRecording=defaultrecording=true,settings=profile"], java_opts) def test_sets_options_for_java_9_or_10_custom_recording_template(self): jfr = telemetry.FlightRecorder(telemetry_params={"recording-template": "profile"}, @@ -213,8 +214,8 @@ def test_sets_options_for_pre_java_9(self): gc = telemetry.Gc("/var/log", java_major_version=random.randint(0, 8)) gc_java_opts = gc.java_opts("/var/log/defaults-node-0.gc.log") self.assertEqual(7, len(gc_java_opts)) - self.assertEqual(["-Xloggc:/var/log/defaults-node-0.gc.log", "-XX:+PrintGCDetails", "-XX:+PrintGCDateStamps", "-XX:+PrintGCTimeStamps", - "-XX:+PrintGCApplicationStoppedTime", "-XX:+PrintGCApplicationConcurrentTime", + self.assertEqual(["-Xloggc:/var/log/defaults-node-0.gc.log", "-XX:+PrintGCDetails", "-XX:+PrintGCDateStamps", + "-XX:+PrintGCTimeStamps", "-XX:+PrintGCApplicationStoppedTime", "-XX:+PrintGCApplicationConcurrentTime", "-XX:+PrintTenuringDistribution"], gc_java_opts) def test_sets_options_for_java_9_or_above(self): @@ -1974,7 +1975,7 @@ def test_resilient_if_error_response(self, metrics_store_add_meta_info): class DiskIoTests(TestCase): - + @mock.patch("esrally.utils.sysstats.process_io_counters") @mock.patch("esrally.metrics.EsMetricsStore.put_count_node_level") def test_diskio_process_io_counters(self, metrics_store_node_count, process_io_counters): @@ -1985,7 +1986,7 @@ def test_diskio_process_io_counters(self, metrics_store_node_count, process_io_c cfg = create_config() metrics_store = metrics.EsMetricsStore(cfg) - + device = telemetry.DiskIo(node_count_on_host=1) t = telemetry.Telemetry(enabled_devices=[], devices=[device]) node = cluster.Node(pid=None, binary_path="/bin", host_name="localhost", node_name="rally0", telemetry=t) @@ -2000,9 +2001,9 @@ def test_diskio_process_io_counters(self, metrics_store_node_count, process_io_c metrics_store_node_count.assert_has_calls([ mock.call("rally0", "disk_io_write_bytes", 1, "byte"), mock.call("rally0", 
"disk_io_read_bytes", 1, "byte") - + ]) - + @mock.patch("esrally.utils.sysstats.disk_io_counters") @mock.patch("esrally.utils.sysstats.process_io_counters") @mock.patch("esrally.metrics.EsMetricsStore.put_count_node_level") @@ -2012,10 +2013,10 @@ def test_diskio_disk_io_counters(self, metrics_store_node_count, process_io_coun process_stop = Diskio(13, 13) disk_io_counters.side_effect = [process_start, process_stop] process_io_counters.side_effect = [None, None] - + cfg = create_config() metrics_store = metrics.EsMetricsStore(cfg) - + device = telemetry.DiskIo(node_count_on_host=2) t = telemetry.Telemetry(enabled_devices=[], devices=[device]) node = cluster.Node(pid=None, binary_path="/bin", host_name="localhost", node_name="rally0", telemetry=t) @@ -2027,7 +2028,7 @@ def test_diskio_disk_io_counters(self, metrics_store_node_count, process_io_coun t.detach_from_node(node, running=False) t.store_system_metrics(node, metrics_store) - # expected result is 1 byte because there are two nodes on the machine. Result is calculated + # expected result is 1 byte because there are two nodes on the machine. Result is calculated # with total_bytes / node_count metrics_store_node_count.assert_has_calls([ mock.call("rally0", "disk_io_write_bytes", 1, "byte"), diff --git a/tests/utils/opts_test.py b/tests/utils/opts_test.py index 1cf0a5201..046d7b556 100644 --- a/tests/utils/opts_test.py +++ b/tests/utils/opts_test.py @@ -159,7 +159,9 @@ def test_csv_hosts_parses(self): opts.TargetHosts(target_hosts).default) def test_jsonstring_parses_as_dict_of_clusters(self): - target_hosts = '{"default": ["127.0.0.1:9200","10.17.0.5:19200"], "remote_1": ["88.33.22.15:19200"], "remote_2": ["10.18.0.6:19200","10.18.0.7:19201"]}' + target_hosts = ('{"default": ["127.0.0.1:9200","10.17.0.5:19200"],' + ' "remote_1": ["88.33.22.15:19200"],' + ' "remote_2": ["10.18.0.6:19200","10.18.0.7:19201"]}') self.assertEqual( {'default': ['127.0.0.1:9200','10.17.0.5:19200'], From bab1ea0a130178ffbbed6be188944a1e15459448 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Thu, 9 Jan 2020 08:08:22 -0600 Subject: [PATCH 2/5] Fix a new line length --- tests/mechanic/supplier_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/mechanic/supplier_test.py b/tests/mechanic/supplier_test.py index 209cf6c5c..620c104d7 100644 --- a/tests/mechanic/supplier_test.py +++ b/tests/mechanic/supplier_test.py @@ -568,7 +568,8 @@ def test_release_repo_config_with_default_url(self, os_name, cpu_arch): renderer = supplier.TemplateRenderer(version="7.3.2") repo = supplier.DistributionRepository(name="release", distribution_config={ "runtime.jdk.bundled": "true", - "jdk.bundled.release_url": "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}-{{OSNAME}}-{{ARCH}}.tar.gz", + "jdk.bundled.release_url": + "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}-{{OSNAME}}-{{ARCH}}.tar.gz", "release.cache": "true" }, template_renderer=renderer) self.assertEqual("https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.3.2-linux-x86_64.tar.gz", repo.download_url) From 3eb8a773bab25dfb1f8240ba180862ca5a1aa36d Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Thu, 9 Jan 2020 12:33:15 -0600 Subject: [PATCH 3/5] Remove from pylintrc --- .pylintrc | 1 - 1 file changed, 1 deletion(-) diff --git a/.pylintrc b/.pylintrc index ad479d04f..a83a765c3 100644 --- a/.pylintrc +++ b/.pylintrc @@ -167,7 +167,6 @@ disable=print-statement, too-many-statements, 
         inconsistent-return-statements,
         C0121,
-        C0301,
         C0302,
         C0303,
         C0326,

From 15cf6e43c979304df2b146984d5c3c06e6488c29 Mon Sep 17 00:00:00 2001
From: Michael Basnight
Date: Fri, 10 Jan 2020 09:20:55 -0600
Subject: [PATCH 4/5] Temp commit to be removed once the Jenkins job is tested properly

---
 esrally/client.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/esrally/client.py b/esrally/client.py
index e65faf68f..ec28e5cb7 100644
--- a/esrally/client.py
+++ b/esrally/client.py
@@ -23,7 +23,7 @@
 from esrally import exceptions, doc_link
 from esrally.utils import console
 
-
+# THIS IS A LONG LINE TO TEST THE FAILURE OF THE JENKINS JOBS ONCE THE PIPEFAIL FIX IS MADE. THIS LINE WILL BE REMOVED, EVENTUALLY. IS THIS LONG ENUF YET?
 class EsClientFactory:
     """
     Abstracts how the Elasticsearch client is created. Intended for testing.

From a317727f25b1cb464529ad58a3725109c1c02714 Mon Sep 17 00:00:00 2001
From: Michael Basnight
Date: Mon, 13 Jan 2020 12:07:17 -0600
Subject: [PATCH 5/5] Revert "Temp commit to be removed once the Jenkins job is tested properly"

This reverts commit 15cf6e43c979304df2b146984d5c3c06e6488c29.

---
 esrally/client.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/esrally/client.py b/esrally/client.py
index ec28e5cb7..e65faf68f 100644
--- a/esrally/client.py
+++ b/esrally/client.py
@@ -23,7 +23,7 @@
 from esrally import exceptions, doc_link
 from esrally.utils import console
 
-# THIS IS A LONG LINE TO TEST THE FAILURE OF THE JENKINS JOBS ONCE THE PIPEFAIL FIX IS MADE. THIS LINE WILL BE REMOVED, EVENTUALLY. IS THIS LONG ENUF YET?
+
 class EsClientFactory:
     """
     Abstracts how the Elasticsearch client is created. Intended for testing.
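
Note on the wrapping style used in patch 1: the fixes rely on three stock Python idioms rather than backslash continuations. A minimal sketch of each — the values and names below are illustrative, not copied from the diff:

    # 1. Implicit concatenation of adjacent string literals inside parentheses,
    #    as in rally.py and the client tests. Python joins the pieces at
    #    compile time, so the runtime value is a single string.
    warning = ("Could not terminate all internal processes within timeout. "
               "Please check and force-terminate all Rally processes.")

    # 2. Breaking a long parameter or argument list after the opening
    #    parenthesis and aligning the continuation lines, as in client.py
    #    and provisioner.py.
    def installer(car, java_home, node_name, node_root_dir, all_node_ips,
                  all_node_names, ip, http_port):
        return node_name

    # 3. A trailing pragma for lines that read worse when split, such as
    #    long literals in test fixtures (runner_test.py, launcher_test.py).
    fixture = "EsRejectedExecutionException[rejected execution (queue capacity 50) on TransportShardReplicationOperationAction$PrimaryPhase$1@1]"  # pylint: disable=line-too-long

    # 4. Parenthesized multi-line imports, as in track/__init__.py.
    from textwrap import (
        dedent, indent
    )

The pragma in idiom 3 suppresses the check for that one line only, which keeps the repository-wide check strict while tolerating fixtures that must stay intact.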
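With C0301 removed from the disable list in .pylintrc (patch 3), the line-length check is active again. A sketch of a local verification run, assuming pylint is installed; the 140-character limit is an assumption, not read from this repository — the authoritative value is whatever max-line-length says under [FORMAT] in .pylintrc:

    import subprocess

    # Run only the line-too-long check (the symbolic name for C0301) over the
    # two source trees touched by this series. check=False because pylint
    # exits nonzero whenever it reports any violation.
    result = subprocess.run(
        ["pylint", "--disable=all", "--enable=line-too-long",
         "--max-line-length=140", "esrally", "tests"],
        check=False)
    print("clean" if result.returncode == 0 else "violations found")

The deliberately overlong line added in patch 4 (and reverted in patch 5) exists to confirm that CI actually fails on a violation: when pylint's output is piped through another command in a shell script, its nonzero exit status is discarded unless `set -o pipefail` is in effect, which is presumably the "pipefail fix" that commit's comment refers to.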