forked from locustio/locust
Merge pull request locustio#33 from erlanggakrisnamukti/develop
Merge develop to master
Showing 11 changed files with 407 additions and 37 deletions.
@@ -0,0 +1,57 @@
import json
import logging
import os

logger = logging.getLogger(__name__)
config_path = '/tests/settings/config.json'


def read_file():
    """
    Read the configuration file and return its contents as a string.
    """
    try:
        # The config file lives relative to the last entry on PYTHONPATH.
        with open(os.environ['PYTHONPATH'].split(os.pathsep)[-1] + config_path, "r") as data_file:
            data = data_file.read()
    except Exception as err:
        logger.info(err)
        data = "{}"
    return data


def write_file(string_json):
    """
    Overwrite the existing configuration with `string_json`.
    If the configuration file doesn't exist yet, it will be created.
    """
    status, message = None, None
    try:
        with open(os.environ['PYTHONPATH'].split(os.pathsep)[-1] + config_path, "w") as data_file:
            data_file.write(string_json)
        status = True
        message = 'Configuration has been saved'
    except Exception as err:
        logger.info(err)
        status = False
        message = "Can't save the configuration: " + str(err)
    return status, message


class ClientConfiguration:
    """
    Handler for the configuration data, kept as a JSON structure.
    """

    config_data = None

    def read_json(self):
        """
        Return the configuration as parsed JSON.
        The configuration file is read only once and then cached.
        """
        if self.config_data is None:
            try:
                with open(os.environ['PYTHONPATH'].split(os.pathsep)[-1] + config_path, "r") as data_file:
                    self.config_data = json.load(data_file)
            except Exception as err:
                logger.info(err)
                self.config_data = {}
        return self.config_data
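A minimal usage sketch for the module above. The file's name isn't shown in this diff view, so the import path client_configuration below is an assumption, as is a PYTHONPATH whose last entry points at a project root that already contains tests/settings/:

import os

os.environ['PYTHONPATH'] = '/opt/project'        # assumed project root; the module uses the last PYTHONPATH entry

import client_configuration as client_config    # hypothetical module name for the file added above

status, message = client_config.write_file('{"host": "http://localhost:8089"}')
print(status, message)   # True 'Configuration has been saved' (assuming the directory exists)

print(client_config.read_file())                          # raw JSON string, "{}" on failure
print(client_config.ClientConfiguration().read_json())    # parsed dict, cached after the first read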
@@ -0,0 +1,92 @@
import json

import pandas as pd


class csvToJson:

    def __init__(self, file_path):
        """
        Initialize the object's attributes.
        :param string file_path: path of the csv file
        """
        self.file_path = file_path
        self.df = pd.read_csv(file_path)

    def get_columns_name(self):
        """
        Return the column names that exist in the file.
        :returns: column names as a list
        """
        return self.df.keys().tolist()

    def _get_nested_record(self, grouped_data_key, grouped_data, array_column, regular_column):
        """
        Build a record consisting of the regular columns and the columns whose values are arrays.
        :param grouped_data_key: key of the grouped data
        :param grouped_data: dataframe holding the grouped data
        :param list array_column: names of columns whose values are lists
        :param list regular_column: names of columns whose values are numbers or strings
        :returns: a record as a dictionary
        """
        record = {}
        # If there is only one regular column, the groupby key is a scalar; turn it into a tuple.
        if not isinstance(grouped_data_key, tuple):
            grouped_data_key = (grouped_data_key,)

        # Assign keys and values coming from the regular columns.
        for i in range(len(grouped_data_key)):
            record[regular_column[i]] = grouped_data_key[i]

        # Assign keys and values coming from the columns that hold array values.
        for field in array_column:
            record[field] = list(grouped_data[field].unique())

        return record

    def _get_array_record(self, array_column):
        """
        Build a record from the columns whose values are arrays.
        :param list array_column: names of columns whose values are lists
        :returns: a record as a dictionary
        """
        record = {}
        for field in array_column:
            record[field] = list(self.df[field].unique())

        return record

    def convert(self, array_column):
        """
        Convert the csv data into json data with only one depth level.
        :param list array_column: names of columns whose values are lists
        :returns: a list of records, or a single dictionary when every column is an array column
        """
        # Collect the column names that do not hold array values.
        regular_column = []
        for column in self.get_columns_name():
            if column not in array_column:
                regular_column.append(column)

        records = None
        # The csv contains only one column, which necessarily acts as an array.
        if len(self.df.columns) == 1:
            records = self._get_array_record(self.get_columns_name())
        # The csv contains more than one column and all of them act as arrays.
        elif not regular_column:
            records = self._get_array_record(array_column)
        # Otherwise, group the csv data by the regular columns.
        else:
            records = []
            for grouped_data_key, grouped_data in self.df.groupby(regular_column):
                record = self._get_nested_record(grouped_data_key, grouped_data, array_column, regular_column)
                records.append(record)

        return records
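A short usage sketch under assumed data. The module name csv_to_json and the file data.csv are hypothetical; the point is that every column listed in array_column is collapsed into a list per group of the remaining (regular) columns:

# data.csv (hypothetical contents):
# host,path
# a.example.com,/login
# a.example.com,/profile
# b.example.com,/login

from csv_to_json import csvToJson   # hypothetical module name for the file added above

converter = csvToJson('data.csv')
print(converter.get_columns_name())            # ['host', 'path']

records = converter.convert(array_column=['path'])
print(records)
# [{'host': 'a.example.com', 'path': ['/login', '/profile']},
#  {'host': 'b.example.com', 'path': ['/login']}]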