Commit 212cee3

feat: support for writing pandas DataFrame (#79)

rolincova committed Apr 28, 2020
1 parent ab3915f commit 212cee3
Showing 2 changed files with 118 additions and 14 deletions.
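
In practice, the new keyword arguments are used like this. A minimal sketch based on the test added below; the URL, token, org, and bucket name are placeholders, and a running InfluxDB 2.x instance is assumed:

    from datetime import timedelta

    from influxdb_client import InfluxDBClient
    from influxdb_client.client.write_api import SYNCHRONOUS
    from influxdb_client.extras import pd

    # Placeholder connection details; replace with a real InfluxDB 2.x instance.
    client = InfluxDBClient(url="http://localhost:9999", token="my-token", org="my-org")
    write_api = client.write_api(write_options=SYNCHRONOUS)

    # The DataFrame index must be a DatetimeIndex (or PeriodIndex); it supplies the timestamps.
    now = pd.Timestamp('1970-01-01 00:00+00:00')
    data_frame = pd.DataFrame(data=[["coyote_creek", 1.0], ["coyote_creek", 2.0]],
                              index=[now + timedelta(hours=1), now + timedelta(hours=2)],
                              columns=["location", "water_level"])

    # 'location' is written as a tag; the remaining column 'water_level' becomes a field.
    write_api.write("my-bucket", record=data_frame,
                    data_frame_measurement_name='h2o_feet',
                    data_frame_tag_columns=['location'])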
96 changes: 82 additions & 14 deletions influxdb_client/client/write_api.py
@@ -183,21 +183,24 @@ def __init__(self, influxdb_client, write_options: WriteOptions = WriteOptions()
     def write(self, bucket: str, org: str = None,
               record: Union[
                   str, List['str'], Point, List['Point'], dict, List['dict'], bytes, List['bytes'], Observable] = None,
-              write_precision: WritePrecision = DEFAULT_WRITE_PRECISION) -> None:
+              write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, data_frame_measurement_name: str = None,
+              data_frame_tag_columns: List['str'] = None) -> None:
         """
         Writes time-series data into influxdb.

         :param str org: specifies the destination organization for writes; take either the ID or Name interchangeably; if both orgID and org are specified, org takes precedence. (required)
         :param str bucket: specifies the destination bucket for writes (required)
         :param WritePrecision write_precision: specifies the precision for the unix timestamps within the body line-protocol
-        :param record: Points, line protocol, RxPY Observable to write
+        :param record: Points, line protocol, Pandas DataFrame, RxPY Observable to write
+        :param data_frame_measurement_name: name of measurement for writing Pandas DataFrame
+        :param data_frame_tag_columns: list of DataFrame columns which are tags, rest columns will be fields
         """

         if org is None:
             org = self._influxdb_client.org

-        if self._point_settings.defaultTags and record:
+        if self._point_settings.defaultTags and record is not None:
             for key, val in self._point_settings.defaultTags.items():
                 if isinstance(record, dict):
                     record.get("tags")[key] = val
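
Note: the switch from a plain truthiness check on record to "record is not None" is needed for the new input type: evaluating a pandas DataFrame in boolean context raises ValueError ("The truth value of a DataFrame is ambiguous"), so the old check would crash on DataFrame input.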
@@ -211,7 +214,9 @@ def write(self, bucket: str, org: str = None,
         if self._write_options.write_type is WriteType.batching:
             return self._write_batching(bucket, org, record, write_precision)

-        final_string = self._serialize(record, write_precision)
+        final_string = self._serialize(record, write_precision,
+                                       data_frame_measurement_name,
+                                       data_frame_tag_columns)

         _async_req = True if self._write_options.write_type == WriteType.asynchronous else False
@@ -235,7 +240,7 @@ def __del__(self):
             self._disposable = None
         pass

-    def _serialize(self, record, write_precision) -> bytes:
+    def _serialize(self, record, write_precision, data_frame_measurement_name, data_frame_tag_columns) -> bytes:
         _result = b''
         if isinstance(record, bytes):
             _result = record
@@ -244,40 +249,103 @@ def _serialize(self, record, write_precision) -> bytes:
             _result = record.encode("utf-8")

         elif isinstance(record, Point):
-            _result = self._serialize(record.to_line_protocol(), write_precision=write_precision)
+            _result = self._serialize(record.to_line_protocol(), write_precision,
+                                      data_frame_measurement_name, data_frame_tag_columns)

         elif isinstance(record, dict):
             _result = self._serialize(Point.from_dict(record, write_precision=write_precision),
-                                      write_precision=write_precision)
+                                      write_precision,
+                                      data_frame_measurement_name, data_frame_tag_columns)
+        elif 'DataFrame' in type(record).__name__:
+            _result = self._serialize(self._data_frame_to_list_of_points(record, data_frame_measurement_name,
+                                                                         data_frame_tag_columns,
+                                                                         precision=write_precision),
+                                      write_precision,
+                                      data_frame_measurement_name, data_frame_tag_columns)

         elif isinstance(record, list):
-            _result = b'\n'.join([self._serialize(item, write_precision=write_precision) for item in record])
+            _result = b'\n'.join([self._serialize(item, write_precision,
+                                                  data_frame_measurement_name, data_frame_tag_columns) for item in record])

         return _result

-    def _write_batching(self, bucket, org, data, precision=DEFAULT_WRITE_PRECISION):
+    def _write_batching(self, bucket, org, data,
+                        data_frame_measurement_name, data_frame_tag_columns,
+                        precision=DEFAULT_WRITE_PRECISION):
         _key = _BatchItemKey(bucket, org, precision)
         if isinstance(data, bytes):
             self._subject.on_next(_BatchItem(key=_key, data=data))

         elif isinstance(data, str):
-            self._write_batching(bucket, org, data.encode("utf-8"), precision)
+            self._write_batching(bucket, org, data.encode("utf-8"),
+                                 data_frame_measurement_name, data_frame_tag_columns, precision)

         elif isinstance(data, Point):
-            self._write_batching(bucket, org, data.to_line_protocol(), precision)
+            self._write_batching(bucket, org, data.to_line_protocol(),
+                                 data_frame_measurement_name, data_frame_tag_columns, precision)

         elif isinstance(data, dict):
-            self._write_batching(bucket, org, Point.from_dict(data, write_precision=precision), precision)
+            self._write_batching(bucket, org, Point.from_dict(data, write_precision=precision),
+                                 data_frame_measurement_name, data_frame_tag_columns, precision)
+
+        elif 'DataFrame' in type(data).__name__:
+            self._write_batching(bucket, org, self._data_frame_to_list_of_points(data, data_frame_measurement_name,
+                                                                                 data_frame_tag_columns, precision),
+                                 data_frame_measurement_name, data_frame_tag_columns, precision)

         elif isinstance(data, list):
             for item in data:
-                self._write_batching(bucket, org, item, precision)
+                self._write_batching(bucket, org, item,
+                                     data_frame_measurement_name, data_frame_tag_columns, precision)

         elif isinstance(data, Observable):
-            data.subscribe(lambda it: self._write_batching(bucket, org, it, precision))
+            data.subscribe(lambda it: self._write_batching(bucket, org, it,
+                                                           data_frame_measurement_name, data_frame_tag_columns,
+                                                           precision))
             pass

         return None

+    def _data_frame_to_list_of_points(self, dataframe, data_frame_measurement_name, data_frame_tag_columns, precision='s'):
+        from ..extras import pd
+        if not isinstance(dataframe, pd.DataFrame):
+            raise TypeError('Must be DataFrame, but type was: {0}.'
+                            .format(type(dataframe)))
+        if not (isinstance(dataframe.index, pd.PeriodIndex) or
+                isinstance(dataframe.index, pd.DatetimeIndex)):
+            raise TypeError('Must be DataFrame with DatetimeIndex or \
+                            PeriodIndex.')
+
+        if isinstance(dataframe.index, pd.PeriodIndex):
+            dataframe.index = dataframe.index.to_timestamp()
+        else:
+            dataframe.index = pd.to_datetime(dataframe.index)
+
+        if dataframe.index.tzinfo is None:
+            dataframe.index = dataframe.index.tz_localize('UTC')
+
+        data = []
+
+        c = 0
+        for v in dataframe.values:
+            point = Point(measurement_name=data_frame_measurement_name)
+
+            count = 0
+            for f in v:
+                column = dataframe.columns[count]
+                if data_frame_tag_columns and column in data_frame_tag_columns:
+                    point.tag(column, f)
+                else:
+                    point.field(column, f)
+                count += 1
+
+            point.time(dataframe.index[c], precision)
+            c += 1
+
+            data.append(point)
+
+        return data
+
     def _http(self, batch_item: _BatchItem):

         logger.debug("Write time series data into InfluxDB: %s", batch_item)
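For reference, the new _data_frame_to_list_of_points turns each DataFrame row into one Point: columns listed in data_frame_tag_columns become tags, every other column becomes a field, and the index supplies the timestamp. For the two-row frame used in the test below, the points serialize to line protocol roughly as follows (timestamps in nanoseconds under the default write precision; exact float formatting depends on Point's serialization):

    h2o_feet,location=coyote_creek water_level=1.0 3600000000000
    h2o_feet,location=coyote_creek water_level=2.0 7200000000000
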
36 changes: 36 additions & 0 deletions tests/test_WriteApi.py
@@ -6,6 +6,7 @@
 import os
 import unittest
 import time
+from datetime import timedelta
 from multiprocessing.pool import ApplyResult

 from influxdb_client import Point, WritePrecision, InfluxDBClient
@@ -224,6 +225,41 @@ def test_write_bytes(self):

         self.delete_test_bucket(_bucket)

+    def test_write_data_frame(self):
+        from influxdb_client.extras import pd
+
+        bucket = self.create_test_bucket()
+
+        now = pd.Timestamp('1970-01-01 00:00+00:00')
+        data_frame = pd.DataFrame(data=[["coyote_creek", 1.0], ["coyote_creek", 2.0]],
+                                  index=[now + timedelta(hours=1), now + timedelta(hours=2)],
+                                  columns=["location", "water_level"])
+
+        self.write_client.write(bucket.name, record=data_frame, data_frame_measurement_name='h2o_feet',
+                                data_frame_tag_columns=['location'])
+
+        result = self.query_api.query(
+            "from(bucket:\"" + bucket.name + "\") |> range(start: 1970-01-01T00:00:00.000000001Z)", self.org)
+
+        self.assertEqual(1, len(result))
+        self.assertEqual(2, len(result[0].records))
+
+        self.assertEqual(result[0].records[0].get_measurement(), "h2o_feet")
+        self.assertEqual(result[0].records[0].get_value(), 1.0)
+        self.assertEqual(result[0].records[0].values.get("location"), "coyote_creek")
+        self.assertEqual(result[0].records[0].get_field(), "water_level")
+        self.assertEqual(result[0].records[0].get_time(),
+                         datetime.datetime(1970, 1, 1, 1, 0, tzinfo=datetime.timezone.utc))
+
+        self.assertEqual(result[0].records[1].get_measurement(), "h2o_feet")
+        self.assertEqual(result[0].records[1].get_value(), 2.0)
+        self.assertEqual(result[0].records[1].values.get("location"), "coyote_creek")
+        self.assertEqual(result[0].records[1].get_field(), "water_level")
+        self.assertEqual(result[0].records[1].get_time(),
+                         datetime.datetime(1970, 1, 1, 2, 0, tzinfo=datetime.timezone.utc))
+
+        self.delete_test_bucket(bucket)
+
     def test_use_default_org(self):
         bucket = self.create_test_bucket()

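A side note on the index normalization in _data_frame_to_list_of_points: a PeriodIndex is converted with to_timestamp(), anything else is coerced through pd.to_datetime(), and a timezone-naive result is localized to UTC. A standalone sketch of just that normalization, assuming only that pandas is installed (no InfluxDB needed):

    import pandas as pd

    df = pd.DataFrame({'water_level': [1.0, 2.0]},
                      index=pd.period_range('1970-01-01 01:00', periods=2, freq='H'))

    # PeriodIndex -> DatetimeIndex, mirroring dataframe.index.to_timestamp()
    df.index = df.index.to_timestamp()

    # Naive timestamps are localized to UTC, mirroring index.tz_localize('UTC')
    if df.index.tzinfo is None:
        df.index = df.index.tz_localize('UTC')

    print(df.index)
    # DatetimeIndex(['1970-01-01 01:00:00+00:00', '1970-01-01 02:00:00+00:00'],
    #               dtype='datetime64[ns, UTC]', ...)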