code stringlengths 66 870k | docstring stringlengths 19 26.7k | func_name stringlengths 1 138 | language stringclasses 1
value | repo stringlengths 7 68 | path stringlengths 5 324 | url stringlengths 46 389 | license stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def test_get_headers(url, body, expected_content_type):
"""Test header extraction and content type detection."""
parsed_url = apprise_parse_url(url)
headers = _get_headers(parsed_url, body)
if expected_content_type:
assert headers.get("Content-Type") == expected_content_type | Test header extraction and content type detection. | test_get_headers | python | dgtlmoon/changedetection.io | changedetectionio/tests/apprise/test_apprise_custom_api_call.py | https://github.com/dgtlmoon/changedetection.io/blob/master/changedetectionio/tests/apprise/test_apprise_custom_api_call.py | Apache-2.0 |
def test_get_params(url, expected_params):
"""Test parameter extraction with URL encoding and exclusion logic."""
parsed_url = apprise_parse_url(url)
params = _get_params(parsed_url)
assert dict(params) == expected_params | Test parameter extraction with URL encoding and exclusion logic. | test_get_params | python | dgtlmoon/changedetection.io | changedetectionio/tests/apprise/test_apprise_custom_api_call.py | https://github.com/dgtlmoon/changedetection.io/blob/master/changedetectionio/tests/apprise/test_apprise_custom_api_call.py | Apache-2.0 |
def test_apprise_custom_api_call_success(mock_request, url, schema, method):
"""Test successful API calls with different HTTP methods and schemas."""
mock_request.return_value.raise_for_status.return_value = None
meta = {"url": url, "schema": schema}
result = apprise_http_custom_handler(
body="test body", title="Test Title", notify_type="info", meta=meta
)
assert result is True
mock_request.assert_called_once()
call_args = mock_request.call_args
assert call_args[1]["method"] == method.upper()
assert call_args[1]["url"].startswith("http") | Test successful API calls with different HTTP methods and schemas. | test_apprise_custom_api_call_success | python | dgtlmoon/changedetection.io | changedetectionio/tests/apprise/test_apprise_custom_api_call.py | https://github.com/dgtlmoon/changedetection.io/blob/master/changedetectionio/tests/apprise/test_apprise_custom_api_call.py | Apache-2.0 |
def test_invalid_url_parsing():
"""Test handling of invalid URL parsing."""
meta = {"url": "invalid://url", "schema": "invalid"}
result = apprise_http_custom_handler(
body="test", title="Invalid URL", notify_type="info", meta=meta
)
assert result is False | Test handling of invalid URL parsing. | test_invalid_url_parsing | python | dgtlmoon/changedetection.io | changedetectionio/tests/apprise/test_apprise_custom_api_call.py | https://github.com/dgtlmoon/changedetection.io/blob/master/changedetectionio/tests/apprise/test_apprise_custom_api_call.py | Apache-2.0 |
def test_https_method_conversion(
mock_request, input_schema, expected_method
):
"""Validate that methods ending with 's' use HTTPS and correct HTTP method."""
mock_request.return_value.raise_for_status.return_value = None
url = f"{input_schema}://localhost:9999"
result = apprise_http_custom_handler(
body="test body",
title="Test Title",
notify_type="info",
meta={"url": url, "schema": input_schema},
)
assert result is True
mock_request.assert_called_once()
call_args = mock_request.call_args
assert call_args[1]["method"] == expected_method
assert call_args[1]["url"].startswith("https") | Validate that methods ending with 's' use HTTPS and correct HTTP method. | test_https_method_conversion | python | dgtlmoon/changedetection.io | changedetectionio/tests/apprise/test_apprise_custom_api_call.py | https://github.com/dgtlmoon/changedetection.io/blob/master/changedetectionio/tests/apprise/test_apprise_custom_api_call.py | Apache-2.0 |
def search_satellite_data(coordinates, cloud_cover_lt, product="landsat"):
"""
coordinates: bounding box's coordinates
cloud_cover_lt: maximum cloud cover
product: landsat, sentinel
"""
if product == "landsat":
product = "landsat-8-l1-c1"
elif product == "sentinel":
product = "sentinel-s2-l1c"
search = Search(
url="https://earth-search.aws.element84.com/v0",
bbox=get_tiny_bbox(coordinates),
query={
"eo:cloud_cover": {"lt": cloud_cover_lt},
},
sort=[{"field": "eo:cloud_cover", "direction": "asc"}],
collections=[product]
)
# improvement: filter by date, cloud cover here
search_items = search.items()
if not len(search_items):
exit_cli(print, "No data matched your search, please try different parameters.")
# return the first result
item = search_items[0]
return item |
coordinates: bounding box's coordinates
cloud_cover_lt: maximum cloud cover
product: landsat, sentinel
| search_satellite_data | python | plant99/felicette | felicette/sat_downloader.py | https://github.com/plant99/felicette/blob/master/felicette/sat_downloader.py | MIT |
def display_file(file_name):
"""
Open given file with default user program.
"""
if sys.platform.startswith("linux"):
os.system("xdg-open %s" % file_name)
elif sys.platform.startswith("darwin"):
os.system("open %s" % file_name) |
Open given file with default user program.
| display_file | python | plant99/felicette | felicette/utils/sys_utils.py | https://github.com/plant99/felicette/blob/master/felicette/utils/sys_utils.py | MIT |
def main(host='localhost', port=8086):
"""Instantiate the connection to the InfluxDB client."""
user = 'root'
password = 'root'
dbname = 'demo'
protocol = 'line'
client = DataFrameClient(host, port, user, password, dbname)
print("Create pandas DataFrame")
df = pd.DataFrame(data=list(range(30)),
index=pd.date_range(start='2014-11-16',
periods=30, freq='H'), columns=['0'])
print("Create database: " + dbname)
client.create_database(dbname)
print("Write DataFrame")
client.write_points(df, 'demo', protocol=protocol)
print("Write DataFrame with Tags")
client.write_points(df, 'demo',
{'k1': 'v1', 'k2': 'v2'}, protocol=protocol)
print("Read DataFrame")
client.query("select * from demo")
print("Delete database: " + dbname)
client.drop_database(dbname) | Instantiate the connection to the InfluxDB client. | main | python | influxdata/influxdb-python | examples/tutorial_pandas.py | https://github.com/influxdata/influxdb-python/blob/master/examples/tutorial_pandas.py | MIT |
def main(host='localhost', port=8086):
"""Define function to generate the sin wave."""
now = datetime.datetime.today()
points = []
for angle in range(0, 360):
y = 10 + math.sin(math.radians(angle)) * 10
point = {
"measurement": 'foobar',
"time": int(now.strftime('%s')) + angle,
"fields": {
"value": y
}
}
points.append(point)
client = InfluxDBClient(host, port, USER, PASSWORD, DBNAME)
print("Create database: " + DBNAME)
client.create_database(DBNAME)
client.switch_database(DBNAME)
# Write points
client.write_points(points)
time.sleep(3)
query = 'SELECT * FROM foobar'
print("Querying data: " + query)
result = client.query(query, database=DBNAME)
print("Result: {0}".format(result))
"""
You might want to comment the delete and plot the result on InfluxDB
Interface. Connect on InfluxDB Interface at http://127.0.0.1:8083/
Select the database tutorial -> Explore Data
Then run the following query:
SELECT * from foobar
"""
print("Delete database: " + DBNAME)
client.drop_database(DBNAME) | Define function to generate the sin wave. | main | python | influxdata/influxdb-python | examples/tutorial_sine_wave.py | https://github.com/influxdata/influxdb-python/blob/master/examples/tutorial_sine_wave.py | MIT |
def loads(s):
"""Generate a sequence of JSON values from a string."""
_decoder = json.JSONDecoder()
while s:
s = s.strip()
obj, pos = _decoder.raw_decode(s)
if not pos:
raise ValueError('no JSON object found at %i' % pos)
yield obj
s = s[pos:] | Generate a sequence of JSON values from a string. | loads | python | influxdata/influxdb-python | influxdb/chunked_json.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/chunked_json.py | MIT |
def from_dsn(cls, dsn, **kwargs):
r"""Generate an instance of InfluxDBClient from given data source name.
Return an instance of :class:`~.InfluxDBClient` from the provided
data source name. Supported schemes are "influxdb", "https+influxdb"
and "udp+influxdb". Parameters for the :class:`~.InfluxDBClient`
constructor may also be passed to this method.
:param dsn: data source name
:type dsn: string
:param kwargs: additional parameters for `InfluxDBClient`
:type kwargs: dict
:raises ValueError: if the provided DSN has any unexpected values
:Example:
::
>> cli = InfluxDBClient.from_dsn('influxdb://username:password@\
localhost:8086/databasename', timeout=5)
>> type(cli)
<class 'influxdb.client.InfluxDBClient'>
>> cli = InfluxDBClient.from_dsn('udp+influxdb://username:pass@\
localhost:8086/databasename', timeout=5, udp_port=159)
>> print('{0._baseurl} - {0.use_udp} {0.udp_port}'.format(cli))
http://localhost:8086 - True 159
.. note:: parameters provided in `**kwargs` may override dsn parameters
.. note:: when using "udp+influxdb" the specified port (if any) will
be used for the TCP connection; specify the UDP port with the
additional `udp_port` parameter (cf. examples).
"""
init_args = _parse_dsn(dsn)
host, port = init_args.pop('hosts')[0]
init_args['host'] = host
init_args['port'] = port
init_args.update(kwargs)
return cls(**init_args) | Generate an instance of InfluxDBClient from given data source name.
Return an instance of :class:`~.InfluxDBClient` from the provided
data source name. Supported schemes are "influxdb", "https+influxdb"
and "udp+influxdb". Parameters for the :class:`~.InfluxDBClient`
constructor may also be passed to this method.
:param dsn: data source name
:type dsn: string
:param kwargs: additional parameters for `InfluxDBClient`
:type kwargs: dict
:raises ValueError: if the provided DSN has any unexpected values
:Example:
::
>> cli = InfluxDBClient.from_dsn('influxdb://username:password@\
localhost:8086/databasename', timeout=5)
>> type(cli)
<class 'influxdb.client.InfluxDBClient'>
>> cli = InfluxDBClient.from_dsn('udp+influxdb://username:pass@\
localhost:8086/databasename', timeout=5, udp_port=159)
>> print('{0._baseurl} - {0.use_udp} {0.udp_port}'.format(cli))
http://localhost:8086 - True 159
.. note:: parameters provided in `**kwargs` may override dsn parameters
.. note:: when using "udp+influxdb" the specified port (if any) will
be used for the TCP connection; specify the UDP port with the
additional `udp_port` parameter (cf. examples).
| from_dsn | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def switch_user(self, username, password):
"""Change the client's username.
:param username: the username to switch to
:type username: str
:param password: the password for the username
:type password: str
"""
self._username = username
self._password = password | Change the client's username.
:param username: the username to switch to
:type username: str
:param password: the password for the username
:type password: str
| switch_user | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def request(self, url, method='GET', params=None, data=None, stream=False,
expected_response_code=200, headers=None):
"""Make a HTTP request to the InfluxDB API.
:param url: the path of the HTTP request, e.g. write, query, etc.
:type url: str
:param method: the HTTP method for the request, defaults to GET
:type method: str
:param params: additional parameters for the request, defaults to None
:type params: dict
:param data: the data of the request, defaults to None
:type data: str
:param stream: True if a query uses chunked responses
:type stream: bool
:param expected_response_code: the expected response code of
the request, defaults to 200
:type expected_response_code: int
:param headers: headers to add to the request
:type headers: dict
:returns: the response from the request
:rtype: :class:`requests.Response`
:raises InfluxDBServerError: if the response code is any server error
code (5xx)
:raises InfluxDBClientError: if the response code is not the
same as `expected_response_code` and is not a server error code
"""
url = "{0}/{1}".format(self._baseurl, url)
if headers is None:
headers = self._headers
if params is None:
params = {}
if isinstance(data, (dict, list)):
data = json.dumps(data)
if self._gzip:
# Receive and send compressed data
headers.update({
'Accept-Encoding': 'gzip',
'Content-Encoding': 'gzip',
})
if data is not None:
# For Py 2.7 compatability use Gzipfile
compressed = io.BytesIO()
with gzip.GzipFile(
compresslevel=9,
fileobj=compressed,
mode='w'
) as f:
f.write(data)
data = compressed.getvalue()
# Try to send the request more than once by default (see #103)
retry = True
_try = 0
while retry:
try:
if "Authorization" in headers:
auth = (None, None)
else:
auth = (self._username, self._password)
response = self._session.request(
method=method,
url=url,
auth=auth if None not in auth else None,
params=params,
data=data,
stream=stream,
headers=headers,
proxies=self._proxies,
verify=self._verify_ssl,
timeout=self._timeout
)
break
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout):
_try += 1
if self._retries != 0:
retry = _try < self._retries
if not retry:
raise
if method == "POST":
time.sleep((2 ** _try) * random.random() / 100.0)
type_header = response.headers and response.headers.get("Content-Type")
if type_header == "application/x-msgpack" and response.content:
response._msgpack = msgpack.unpackb(
packed=response.content,
ext_hook=_msgpack_parse_hook,
raw=False)
else:
response._msgpack = None
def reformat_error(response):
if response._msgpack:
return json.dumps(response._msgpack, separators=(',', ':'))
else:
return response.content
# if there's not an error, there must have been a successful response
if 500 <= response.status_code < 600:
raise InfluxDBServerError(reformat_error(response))
elif response.status_code == expected_response_code:
return response
else:
err_msg = reformat_error(response)
raise InfluxDBClientError(err_msg, response.status_code) | Make a HTTP request to the InfluxDB API.
:param url: the path of the HTTP request, e.g. write, query, etc.
:type url: str
:param method: the HTTP method for the request, defaults to GET
:type method: str
:param params: additional parameters for the request, defaults to None
:type params: dict
:param data: the data of the request, defaults to None
:type data: str
:param stream: True if a query uses chunked responses
:type stream: bool
:param expected_response_code: the expected response code of
the request, defaults to 200
:type expected_response_code: int
:param headers: headers to add to the request
:type headers: dict
:returns: the response from the request
:rtype: :class:`requests.Response`
:raises InfluxDBServerError: if the response code is any server error
code (5xx)
:raises InfluxDBClientError: if the response code is not the
same as `expected_response_code` and is not a server error code
| request | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def write(self, data, params=None, expected_response_code=204,
protocol='json'):
"""Write data to InfluxDB.
:param data: the data to be written
:type data: (if protocol is 'json') dict
(if protocol is 'line') sequence of line protocol strings
or single string
:param params: additional parameters for the request, defaults to None
:type params: dict
:param expected_response_code: the expected response code of the write
operation, defaults to 204
:type expected_response_code: int
:param protocol: protocol of input data, either 'json' or 'line'
:type protocol: str
:returns: True, if the write operation is successful
:rtype: bool
"""
headers = self._headers.copy()
headers['Content-Type'] = 'application/octet-stream'
if params:
precision = params.get('precision')
else:
precision = None
if protocol == 'json':
data = make_lines(data, precision).encode('utf-8')
elif protocol == 'line':
if isinstance(data, str):
data = [data]
data = ('\n'.join(data) + '\n').encode('utf-8')
self.request(
url="write",
method='POST',
params=params,
data=data,
expected_response_code=expected_response_code,
headers=headers
)
return True | Write data to InfluxDB.
:param data: the data to be written
:type data: (if protocol is 'json') dict
(if protocol is 'line') sequence of line protocol strings
or single string
:param params: additional parameters for the request, defaults to None
:type params: dict
:param expected_response_code: the expected response code of the write
operation, defaults to 204
:type expected_response_code: int
:param protocol: protocol of input data, either 'json' or 'line'
:type protocol: str
:returns: True, if the write operation is successful
:rtype: bool
| write | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def query(self,
query,
params=None,
bind_params=None,
epoch=None,
expected_response_code=200,
database=None,
raise_errors=True,
chunked=False,
chunk_size=0,
method="GET"):
"""Send a query to InfluxDB.
.. danger::
In order to avoid injection vulnerabilities (similar to `SQL
injection <https://www.owasp.org/index.php/SQL_Injection>`_
vulnerabilities), do not directly include untrusted data into the
``query`` parameter, use ``bind_params`` instead.
:param query: the actual query string
:type query: str
:param params: additional parameters for the request,
defaults to {}
:type params: dict
:param bind_params: bind parameters for the query:
any variable in the query written as ``'$var_name'`` will be
replaced with ``bind_params['var_name']``. Only works in the
``WHERE`` clause and takes precedence over ``params['params']``
:type bind_params: dict
:param epoch: response timestamps to be in epoch format either 'h',
'm', 's', 'ms', 'u', or 'ns',defaults to `None` which is
RFC3339 UTC format with nanosecond precision
:type epoch: str
:param expected_response_code: the expected status code of response,
defaults to 200
:type expected_response_code: int
:param database: database to query, defaults to None
:type database: str
:param raise_errors: Whether or not to raise exceptions when InfluxDB
returns errors, defaults to True
:type raise_errors: bool
:param chunked: Enable to use chunked responses from InfluxDB.
With ``chunked`` enabled, one ResultSet is returned per chunk
containing all results within that chunk
:type chunked: bool
:param chunk_size: Size of each chunk to tell InfluxDB to use.
:type chunk_size: int
:param method: the HTTP method for the request, defaults to GET
:type method: str
:returns: the queried data
:rtype: :class:`~.ResultSet`
"""
if params is None:
params = {}
if bind_params is not None:
params_dict = json.loads(params.get('params', '{}'))
params_dict.update(bind_params)
params['params'] = json.dumps(params_dict)
params['q'] = query
params['db'] = database or self._database
if epoch is not None:
params['epoch'] = epoch
if chunked:
params['chunked'] = 'true'
if chunk_size > 0:
params['chunk_size'] = chunk_size
if query.lower().startswith("select ") and " into " in query.lower():
method = "POST"
response = self.request(
url="query",
method=method,
params=params,
data=None,
stream=chunked,
expected_response_code=expected_response_code
)
data = response._msgpack
if not data:
if chunked:
return self._read_chunked_response(response)
data = response.json()
results = [
ResultSet(result, raise_errors=raise_errors)
for result
in data.get('results', [])
]
# TODO(aviau): Always return a list. (This would be a breaking change)
if len(results) == 1:
return results[0]
return results | Send a query to InfluxDB.
.. danger::
In order to avoid injection vulnerabilities (similar to `SQL
injection <https://www.owasp.org/index.php/SQL_Injection>`_
vulnerabilities), do not directly include untrusted data into the
``query`` parameter, use ``bind_params`` instead.
:param query: the actual query string
:type query: str
:param params: additional parameters for the request,
defaults to {}
:type params: dict
:param bind_params: bind parameters for the query:
any variable in the query written as ``'$var_name'`` will be
replaced with ``bind_params['var_name']``. Only works in the
``WHERE`` clause and takes precedence over ``params['params']``
:type bind_params: dict
:param epoch: response timestamps to be in epoch format either 'h',
'm', 's', 'ms', 'u', or 'ns',defaults to `None` which is
RFC3339 UTC format with nanosecond precision
:type epoch: str
:param expected_response_code: the expected status code of response,
defaults to 200
:type expected_response_code: int
:param database: database to query, defaults to None
:type database: str
:param raise_errors: Whether or not to raise exceptions when InfluxDB
returns errors, defaults to True
:type raise_errors: bool
:param chunked: Enable to use chunked responses from InfluxDB.
With ``chunked`` enabled, one ResultSet is returned per chunk
containing all results within that chunk
:type chunked: bool
:param chunk_size: Size of each chunk to tell InfluxDB to use.
:type chunk_size: int
:param method: the HTTP method for the request, defaults to GET
:type method: str
:returns: the queried data
:rtype: :class:`~.ResultSet`
| query | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def write_points(self,
points,
time_precision=None,
database=None,
retention_policy=None,
tags=None,
batch_size=None,
protocol='json',
consistency=None
):
"""Write to multiple time series names.
:param points: the list of points to be written in the database
:type points: list of dictionaries, each dictionary represents a point
:type points: (if protocol is 'json') list of dicts, where each dict
represents a point.
(if protocol is 'line') sequence of line protocol strings.
:param time_precision: Either 's', 'm', 'ms' or 'u', defaults to None
:type time_precision: str
:param database: the database to write the points to. Defaults to
the client's current database
:type database: str
:param tags: a set of key-value pairs associated with each point. Both
keys and values must be strings. These are shared tags and will be
merged with point-specific tags, defaults to None
:type tags: dict
:param retention_policy: the retention policy for the points. Defaults
to None
:type retention_policy: str
:param batch_size: value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation,
defaults to None
:type batch_size: int
:param protocol: Protocol for writing data. Either 'line' or 'json'.
:type protocol: str
:param consistency: Consistency for the points.
One of {'any','one','quorum','all'}.
:type consistency: str
:returns: True, if the operation is successful
:rtype: bool
.. note:: if no retention policy is specified, the default retention
policy for the database is used
"""
if batch_size and batch_size > 0:
for batch in self._batches(points, batch_size):
self._write_points(points=batch,
time_precision=time_precision,
database=database,
retention_policy=retention_policy,
tags=tags, protocol=protocol,
consistency=consistency)
return True
return self._write_points(points=points,
time_precision=time_precision,
database=database,
retention_policy=retention_policy,
tags=tags, protocol=protocol,
consistency=consistency) | Write to multiple time series names.
:param points: the list of points to be written in the database
:type points: list of dictionaries, each dictionary represents a point
:type points: (if protocol is 'json') list of dicts, where each dict
represents a point.
(if protocol is 'line') sequence of line protocol strings.
:param time_precision: Either 's', 'm', 'ms' or 'u', defaults to None
:type time_precision: str
:param database: the database to write the points to. Defaults to
the client's current database
:type database: str
:param tags: a set of key-value pairs associated with each point. Both
keys and values must be strings. These are shared tags and will be
merged with point-specific tags, defaults to None
:type tags: dict
:param retention_policy: the retention policy for the points. Defaults
to None
:type retention_policy: str
:param batch_size: value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation,
defaults to None
:type batch_size: int
:param protocol: Protocol for writing data. Either 'line' or 'json'.
:type protocol: str
:param consistency: Consistency for the points.
One of {'any','one','quorum','all'}.
:type consistency: str
:returns: True, if the operation is successful
:rtype: bool
.. note:: if no retention policy is specified, the default retention
policy for the database is used
| write_points | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def ping(self):
"""Check connectivity to InfluxDB.
:returns: The version of the InfluxDB the client is connected to
"""
response = self.request(
url="ping",
method='GET',
expected_response_code=204
)
return response.headers['X-Influxdb-Version'] | Check connectivity to InfluxDB.
:returns: The version of the InfluxDB the client is connected to
| ping | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def get_list_series(self, database=None, measurement=None, tags=None):
"""
Query SHOW SERIES returns the distinct series in your database.
FROM and WHERE clauses are optional.
:param measurement: Show all series from a measurement
:type id: string
:param tags: Show all series that match given tags
:type id: dict
:param database: the database from which the series should be
shows, defaults to client's current database
:type database: str
"""
database = database or self._database
query_str = 'SHOW SERIES'
if measurement:
query_str += ' FROM "{0}"'.format(measurement)
if tags:
query_str += ' WHERE ' + ' and '.join(["{0}='{1}'".format(k, v)
for k, v in tags.items()])
return list(
itertools.chain.from_iterable(
[
x.values()
for x in (self.query(query_str, database=database)
.get_points())
]
)
) |
Query SHOW SERIES returns the distinct series in your database.
FROM and WHERE clauses are optional.
:param measurement: Show all series from a measurement
:type id: string
:param tags: Show all series that match given tags
:type id: dict
:param database: the database from which the series should be
shows, defaults to client's current database
:type database: str
| get_list_series | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def create_retention_policy(self, name, duration, replication,
database=None,
default=False, shard_duration="0s"):
"""Create a retention policy for a database.
:param name: the name of the new retention policy
:type name: str
:param duration: the duration of the new retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
and mean 1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks,
respectively. For infinite retention - meaning the data will
never be deleted - use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the replication of the retention policy
:type replication: str
:param database: the database for which the retention policy is
created. Defaults to current client's database
:type database: str
:param default: whether or not to set the policy as default
:type default: bool
:param shard_duration: the shard duration of the retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and
mean 1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks,
respectively. Infinite retention is not supported. As a workaround,
specify a "1000w" duration to achieve an extremely long shard group
duration. Defaults to "0s", which is interpreted by the database
to mean the default value given the duration.
The minimum shard group duration is 1 hour.
:type shard_duration: str
"""
query_string = \
"CREATE RETENTION POLICY {0} ON {1} " \
"DURATION {2} REPLICATION {3} SHARD DURATION {4}".format(
quote_ident(name), quote_ident(database or self._database),
duration, replication, shard_duration)
if default is True:
query_string += " DEFAULT"
self.query(query_string, method="POST") | Create a retention policy for a database.
:param name: the name of the new retention policy
:type name: str
:param duration: the duration of the new retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
and mean 1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks,
respectively. For infinite retention - meaning the data will
never be deleted - use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the replication of the retention policy
:type replication: str
:param database: the database for which the retention policy is
created. Defaults to current client's database
:type database: str
:param default: whether or not to set the policy as default
:type default: bool
:param shard_duration: the shard duration of the retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and
mean 1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks,
respectively. Infinite retention is not supported. As a workaround,
specify a "1000w" duration to achieve an extremely long shard group
duration. Defaults to "0s", which is interpreted by the database
to mean the default value given the duration.
The minimum shard group duration is 1 hour.
:type shard_duration: str
| create_retention_policy | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def alter_retention_policy(self, name, database=None,
duration=None, replication=None,
default=None, shard_duration=None):
"""Modify an existing retention policy for a database.
:param name: the name of the retention policy to modify
:type name: str
:param database: the database for which the retention policy is
modified. Defaults to current client's database
:type database: str
:param duration: the new duration of the existing retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
and mean 1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks,
respectively. For infinite retention, meaning the data will
never be deleted, use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the new replication of the existing
retention policy
:type replication: int
:param default: whether or not to set the modified policy as default
:type default: bool
:param shard_duration: the shard duration of the retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and
mean 1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks,
respectively. Infinite retention is not supported. As a workaround,
specify a "1000w" duration to achieve an extremely long shard group
duration.
The minimum shard group duration is 1 hour.
:type shard_duration: str
.. note:: at least one of duration, replication, or default flag
should be set. Otherwise the operation will fail.
"""
query_string = (
"ALTER RETENTION POLICY {0} ON {1}"
).format(quote_ident(name),
quote_ident(database or self._database))
if duration:
query_string += " DURATION {0}".format(duration)
if shard_duration:
query_string += " SHARD DURATION {0}".format(shard_duration)
if replication:
query_string += " REPLICATION {0}".format(replication)
if default is True:
query_string += " DEFAULT"
self.query(query_string, method="POST") | Modify an existing retention policy for a database.
:param name: the name of the retention policy to modify
:type name: str
:param database: the database for which the retention policy is
modified. Defaults to current client's database
:type database: str
:param duration: the new duration of the existing retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
and mean 1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks,
respectively. For infinite retention, meaning the data will
never be deleted, use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the new replication of the existing
retention policy
:type replication: int
:param default: whether or not to set the modified policy as default
:type default: bool
:param shard_duration: the shard duration of the retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and
mean 1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks,
respectively. Infinite retention is not supported. As a workaround,
specify a "1000w" duration to achieve an extremely long shard group
duration.
The minimum shard group duration is 1 hour.
:type shard_duration: str
.. note:: at least one of duration, replication, or default flag
should be set. Otherwise the operation will fail.
| alter_retention_policy | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def drop_retention_policy(self, name, database=None):
"""Drop an existing retention policy for a database.
:param name: the name of the retention policy to drop
:type name: str
:param database: the database for which the retention policy is
dropped. Defaults to current client's database
:type database: str
"""
query_string = (
"DROP RETENTION POLICY {0} ON {1}"
).format(quote_ident(name), quote_ident(database or self._database))
self.query(query_string, method="POST") | Drop an existing retention policy for a database.
:param name: the name of the retention policy to drop
:type name: str
:param database: the database for which the retention policy is
dropped. Defaults to current client's database
:type database: str
| drop_retention_policy | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def get_list_retention_policies(self, database=None):
"""Get the list of retention policies for a database.
:param database: the name of the database, defaults to the client's
current database
:type database: str
:returns: all retention policies for the database
:rtype: list of dictionaries
:Example:
::
>> ret_policies = client.get_list_retention_policies('my_db')
>> ret_policies
[{u'default': True,
u'duration': u'0',
u'name': u'default',
u'replicaN': 1}]
"""
if not (database or self._database):
raise InfluxDBClientError(
"get_list_retention_policies() requires a database as a "
"parameter or the client to be using a database")
rsp = self.query(
"SHOW RETENTION POLICIES ON {0}".format(
quote_ident(database or self._database))
)
return list(rsp.get_points()) | Get the list of retention policies for a database.
:param database: the name of the database, defaults to the client's
current database
:type database: str
:returns: all retention policies for the database
:rtype: list of dictionaries
:Example:
::
>> ret_policies = client.get_list_retention_policies('my_db')
>> ret_policies
[{u'default': True,
u'duration': u'0',
u'name': u'default',
u'replicaN': 1}]
| get_list_retention_policies | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def create_user(self, username, password, admin=False):
"""Create a new user in InfluxDB.
:param username: the new username to create
:type username: str
:param password: the password for the new user
:type password: str
:param admin: whether the user should have cluster administration
privileges or not
:type admin: boolean
"""
text = "CREATE USER {0} WITH PASSWORD {1}".format(
quote_ident(username), quote_literal(password))
if admin:
text += ' WITH ALL PRIVILEGES'
self.query(text, method="POST") | Create a new user in InfluxDB.
:param username: the new username to create
:type username: str
:param password: the password for the new user
:type password: str
:param admin: whether the user should have cluster administration
privileges or not
:type admin: boolean
| create_user | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def set_user_password(self, username, password):
"""Change the password of an existing user.
:param username: the username who's password is being changed
:type username: str
:param password: the new password for the user
:type password: str
"""
text = "SET PASSWORD FOR {0} = {1}".format(
quote_ident(username), quote_literal(password))
self.query(text) | Change the password of an existing user.
:param username: the username who's password is being changed
:type username: str
:param password: the new password for the user
:type password: str
| set_user_password | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def delete_series(self, database=None, measurement=None, tags=None):
"""Delete series from a database.
Series must be filtered by either measurement and tags.
This method cannot be used to delete all series, use
`drop_database` instead.
:param database: the database from which the series should be
deleted, defaults to client's current database
:type database: str
:param measurement: Delete all series from a measurement
:type measurement: str
:param tags: Delete all series that match given tags
:type tags: dict
"""
database = database or self._database
query_str = 'DROP SERIES'
if measurement:
query_str += ' FROM {0}'.format(quote_ident(measurement))
if tags:
tag_eq_list = ["{0}={1}".format(quote_ident(k), quote_literal(v))
for k, v in tags.items()]
query_str += ' WHERE ' + ' AND '.join(tag_eq_list)
self.query(query_str, database=database, method="POST") | Delete series from a database.
Series must be filtered by either measurement and tags.
This method cannot be used to delete all series, use
`drop_database` instead.
:param database: the database from which the series should be
deleted, defaults to client's current database
:type database: str
:param measurement: Delete all series from a measurement
:type measurement: str
:param tags: Delete all series that match given tags
:type tags: dict
| delete_series | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def grant_privilege(self, privilege, database, username):
"""Grant a privilege on a database to a user.
:param privilege: the privilege to grant, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to grant the privilege on
:type database: str
:param username: the username to grant the privilege to
:type username: str
"""
text = "GRANT {0} ON {1} TO {2}".format(privilege,
quote_ident(database),
quote_ident(username))
self.query(text, method="POST") | Grant a privilege on a database to a user.
:param privilege: the privilege to grant, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to grant the privilege on
:type database: str
:param username: the username to grant the privilege to
:type username: str
| grant_privilege | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def revoke_privilege(self, privilege, database, username):
"""Revoke a privilege on a database from a user.
:param privilege: the privilege to revoke, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to revoke the privilege on
:type database: str
:param username: the username to revoke the privilege from
:type username: str
"""
text = "REVOKE {0} ON {1} FROM {2}".format(privilege,
quote_ident(database),
quote_ident(username))
self.query(text, method="POST") | Revoke a privilege on a database from a user.
:param privilege: the privilege to revoke, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to revoke the privilege on
:type database: str
:param username: the username to revoke the privilege from
:type username: str
| revoke_privilege | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def get_list_continuous_queries(self):
"""Get the list of continuous queries in InfluxDB.
:return: all CQs in InfluxDB
:rtype: list of dictionaries
:Example:
::
>> cqs = client.get_list_cqs()
>> cqs
[
{
u'db1': []
},
{
u'db2': [
{
u'name': u'vampire',
u'query': u'CREATE CONTINUOUS QUERY vampire ON '
'mydb BEGIN SELECT count(dracula) INTO '
'mydb.autogen.all_of_them FROM '
'mydb.autogen.one GROUP BY time(5m) END'
}
]
}
]
"""
query_string = "SHOW CONTINUOUS QUERIES"
return [{sk[0]: list(p)} for sk, p in self.query(query_string).items()] | Get the list of continuous queries in InfluxDB.
:return: all CQs in InfluxDB
:rtype: list of dictionaries
:Example:
::
>> cqs = client.get_list_cqs()
>> cqs
[
{
u'db1': []
},
{
u'db2': [
{
u'name': u'vampire',
u'query': u'CREATE CONTINUOUS QUERY vampire ON '
'mydb BEGIN SELECT count(dracula) INTO '
'mydb.autogen.all_of_them FROM '
'mydb.autogen.one GROUP BY time(5m) END'
}
]
}
]
| get_list_continuous_queries | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def create_continuous_query(self, name, select, database=None,
resample_opts=None):
r"""Create a continuous query for a database.
:param name: the name of continuous query to create
:type name: str
:param select: select statement for the continuous query
:type select: str
:param database: the database for which the continuous query is
created. Defaults to current client's database
:type database: str
:param resample_opts: resample options
:type resample_opts: str
:Example:
::
>> select_clause = 'SELECT mean("value") INTO "cpu_mean" ' \
... 'FROM "cpu" GROUP BY time(1m)'
>> client.create_continuous_query(
... 'cpu_mean', select_clause, 'db_name', 'EVERY 10s FOR 2m'
... )
>> client.get_list_continuous_queries()
[
{
'db_name': [
{
'name': 'cpu_mean',
'query': 'CREATE CONTINUOUS QUERY "cpu_mean" '
'ON "db_name" '
'RESAMPLE EVERY 10s FOR 2m '
'BEGIN SELECT mean("value") '
'INTO "cpu_mean" FROM "cpu" '
'GROUP BY time(1m) END'
}
]
}
]
"""
query_string = (
"CREATE CONTINUOUS QUERY {0} ON {1}{2} BEGIN {3} END"
).format(quote_ident(name), quote_ident(database or self._database),
' RESAMPLE ' + resample_opts if resample_opts else '', select)
self.query(query_string) | Create a continuous query for a database.
:param name: the name of continuous query to create
:type name: str
:param select: select statement for the continuous query
:type select: str
:param database: the database for which the continuous query is
created. Defaults to current client's database
:type database: str
:param resample_opts: resample options
:type resample_opts: str
:Example:
::
>> select_clause = 'SELECT mean("value") INTO "cpu_mean" ' \
... 'FROM "cpu" GROUP BY time(1m)'
>> client.create_continuous_query(
... 'cpu_mean', select_clause, 'db_name', 'EVERY 10s FOR 2m'
... )
>> client.get_list_continuous_queries()
[
{
'db_name': [
{
'name': 'cpu_mean',
'query': 'CREATE CONTINUOUS QUERY "cpu_mean" '
'ON "db_name" '
'RESAMPLE EVERY 10s FOR 2m '
'BEGIN SELECT mean("value") '
'INTO "cpu_mean" FROM "cpu" '
'GROUP BY time(1m) END'
}
]
}
]
| create_continuous_query | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def drop_continuous_query(self, name, database=None):
"""Drop an existing continuous query for a database.
:param name: the name of continuous query to drop
:type name: str
:param database: the database for which the continuous query is
dropped. Defaults to current client's database
:type database: str
"""
query_string = (
"DROP CONTINUOUS QUERY {0} ON {1}"
).format(quote_ident(name), quote_ident(database or self._database))
self.query(query_string) | Drop an existing continuous query for a database.
:param name: the name of continuous query to drop
:type name: str
:param database: the database for which the continuous query is
dropped. Defaults to current client's database
:type database: str
| drop_continuous_query | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def send_packet(self, packet, protocol='json', time_precision=None):
"""Send an UDP packet.
:param packet: the packet to be sent
:type packet: (if protocol is 'json') dict
(if protocol is 'line') list of line protocol strings
:param protocol: protocol of input data, either 'json' or 'line'
:type protocol: str
:param time_precision: Either 's', 'm', 'ms' or 'u', defaults to None
:type time_precision: str
"""
if protocol == 'json':
data = make_lines(packet, time_precision).encode('utf-8')
elif protocol == 'line':
data = ('\n'.join(packet) + '\n').encode('utf-8')
self.udp_socket.sendto(data, (self._host, self._udp_port)) | Send an UDP packet.
:param packet: the packet to be sent
:type packet: (if protocol is 'json') dict
(if protocol is 'line') list of line protocol strings
:param protocol: protocol of input data, either 'json' or 'line'
:type protocol: str
:param time_precision: Either 's', 'm', 'ms' or 'u', defaults to None
:type time_precision: str
| send_packet | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def _parse_dsn(dsn):
"""Parse data source name.
This is a helper function to split the data source name provided in
the from_dsn classmethod
"""
conn_params = urlparse(dsn)
init_args = {}
scheme_info = conn_params.scheme.split('+')
if len(scheme_info) == 1:
scheme = scheme_info[0]
modifier = None
else:
modifier, scheme = scheme_info
if scheme != 'influxdb':
raise ValueError('Unknown scheme "{0}".'.format(scheme))
if modifier:
if modifier == 'udp':
init_args['use_udp'] = True
elif modifier == 'https':
init_args['ssl'] = True
else:
raise ValueError('Unknown modifier "{0}".'.format(modifier))
netlocs = conn_params.netloc.split(',')
init_args['hosts'] = []
for netloc in netlocs:
parsed = _parse_netloc(netloc)
init_args['hosts'].append((parsed['host'], int(parsed['port'])))
init_args['username'] = parsed['username']
init_args['password'] = parsed['password']
if conn_params.path and len(conn_params.path) > 1:
init_args['database'] = conn_params.path[1:]
return init_args | Parse data source name.
This is a helper function to split the data source name provided in
the from_dsn classmethod
| _parse_dsn | python | influxdata/influxdb-python | influxdb/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/client.py | MIT |
def __new__(cls, *args, **kwargs):
"""Initialize class attributes for subsequent constructor calls.
:note: *args and **kwargs are not explicitly used in this function,
but needed for Python 2 compatibility.
"""
if not cls.__initialized__:
cls.__initialized__ = True
try:
_meta = getattr(cls, 'Meta')
except AttributeError:
raise AttributeError(
'Missing Meta class in {0}.'.format(
cls.__name__))
for attr in ['series_name', 'fields', 'tags']:
try:
setattr(cls, '_' + attr, getattr(_meta, attr))
except AttributeError:
raise AttributeError(
'Missing {0} in {1} Meta class.'.format(
attr,
cls.__name__))
cls._autocommit = getattr(_meta, 'autocommit', False)
cls._time_precision = getattr(_meta, 'time_precision', None)
allowed_time_precisions = ['h', 'm', 's', 'ms', 'u', 'ns', None]
if cls._time_precision not in allowed_time_precisions:
raise AttributeError(
'In {}, time_precision is set, but invalid use any of {}.'
.format(cls.__name__, ','.join(allowed_time_precisions)))
cls._retention_policy = getattr(_meta, 'retention_policy', None)
cls._client = getattr(_meta, 'client', None)
if cls._autocommit and not cls._client:
raise AttributeError(
'In {0}, autocommit is set to True, but no client is set.'
.format(cls.__name__))
try:
cls._bulk_size = getattr(_meta, 'bulk_size')
if cls._bulk_size < 1 and cls._autocommit:
warn(
'Definition of bulk_size in {0} forced to 1, '
'was less than 1.'.format(cls.__name__))
cls._bulk_size = 1
except AttributeError:
cls._bulk_size = -1
else:
if not cls._autocommit:
warn(
'Definition of bulk_size in {0} has no affect because'
' autocommit is false.'.format(cls.__name__))
cls._datapoints = defaultdict(list)
if 'time' in cls._fields:
cls._fields.remove('time')
cls._type = namedtuple(cls.__name__,
['time'] + cls._tags + cls._fields)
cls._type.__new__.__defaults__ = (None,) * len(cls._fields)
return super(SeriesHelper, cls).__new__(cls) | Initialize class attributes for subsequent constructor calls.
:note: *args and **kwargs are not explicitly used in this function,
but needed for Python 2 compatibility.
| __new__ | python | influxdata/influxdb-python | influxdb/helper.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/helper.py | MIT |
def __init__(self, **kw):
"""Call to constructor creates a new data point.
:note: Data points written when `bulk_size` is reached per Helper.
:warning: Data points are *immutable* (`namedtuples`).
"""
cls = self.__class__
timestamp = kw.pop('time', self._current_timestamp())
tags = set(cls._tags)
fields = set(cls._fields)
keys = set(kw.keys())
# all tags should be passed, and keys - tags should be a subset of keys
if not (tags <= keys):
raise NameError(
'Expected arguments to contain all tags {0}, instead got {1}.'
.format(cls._tags, kw.keys()))
if not (keys - tags <= fields):
raise NameError('Got arguments not in tags or fields: {0}'
.format(keys - tags - fields))
cls._datapoints[cls._series_name.format(**kw)].append(
cls._type(time=timestamp, **kw)
)
if cls._autocommit and \
sum(len(series) for series in cls._datapoints.values()) \
>= cls._bulk_size:
cls.commit() | Call to constructor creates a new data point.
:note: Data points written when `bulk_size` is reached per Helper.
:warning: Data points are *immutable* (`namedtuples`).
| __init__ | python | influxdata/influxdb-python | influxdb/helper.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/helper.py | MIT |
def _json_body_(cls):
"""Return the JSON body of given datapoints.
:return: JSON body of these datapoints.
"""
json = []
if not cls.__initialized__:
cls._reset_()
for series_name, data in six.iteritems(cls._datapoints):
for point in data:
json_point = {
"measurement": series_name,
"fields": {},
"tags": {},
"time": getattr(point, "time")
}
for field in cls._fields:
value = getattr(point, field)
if value is not None:
json_point['fields'][field] = value
for tag in cls._tags:
json_point['tags'][tag] = getattr(point, tag)
json.append(json_point)
return json | Return the JSON body of given datapoints.
:return: JSON body of these datapoints.
| _json_body_ | python | influxdata/influxdb-python | influxdb/helper.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/helper.py | MIT |
def _get_unicode(data, force=False):
"""Try to return a text aka unicode object from the given data."""
if isinstance(data, binary_type):
return data.decode('utf-8')
if data is None:
return ''
if force:
if PY2:
return unicode(data)
return str(data)
return data | Try to return a text aka unicode object from the given data. | _get_unicode | python | influxdata/influxdb-python | influxdb/line_protocol.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/line_protocol.py | MIT |
def make_line(measurement, tags=None, fields=None, time=None, precision=None):
"""Extract the actual point from a given measurement line."""
tags = tags or {}
fields = fields or {}
line = _escape_tag(_get_unicode(measurement))
# tags should be sorted client-side to take load off server
tag_list = []
for tag_key in sorted(tags.keys()):
key = _escape_tag(tag_key)
value = _escape_tag(tags[tag_key])
if key != '' and value != '':
tag_list.append(
"{key}={value}".format(key=key, value=value)
)
if tag_list:
line += ',' + ','.join(tag_list)
field_list = []
for field_key in sorted(fields.keys()):
key = _escape_tag(field_key)
value = _escape_value(fields[field_key])
if key != '' and value != '':
field_list.append("{key}={value}".format(
key=key,
value=value
))
if field_list:
line += ' ' + ','.join(field_list)
if time is not None:
timestamp = _get_unicode(str(int(
_convert_timestamp(time, precision)
)))
line += ' ' + timestamp
return line | Extract the actual point from a given measurement line. | make_line | python | influxdata/influxdb-python | influxdb/line_protocol.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/line_protocol.py | MIT |
def make_lines(data, precision=None):
"""Extract points from given dict.
Extracts the points from the given dict and returns a Unicode string
matching the line protocol introduced in InfluxDB 0.9.0.
"""
lines = []
static_tags = data.get('tags')
for point in data['points']:
if static_tags:
tags = dict(static_tags) # make a copy, since we'll modify
tags.update(point.get('tags') or {})
else:
tags = point.get('tags') or {}
line = make_line(
point.get('measurement', data.get('measurement')),
tags=tags,
fields=point.get('fields'),
precision=precision,
time=point.get('time')
)
lines.append(line)
return '\n'.join(lines) + '\n' | Extract points from given dict.
Extracts the points from the given dict and returns a Unicode string
matching the line protocol introduced in InfluxDB 0.9.0.
| make_lines | python | influxdata/influxdb-python | influxdb/line_protocol.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/line_protocol.py | MIT |
def __getitem__(self, key):
"""Retrieve the series name or specific set based on key.
:param key: Either a series name, or a tags_dict, or
a 2-tuple(series_name, tags_dict).
If the series name is None (or not given) then any serie
matching the eventual given tags will be given its points
one after the other.
To get the points of every series in this resultset then
you have to provide None as key.
:return: A generator yielding `Point`s matching the given key.
NB:
The order in which the points are yielded is actually undefined but
it might change..
"""
warnings.warn(
("ResultSet's ``__getitem__`` method will be deprecated. Use"
"``get_points`` instead."),
DeprecationWarning
)
if isinstance(key, tuple):
if len(key) != 2:
raise TypeError('only 2-tuples allowed')
name = key[0]
tags = key[1]
if not isinstance(tags, dict) and tags is not None:
raise TypeError('tags should be a dict')
elif isinstance(key, dict):
name = None
tags = key
else:
name = key
tags = None
return self.get_points(name, tags) | Retrieve the series name or specific set based on key.
:param key: Either a series name, or a tags_dict, or
a 2-tuple(series_name, tags_dict).
If the series name is None (or not given) then any serie
matching the eventual given tags will be given its points
one after the other.
To get the points of every series in this resultset then
you have to provide None as key.
:return: A generator yielding `Point`s matching the given key.
NB:
The order in which the points are yielded is actually undefined but
it might change..
| __getitem__ | python | influxdata/influxdb-python | influxdb/resultset.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/resultset.py | MIT |
def get_points(self, measurement=None, tags=None):
"""Return a generator for all the points that match the given filters.
:param measurement: The measurement name
:type measurement: str
:param tags: Tags to look for
:type tags: dict
:return: Points generator
"""
# Raise error if measurement is not str or bytes
if not isinstance(measurement,
(bytes, type(b''.decode()), type(None))):
raise TypeError('measurement must be an str or None')
for series in self._get_series():
series_name = series.get('measurement',
series.get('name', 'results'))
if series_name is None:
# this is a "system" query or a query which
# doesn't return a name attribute.
# like 'show retention policies' ..
if tags is None:
for item in self._get_points_for_series(series):
yield item
elif measurement in (None, series_name):
# by default if no tags was provided then
# we will matches every returned series
series_tags = series.get('tags', {})
for item in self._get_points_for_series(series):
if tags is None or \
self._tag_matches(item, tags) or \
self._tag_matches(series_tags, tags):
yield item | Return a generator for all the points that match the given filters.
:param measurement: The measurement name
:type measurement: str
:param tags: Tags to look for
:type tags: dict
:return: Points generator
| get_points | python | influxdata/influxdb-python | influxdb/resultset.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/resultset.py | MIT |
def _tag_matches(tags, filter):
"""Check if all key/values in filter match in tags."""
for tag_name, tag_value in filter.items():
# using _sentinel as I'm not sure that "None"
# could be used, because it could be a valid
# series_tags value : when a series has no such tag
# then I think it's set to /null/None/.. TBC..
series_tag_value = tags.get(tag_name, _sentinel)
if series_tag_value != tag_value:
return False
return True | Check if all key/values in filter match in tags. | _tag_matches | python | influxdata/influxdb-python | influxdb/resultset.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/resultset.py | MIT |
def keys(self):
"""Return the list of keys in the ResultSet.
:return: List of keys. Keys are tuples (series_name, tags)
"""
keys = []
for series in self._get_series():
keys.append(
(series.get('measurement',
series.get('name', 'results')),
series.get('tags', None))
)
return keys | Return the list of keys in the ResultSet.
:return: List of keys. Keys are tuples (series_name, tags)
| keys | python | influxdata/influxdb-python | influxdb/resultset.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/resultset.py | MIT |
def items(self):
"""Return the set of items from the ResultSet.
:return: List of tuples, (key, generator)
"""
items = []
for series in self._get_series():
series_key = (series.get('measurement',
series.get('name', 'results')),
series.get('tags', None))
items.append(
(series_key, self._get_points_for_series(series))
)
return items | Return the set of items from the ResultSet.
:return: List of tuples, (key, generator)
| items | python | influxdata/influxdb-python | influxdb/resultset.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/resultset.py | MIT |
def _get_points_for_series(self, series):
"""Return generator of dict from columns and values of a series.
:param series: One series
:return: Generator of dicts
"""
for point in series.get('values', []):
yield self.point_from_cols_vals(
series['columns'],
point
) | Return generator of dict from columns and values of a series.
:param series: One series
:return: Generator of dicts
| _get_points_for_series | python | influxdata/influxdb-python | influxdb/resultset.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/resultset.py | MIT |
def point_from_cols_vals(cols, vals):
"""Create a dict from columns and values lists.
:param cols: List of columns
:param vals: List of values
:return: Dict where keys are columns.
"""
point = {}
for col_index, col_name in enumerate(cols):
point[col_name] = vals[col_index]
return point | Create a dict from columns and values lists.
:param cols: List of columns
:param vals: List of values
:return: Dict where keys are columns.
| point_from_cols_vals | python | influxdata/influxdb-python | influxdb/resultset.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/resultset.py | MIT |
def write_points(self,
dataframe,
measurement,
tags=None,
tag_columns=None,
field_columns=None,
time_precision=None,
database=None,
retention_policy=None,
batch_size=None,
protocol='line',
numeric_precision=None):
"""Write to multiple time series names.
:param dataframe: data points in a DataFrame
:param measurement: name of measurement
:param tags: dictionary of tags, with string key-values
:param tag_columns: [Optional, default None] List of data tag names
:param field_columns: [Options, default None] List of data field names
:param time_precision: [Optional, default None] Either 's', 'ms', 'u'
or 'n'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
:param protocol: Protocol for writing data. Either 'line' or 'json'.
:param numeric_precision: Precision for floating point values.
Either None, 'full' or some int, where int is the desired decimal
precision. 'full' preserves full precision for int and float
datatypes. Defaults to None, which preserves 14-15 significant
figures for float and all significant figures for int datatypes.
"""
if tag_columns is None:
tag_columns = []
if field_columns is None:
field_columns = []
if batch_size:
number_batches = int(math.ceil(len(dataframe) / float(batch_size)))
for batch in range(number_batches):
start_index = batch * batch_size
end_index = (batch + 1) * batch_size
if protocol == 'line':
points = self._convert_dataframe_to_lines(
dataframe.iloc[start_index:end_index].copy(),
measurement=measurement,
global_tags=tags,
time_precision=time_precision,
tag_columns=tag_columns,
field_columns=field_columns,
numeric_precision=numeric_precision)
else:
points = self._convert_dataframe_to_json(
dataframe.iloc[start_index:end_index].copy(),
measurement=measurement,
tags=tags,
time_precision=time_precision,
tag_columns=tag_columns,
field_columns=field_columns)
super(DataFrameClient, self).write_points(
points,
time_precision,
database,
retention_policy,
protocol=protocol)
return True
if protocol == 'line':
points = self._convert_dataframe_to_lines(
dataframe,
measurement=measurement,
global_tags=tags,
tag_columns=tag_columns,
field_columns=field_columns,
time_precision=time_precision,
numeric_precision=numeric_precision)
else:
points = self._convert_dataframe_to_json(
dataframe,
measurement=measurement,
tags=tags,
time_precision=time_precision,
tag_columns=tag_columns,
field_columns=field_columns)
super(DataFrameClient, self).write_points(
points,
time_precision,
database,
retention_policy,
protocol=protocol)
return True | Write to multiple time series names.
:param dataframe: data points in a DataFrame
:param measurement: name of measurement
:param tags: dictionary of tags, with string key-values
:param tag_columns: [Optional, default None] List of data tag names
:param field_columns: [Options, default None] List of data field names
:param time_precision: [Optional, default None] Either 's', 'ms', 'u'
or 'n'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
:param protocol: Protocol for writing data. Either 'line' or 'json'.
:param numeric_precision: Precision for floating point values.
Either None, 'full' or some int, where int is the desired decimal
precision. 'full' preserves full precision for int and float
datatypes. Defaults to None, which preserves 14-15 significant
figures for float and all significant figures for int datatypes.
| write_points | python | influxdata/influxdb-python | influxdb/_dataframe_client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/_dataframe_client.py | MIT |
def query(self,
query,
params=None,
bind_params=None,
epoch=None,
expected_response_code=200,
database=None,
raise_errors=True,
chunked=False,
chunk_size=0,
method="GET",
dropna=True,
data_frame_index=None):
"""
Query data into a DataFrame.
.. danger::
In order to avoid injection vulnerabilities (similar to `SQL
injection <https://www.owasp.org/index.php/SQL_Injection>`_
vulnerabilities), do not directly include untrusted data into the
``query`` parameter, use ``bind_params`` instead.
:param query: the actual query string
:param params: additional parameters for the request, defaults to {}
:param bind_params: bind parameters for the query:
any variable in the query written as ``'$var_name'`` will be
replaced with ``bind_params['var_name']``. Only works in the
``WHERE`` clause and takes precedence over ``params['params']``
:param epoch: response timestamps to be in epoch format either 'h',
'm', 's', 'ms', 'u', or 'ns',defaults to `None` which is
RFC3339 UTC format with nanosecond precision
:param expected_response_code: the expected status code of response,
defaults to 200
:param database: database to query, defaults to None
:param raise_errors: Whether or not to raise exceptions when InfluxDB
returns errors, defaults to True
:param chunked: Enable to use chunked responses from InfluxDB.
With ``chunked`` enabled, one ResultSet is returned per chunk
containing all results within that chunk
:param chunk_size: Size of each chunk to tell InfluxDB to use.
:param dropna: drop columns where all values are missing
:param data_frame_index: the list of columns that
are used as DataFrame index
:returns: the queried data
:rtype: :class:`~.ResultSet`
"""
query_args = dict(params=params,
bind_params=bind_params,
epoch=epoch,
expected_response_code=expected_response_code,
raise_errors=raise_errors,
chunked=chunked,
database=database,
method=method,
chunk_size=chunk_size)
results = super(DataFrameClient, self).query(query, **query_args)
if query.strip().upper().startswith("SELECT"):
if len(results) > 0:
return self._to_dataframe(results, dropna,
data_frame_index=data_frame_index)
else:
return {}
else:
return results |
Query data into a DataFrame.
.. danger::
In order to avoid injection vulnerabilities (similar to `SQL
injection <https://www.owasp.org/index.php/SQL_Injection>`_
vulnerabilities), do not directly include untrusted data into the
``query`` parameter, use ``bind_params`` instead.
:param query: the actual query string
:param params: additional parameters for the request, defaults to {}
:param bind_params: bind parameters for the query:
any variable in the query written as ``'$var_name'`` will be
replaced with ``bind_params['var_name']``. Only works in the
``WHERE`` clause and takes precedence over ``params['params']``
:param epoch: response timestamps to be in epoch format either 'h',
'm', 's', 'ms', 'u', or 'ns',defaults to `None` which is
RFC3339 UTC format with nanosecond precision
:param expected_response_code: the expected status code of response,
defaults to 200
:param database: database to query, defaults to None
:param raise_errors: Whether or not to raise exceptions when InfluxDB
returns errors, defaults to True
:param chunked: Enable to use chunked responses from InfluxDB.
With ``chunked`` enabled, one ResultSet is returned per chunk
containing all results within that chunk
:param chunk_size: Size of each chunk to tell InfluxDB to use.
:param dropna: drop columns where all values are missing
:param data_frame_index: the list of columns that
are used as DataFrame index
:returns: the queried data
:rtype: :class:`~.ResultSet`
| query | python | influxdata/influxdb-python | influxdb/_dataframe_client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/_dataframe_client.py | MIT |
def loads(s):
"""Generate a sequence of JSON values from a string."""
_decoder = json.JSONDecoder()
while s:
s = s.strip()
obj, pos = _decoder.raw_decode(s)
if not pos:
raise ValueError('no JSON object found at %i' % pos)
yield obj
s = s[pos:] | Generate a sequence of JSON values from a string. | loads | python | influxdata/influxdb-python | influxdb/influxdb08/chunked_json.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/chunked_json.py | MIT |
def from_dsn(dsn, **kwargs):
r"""Return an instaance of InfluxDBClient from given data source name.
Returns an instance of InfluxDBClient from the provided data source
name. Supported schemes are "influxdb", "https+influxdb",
"udp+influxdb". Parameters for the InfluxDBClient constructor may be
also be passed to this function.
Examples:
>> cli = InfluxDBClient.from_dsn('influxdb://username:password@\
... localhost:8086/databasename', timeout=5)
>> type(cli)
<class 'influxdb.client.InfluxDBClient'>
>> cli = InfluxDBClient.from_dsn('udp+influxdb://username:pass@\
... localhost:8086/databasename', timeout=5, udp_port=159)
>> print('{0._baseurl} - {0.use_udp} {0.udp_port}'.format(cli))
http://localhost:8086 - True 159
:param dsn: data source name
:type dsn: string
:param **kwargs: additional parameters for InfluxDBClient.
:type **kwargs: dict
:note: parameters provided in **kwargs may override dsn parameters.
:note: when using "udp+influxdb" the specified port (if any) will be
used for the TCP connection; specify the udp port with the additional
udp_port parameter (cf. examples).
:raise ValueError: if the provided DSN has any unexpected value.
"""
init_args = {}
conn_params = urlparse(dsn)
scheme_info = conn_params.scheme.split('+')
if len(scheme_info) == 1:
scheme = scheme_info[0]
modifier = None
else:
modifier, scheme = scheme_info
if scheme != 'influxdb':
raise ValueError('Unknown scheme "{0}".'.format(scheme))
if modifier:
if modifier == 'udp':
init_args['use_udp'] = True
elif modifier == 'https':
init_args['ssl'] = True
else:
raise ValueError('Unknown modifier "{0}".'.format(modifier))
if conn_params.hostname:
init_args['host'] = conn_params.hostname
if conn_params.port:
init_args['port'] = conn_params.port
if conn_params.username:
init_args['username'] = conn_params.username
if conn_params.password:
init_args['password'] = conn_params.password
if conn_params.path and len(conn_params.path) > 1:
init_args['database'] = conn_params.path[1:]
init_args.update(kwargs)
return InfluxDBClient(**init_args) | Return an instaance of InfluxDBClient from given data source name.
Returns an instance of InfluxDBClient from the provided data source
name. Supported schemes are "influxdb", "https+influxdb",
"udp+influxdb". Parameters for the InfluxDBClient constructor may be
also be passed to this function.
Examples:
>> cli = InfluxDBClient.from_dsn('influxdb://username:password@\
... localhost:8086/databasename', timeout=5)
>> type(cli)
<class 'influxdb.client.InfluxDBClient'>
>> cli = InfluxDBClient.from_dsn('udp+influxdb://username:pass@\
... localhost:8086/databasename', timeout=5, udp_port=159)
>> print('{0._baseurl} - {0.use_udp} {0.udp_port}'.format(cli))
http://localhost:8086 - True 159
:param dsn: data source name
:type dsn: string
:param **kwargs: additional parameters for InfluxDBClient.
:type **kwargs: dict
:note: parameters provided in **kwargs may override dsn parameters.
:note: when using "udp+influxdb" the specified port (if any) will be
used for the TCP connection; specify the udp port with the additional
udp_port parameter (cf. examples).
:raise ValueError: if the provided DSN has any unexpected value.
| from_dsn | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def switch_user(self, username, password):
"""Change client username.
:param username: the new username to switch to
:type username: string
:param password: the new password to switch to
:type password: string
"""
self._username = username
self._password = password | Change client username.
:param username: the new username to switch to
:type username: string
:param password: the new password to switch to
:type password: string
| switch_user | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def write(self, data):
"""Provide as convenience for influxdb v0.9.0, this may change."""
self.request(
url="write",
method='POST',
params=None,
data=data,
expected_response_code=200
)
return True | Provide as convenience for influxdb v0.9.0, this may change. | write | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def write_points(self, data, time_precision='s', *args, **kwargs):
"""Write to multiple time series names.
An example data blob is:
data = [
{
"points": [
[
12
]
],
"name": "cpu_load_short",
"columns": [
"value"
]
}
]
:param data: A list of dicts in InfluxDB 0.8.x data format.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
"""
def list_chunks(data_list, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(data_list), n):
yield data_list[i:i + n]
batch_size = kwargs.get('batch_size')
if batch_size and batch_size > 0:
for item in data:
name = item.get('name')
columns = item.get('columns')
point_list = item.get('points', [])
for batch in list_chunks(point_list, batch_size):
item = [{
"points": batch,
"name": name,
"columns": columns
}]
self._write_points(
data=item,
time_precision=time_precision)
return True
return self._write_points(data=data,
time_precision=time_precision) | Write to multiple time series names.
An example data blob is:
data = [
{
"points": [
[
12
]
],
"name": "cpu_load_short",
"columns": [
"value"
]
}
]
:param data: A list of dicts in InfluxDB 0.8.x data format.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
| write_points | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def write_points_with_precision(self, data, time_precision='s'):
"""Write to multiple time series names.
DEPRECATED.
"""
warnings.warn(
"write_points_with_precision is deprecated, and will be removed "
"in future versions. Please use "
"``InfluxDBClient.write_points(time_precision='..')`` instead.",
FutureWarning)
return self._write_points(data=data, time_precision=time_precision) | Write to multiple time series names.
DEPRECATED.
| write_points_with_precision | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def create_database(self, database):
"""Create a database on the InfluxDB server.
:param database: the name of the database to create
:type database: string
:rtype: boolean
"""
url = "db"
data = {'name': database}
self.request(
url=url,
method='POST',
data=data,
expected_response_code=201
)
return True | Create a database on the InfluxDB server.
:param database: the name of the database to create
:type database: string
:rtype: boolean
| create_database | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def delete_database(self, database):
"""Drop a database on the InfluxDB server.
:param database: the name of the database to delete
:type database: string
:rtype: boolean
"""
url = "db/{0}".format(database)
self.request(
url=url,
method='DELETE',
expected_response_code=204
)
return True | Drop a database on the InfluxDB server.
:param database: the name of the database to delete
:type database: string
:rtype: boolean
| delete_database | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def get_database_list(self):
"""Get the list of databases.
DEPRECATED.
"""
warnings.warn(
"get_database_list is deprecated, and will be removed "
"in future versions. Please use "
"``InfluxDBClient.get_list_database`` instead.",
FutureWarning)
return self.get_list_database() | Get the list of databases.
DEPRECATED.
| get_database_list | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def delete_series(self, series):
"""Drop a series on the InfluxDB server.
:param series: the name of the series to delete
:type series: string
:rtype: boolean
"""
url = "db/{0}/series/{1}".format(
self._database,
series
)
self.request(
url=url,
method='DELETE',
expected_response_code=204
)
return True | Drop a series on the InfluxDB server.
:param series: the name of the series to delete
:type series: string
:rtype: boolean
| delete_series | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def get_list_series(self):
"""Get a list of all time series in a database."""
response = self._query('list series')
return [series[1] for series in response[0]['points']] | Get a list of all time series in a database. | get_list_series | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def add_database_user(self, new_username, new_password, permissions=None):
"""Add database user.
:param permissions: A ``(readFrom, writeTo)`` tuple
"""
url = "db/{0}/users".format(self._database)
data = {
'name': new_username,
'password': new_password
}
if permissions:
try:
data['readFrom'], data['writeTo'] = permissions
except (ValueError, TypeError):
raise TypeError(
"'permissions' must be (readFrom, writeTo) tuple"
)
self.request(
url=url,
method='POST',
data=data,
expected_response_code=200
)
return True | Add database user.
:param permissions: A ``(readFrom, writeTo)`` tuple
| add_database_user | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def alter_database_user(self, username, password=None, permissions=None):
"""Alter a database user and/or their permissions.
:param permissions: A ``(readFrom, writeTo)`` tuple
:raise TypeError: if permissions cannot be read.
:raise ValueError: if neither password nor permissions provided.
"""
url = "db/{0}/users/{1}".format(self._database, username)
if not password and not permissions:
raise ValueError("Nothing to alter for user {0}.".format(username))
data = {}
if password:
data['password'] = password
if permissions:
try:
data['readFrom'], data['writeTo'] = permissions
except (ValueError, TypeError):
raise TypeError(
"'permissions' must be (readFrom, writeTo) tuple"
)
self.request(
url=url,
method='POST',
data=data,
expected_response_code=200
)
if username == self._username:
self._password = password
return True | Alter a database user and/or their permissions.
:param permissions: A ``(readFrom, writeTo)`` tuple
:raise TypeError: if permissions cannot be read.
:raise ValueError: if neither password nor permissions provided.
| alter_database_user | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def send_packet(self, packet):
"""Send a UDP packet along the wire."""
data = json.dumps(packet)
byte = data.encode('utf-8')
self.udp_socket.sendto(byte, (self._host, self._udp_port)) | Send a UDP packet along the wire. | send_packet | python | influxdata/influxdb-python | influxdb/influxdb08/client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/client.py | MIT |
def __init__(self, ignore_nan=True, *args, **kwargs):
"""Initialize an instance of the DataFrameClient."""
super(DataFrameClient, self).__init__(*args, **kwargs)
try:
global pd
import pandas as pd
except ImportError as ex:
raise ImportError('DataFrameClient requires Pandas, '
'"{ex}" problem importing'.format(ex=str(ex)))
self.EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')
self.ignore_nan = ignore_nan | Initialize an instance of the DataFrameClient. | __init__ | python | influxdata/influxdb-python | influxdb/influxdb08/dataframe_client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/dataframe_client.py | MIT |
def write_points(self, data, *args, **kwargs):
"""Write to multiple time series names.
:param data: A dictionary mapping series names to pandas DataFrames
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
"""
batch_size = kwargs.get('batch_size')
time_precision = kwargs.get('time_precision', 's')
if batch_size:
kwargs.pop('batch_size') # don't hand over to InfluxDBClient
for key, data_frame in data.items():
number_batches = int(math.ceil(
len(data_frame) / float(batch_size)))
for batch in range(number_batches):
start_index = batch * batch_size
end_index = (batch + 1) * batch_size
outdata = [
self._convert_dataframe_to_json(
name=key,
dataframe=data_frame
.iloc[start_index:end_index].copy(),
time_precision=time_precision)]
InfluxDBClient.write_points(self, outdata, *args, **kwargs)
return True
outdata = [
self._convert_dataframe_to_json(name=key, dataframe=dataframe,
time_precision=time_precision)
for key, dataframe in data.items()]
return InfluxDBClient.write_points(self, outdata, *args, **kwargs) | Write to multiple time series names.
:param data: A dictionary mapping series names to pandas DataFrames
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
| write_points | python | influxdata/influxdb-python | influxdb/influxdb08/dataframe_client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/dataframe_client.py | MIT |
def write_points_with_precision(self, data, time_precision='s'):
"""Write to multiple time series names.
DEPRECATED
"""
warnings.warn(
"write_points_with_precision is deprecated, and will be removed "
"in future versions. Please use "
"``DataFrameClient.write_points(time_precision='..')`` instead.",
FutureWarning)
return self.write_points(data, time_precision='s') | Write to multiple time series names.
DEPRECATED
| write_points_with_precision | python | influxdata/influxdb-python | influxdb/influxdb08/dataframe_client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/dataframe_client.py | MIT |
def query(self, query, time_precision='s', chunked=False):
"""Query data into DataFrames.
Returns a DataFrame for a single time series and a map for multiple
time series with the time series as value and its name as key.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
retrieved in chunks, False otherwise.
"""
result = InfluxDBClient.query(self, query=query,
time_precision=time_precision,
chunked=chunked)
if len(result) == 0:
return result
elif len(result) == 1:
return self._to_dataframe(result[0], time_precision)
else:
ret = {}
for time_series in result:
ret[time_series['name']] = self._to_dataframe(time_series,
time_precision)
return ret | Query data into DataFrames.
Returns a DataFrame for a single time series and a map for multiple
time series with the time series as value and its name as key.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
retrieved in chunks, False otherwise.
| query | python | influxdata/influxdb-python | influxdb/influxdb08/dataframe_client.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/dataframe_client.py | MIT |
def __new__(cls, *args, **kwargs):
"""Initialize class attributes for subsequent constructor calls.
:note: *args and **kwargs are not explicitly used in this function,
but needed for Python 2 compatibility.
"""
if not cls.__initialized__:
cls.__initialized__ = True
try:
_meta = getattr(cls, 'Meta')
except AttributeError:
raise AttributeError(
'Missing Meta class in {0}.'.format(
cls.__name__))
for attr in ['series_name', 'fields']:
try:
setattr(cls, '_' + attr, getattr(_meta, attr))
except AttributeError:
raise AttributeError(
'Missing {0} in {1} Meta class.'.format(
attr,
cls.__name__))
cls._autocommit = getattr(_meta, 'autocommit', False)
cls._client = getattr(_meta, 'client', None)
if cls._autocommit and not cls._client:
raise AttributeError(
'In {0}, autocommit is set to True, but no client is set.'
.format(cls.__name__))
try:
cls._bulk_size = getattr(_meta, 'bulk_size')
if cls._bulk_size < 1 and cls._autocommit:
warn(
'Definition of bulk_size in {0} forced to 1, '
'was less than 1.'.format(cls.__name__))
cls._bulk_size = 1
except AttributeError:
cls._bulk_size = -1
else:
if not cls._autocommit:
warn(
'Definition of bulk_size in {0} has no affect because'
' autocommit is false.'.format(cls.__name__))
cls._datapoints = defaultdict(list)
cls._type = namedtuple(cls.__name__, cls._fields)
return super(SeriesHelper, cls).__new__(cls) | Initialize class attributes for subsequent constructor calls.
:note: *args and **kwargs are not explicitly used in this function,
but needed for Python 2 compatibility.
| __new__ | python | influxdata/influxdb-python | influxdb/influxdb08/helper.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/helper.py | MIT |
def __init__(self, **kw):
"""Create a new data point.
All fields must be present.
:note: Data points written when `bulk_size` is reached per Helper.
:warning: Data points are *immutable* (`namedtuples`).
"""
cls = self.__class__
if sorted(cls._fields) != sorted(kw.keys()):
raise NameError(
'Expected {0}, got {1}.'.format(
cls._fields,
kw.keys()))
cls._datapoints[cls._series_name.format(**kw)].append(cls._type(**kw))
if cls._autocommit and \
sum(len(series) for series in cls._datapoints.values()) \
>= cls._bulk_size:
cls.commit() | Create a new data point.
All fields must be present.
:note: Data points written when `bulk_size` is reached per Helper.
:warning: Data points are *immutable* (`namedtuples`).
| __init__ | python | influxdata/influxdb-python | influxdb/influxdb08/helper.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/helper.py | MIT |
def _json_body_(cls):
"""Return JSON body of the datapoints.
:return: JSON body of the datapoints.
"""
json = []
if not cls.__initialized__:
cls._reset_()
for series_name, data in six.iteritems(cls._datapoints):
json.append({'name': series_name,
'columns': cls._fields,
'points': [[getattr(point, k) for k in cls._fields]
for point in data]
})
return json | Return JSON body of the datapoints.
:return: JSON body of the datapoints.
| _json_body_ | python | influxdata/influxdb-python | influxdb/influxdb08/helper.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/influxdb08/helper.py | MIT |
def test_load(self):
"""Test reading a sequence of JSON values from a string."""
example_response = \
'{"results": [{"series": [{"measurement": "sdfsdfsdf", ' \
'"columns": ["time", "value"], "values": ' \
'[["2009-11-10T23:00:00Z", 0.64]]}]}, {"series": ' \
'[{"measurement": "cpu_load_short", "columns": ["time", "value"],'\
'"values": [["2009-11-10T23:00:00Z", 0.64]]}]}]}'
res = list(chunked_json.loads(example_response))
# import ipdb; ipdb.set_trace()
self.assertListEqual(
[
{
'results': [
{'series': [{
'values': [['2009-11-10T23:00:00Z', 0.64]],
'measurement': 'sdfsdfsdf',
'columns':
['time', 'value']}]},
{'series': [{
'values': [['2009-11-10T23:00:00Z', 0.64]],
'measurement': 'cpu_load_short',
'columns': ['time', 'value']}]}
]
}
],
res
) | Test reading a sequence of JSON values from a string. | test_load | python | influxdata/influxdb-python | influxdb/tests/chunked_json_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/chunked_json_test.py | MIT |
def request(*args, **kwargs):
"""Request content from the mocked session."""
c = content
# Check method
assert method == kwargs.get('method', 'GET')
if method == 'POST':
data = kwargs.get('data', None)
if data is not None:
# Data must be a string
assert isinstance(data, str)
# Data must be a JSON string
assert c == json.loads(data, strict=True)
c = data
# Anyway, Content must be a JSON string (or empty string)
if not isinstance(c, str):
c = json.dumps(c)
return _build_response_object(status_code=status_code, content=c) | Request content from the mocked session. | request | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def setUp(self):
"""Initialize an instance of TestInfluxDBClient object."""
# By default, raise exceptions on warnings
warnings.simplefilter('error', FutureWarning)
self.cli = InfluxDBClient('localhost', 8086, 'username', 'password')
self.dummy_points = [
{
"measurement": "cpu_load_short",
"tags": {
"host": "server01",
"region": "us-west"
},
"time": "2009-11-10T23:00:00.123456Z",
"fields": {
"value": 0.64
}
}
]
self.dsn_string = 'influxdb://uSr:pWd@my.host.fr:1886/db' | Initialize an instance of TestInfluxDBClient object. | setUp | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_scheme(self):
"""Set up the test schema for TestInfluxDBClient object."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'database')
self.assertEqual('http://host:8086', cli._baseurl)
cli = InfluxDBClient(
'host', 8086, 'username', 'password', 'database', ssl=True
)
self.assertEqual('https://host:8086', cli._baseurl)
cli = InfluxDBClient(
'host', 8086, 'username', 'password', 'database', ssl=True,
path="somepath"
)
self.assertEqual('https://host:8086/somepath', cli._baseurl)
cli = InfluxDBClient(
'host', 8086, 'username', 'password', 'database', ssl=True,
path=None
)
self.assertEqual('https://host:8086', cli._baseurl)
cli = InfluxDBClient(
'host', 8086, 'username', 'password', 'database', ssl=True,
path="/somepath"
)
self.assertEqual('https://host:8086/somepath', cli._baseurl) | Set up the test schema for TestInfluxDBClient object. | test_scheme | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_dsn(self):
"""Set up the test datasource name for TestInfluxDBClient object."""
cli = InfluxDBClient.from_dsn('influxdb://192.168.0.1:1886')
self.assertEqual('http://192.168.0.1:1886', cli._baseurl)
cli = InfluxDBClient.from_dsn(self.dsn_string)
self.assertEqual('http://my.host.fr:1886', cli._baseurl)
self.assertEqual('uSr', cli._username)
self.assertEqual('pWd', cli._password)
self.assertEqual('db', cli._database)
self.assertFalse(cli._use_udp)
cli = InfluxDBClient.from_dsn('udp+' + self.dsn_string)
self.assertTrue(cli._use_udp)
cli = InfluxDBClient.from_dsn('https+' + self.dsn_string)
self.assertEqual('https://my.host.fr:1886', cli._baseurl)
cli = InfluxDBClient.from_dsn('https+' + self.dsn_string,
**{'ssl': False})
self.assertEqual('http://my.host.fr:1886', cli._baseurl) | Set up the test datasource name for TestInfluxDBClient object. | test_dsn | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_cert(self):
"""Test mutual TLS authentication for TestInfluxDBClient object."""
cli = InfluxDBClient(ssl=True, cert='/etc/pki/tls/private/dummy.crt')
self.assertEqual(cli._session.cert, '/etc/pki/tls/private/dummy.crt')
with self.assertRaises(ValueError):
cli = InfluxDBClient(cert='/etc/pki/tls/private/dummy.crt') | Test mutual TLS authentication for TestInfluxDBClient object. | test_cert | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_switch_database(self):
"""Test switch database in TestInfluxDBClient object."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'database')
cli.switch_database('another_database')
self.assertEqual('another_database', cli._database) | Test switch database in TestInfluxDBClient object. | test_switch_database | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_switch_user(self):
"""Test switch user in TestInfluxDBClient object."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'database')
cli.switch_user('another_username', 'another_password')
self.assertEqual('another_username', cli._username)
self.assertEqual('another_password', cli._password) | Test switch user in TestInfluxDBClient object. | test_switch_user | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points(self):
"""Test write points for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
"http://localhost:8086/write",
status_code=204
)
cli = InfluxDBClient(database='db')
cli.write_points(
self.dummy_points,
)
self.assertEqual(
'cpu_load_short,host=server01,region=us-west '
'value=0.64 1257894000123456000\n',
m.last_request.body.decode('utf-8'),
) | Test write points for TestInfluxDBClient object. | test_write_points | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_gzip(self):
"""Test write points for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
"http://localhost:8086/write",
status_code=204
)
cli = InfluxDBClient(database='db', gzip=True)
cli.write_points(
self.dummy_points,
)
compressed = io.BytesIO()
with gzip.GzipFile(
compresslevel=9,
fileobj=compressed,
mode='w'
) as f:
f.write(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 1257894000123456000\n'
)
self.assertEqual(
m.last_request.body,
compressed.getvalue(),
) | Test write points for TestInfluxDBClient object. | test_write_points_gzip | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_toplevel_attributes(self):
"""Test write points attrs for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
"http://localhost:8086/write",
status_code=204
)
cli = InfluxDBClient(database='db')
cli.write_points(
self.dummy_points,
database='testdb',
tags={"tag": "hello"},
retention_policy="somepolicy"
)
self.assertEqual(
'cpu_load_short,host=server01,region=us-west,tag=hello '
'value=0.64 1257894000123456000\n',
m.last_request.body.decode('utf-8'),
) | Test write points attrs for TestInfluxDBClient object. | test_write_points_toplevel_attributes | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_batch(self):
"""Test write points batch for TestInfluxDBClient object."""
dummy_points = [
{"measurement": "cpu_usage", "tags": {"unit": "percent"},
"time": "2009-11-10T23:00:00Z", "fields": {"value": 12.34}},
{"measurement": "network", "tags": {"direction": "in"},
"time": "2009-11-10T23:00:00Z", "fields": {"value": 123.00}},
{"measurement": "network", "tags": {"direction": "out"},
"time": "2009-11-10T23:00:00Z", "fields": {"value": 12.00}}
]
expected_last_body = (
"network,direction=out,host=server01,region=us-west "
"value=12.0 1257894000000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = InfluxDBClient(database='db')
cli.write_points(points=dummy_points,
database='db',
tags={"host": "server01",
"region": "us-west"},
batch_size=2)
self.assertEqual(m.call_count, 2)
self.assertEqual(expected_last_body,
m.last_request.body.decode('utf-8')) | Test write points batch for TestInfluxDBClient object. | test_write_points_batch | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_batch_generator(self):
"""Test write points batch from a generator for TestInfluxDBClient."""
dummy_points = [
{"measurement": "cpu_usage", "tags": {"unit": "percent"},
"time": "2009-11-10T23:00:00Z", "fields": {"value": 12.34}},
{"measurement": "network", "tags": {"direction": "in"},
"time": "2009-11-10T23:00:00Z", "fields": {"value": 123.00}},
{"measurement": "network", "tags": {"direction": "out"},
"time": "2009-11-10T23:00:00Z", "fields": {"value": 12.00}}
]
dummy_points_generator = (point for point in dummy_points)
expected_last_body = (
"network,direction=out,host=server01,region=us-west "
"value=12.0 1257894000000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = InfluxDBClient(database='db')
cli.write_points(points=dummy_points_generator,
database='db',
tags={"host": "server01",
"region": "us-west"},
batch_size=2)
self.assertEqual(m.call_count, 2)
self.assertEqual(expected_last_body,
m.last_request.body.decode('utf-8')) | Test write points batch from a generator for TestInfluxDBClient. | test_write_points_batch_generator | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_udp(self):
"""Test write points UDP for TestInfluxDBClient object."""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = random.randint(4000, 8000)
s.bind(('0.0.0.0', port))
cli = InfluxDBClient(
'localhost', 8086, 'root', 'root',
'test', use_udp=True, udp_port=port
)
cli.write_points(self.dummy_points)
received_data, addr = s.recvfrom(1024)
self.assertEqual(
'cpu_load_short,host=server01,region=us-west '
'value=0.64 1257894000123456000\n',
received_data.decode()
) | Test write points UDP for TestInfluxDBClient object. | test_write_points_udp | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_fails(self):
"""Test write points fail for TestInfluxDBClient object."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
with _mocked_session(cli, 'post', 500):
cli.write_points([]) | Test write points fail for TestInfluxDBClient object. | test_write_points_fails | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_with_precision(self):
"""Test write points with precision for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
"http://localhost:8086/write",
status_code=204
)
cli = InfluxDBClient(database='db')
cli.write_points(self.dummy_points, time_precision='n')
self.assertEqual(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 1257894000123456000\n',
m.last_request.body,
)
cli.write_points(self.dummy_points, time_precision='u')
self.assertEqual(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 1257894000123456\n',
m.last_request.body,
)
cli.write_points(self.dummy_points, time_precision='ms')
self.assertEqual(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 1257894000123\n',
m.last_request.body,
)
cli.write_points(self.dummy_points, time_precision='s')
self.assertEqual(
b"cpu_load_short,host=server01,region=us-west "
b"value=0.64 1257894000\n",
m.last_request.body,
)
cli.write_points(self.dummy_points, time_precision='m')
self.assertEqual(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 20964900\n',
m.last_request.body,
)
cli.write_points(self.dummy_points, time_precision='h')
self.assertEqual(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 349415\n',
m.last_request.body,
) | Test write points with precision for TestInfluxDBClient object. | test_write_points_with_precision | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_with_consistency(self):
"""Test write points with consistency for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
'http://localhost:8086/write',
status_code=204
)
cli = InfluxDBClient(database='db')
cli.write_points(self.dummy_points, consistency='any')
self.assertEqual(
m.last_request.qs,
{'db': ['db'], 'consistency': ['any']}
) | Test write points with consistency for TestInfluxDBClient object. | test_write_points_with_consistency | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_with_precision_udp(self):
"""Test write points with precision for TestInfluxDBClient object."""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = random.randint(4000, 8000)
s.bind(('0.0.0.0', port))
cli = InfluxDBClient(
'localhost', 8086, 'root', 'root',
'test', use_udp=True, udp_port=port
)
cli.write_points(self.dummy_points, time_precision='n')
received_data, addr = s.recvfrom(1024)
self.assertEqual(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 1257894000123456000\n',
received_data,
)
cli.write_points(self.dummy_points, time_precision='u')
received_data, addr = s.recvfrom(1024)
self.assertEqual(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 1257894000123456\n',
received_data,
)
cli.write_points(self.dummy_points, time_precision='ms')
received_data, addr = s.recvfrom(1024)
self.assertEqual(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 1257894000123\n',
received_data,
)
cli.write_points(self.dummy_points, time_precision='s')
received_data, addr = s.recvfrom(1024)
self.assertEqual(
b"cpu_load_short,host=server01,region=us-west "
b"value=0.64 1257894000\n",
received_data,
)
cli.write_points(self.dummy_points, time_precision='m')
received_data, addr = s.recvfrom(1024)
self.assertEqual(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 20964900\n',
received_data,
)
cli.write_points(self.dummy_points, time_precision='h')
received_data, addr = s.recvfrom(1024)
self.assertEqual(
b'cpu_load_short,host=server01,region=us-west '
b'value=0.64 349415\n',
received_data,
) | Test write points with precision for TestInfluxDBClient object. | test_write_points_with_precision_udp | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_bad_precision(self):
"""Test write points w/bad precision TestInfluxDBClient object."""
cli = InfluxDBClient()
with self.assertRaisesRegexp(
Exception,
"Invalid time precision is given. "
"\(use 'n', 'u', 'ms', 's', 'm' or 'h'\)"
):
cli.write_points(
self.dummy_points,
time_precision='g'
) | Test write points w/bad precision TestInfluxDBClient object. | test_write_points_bad_precision | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_bad_consistency(self):
"""Test write points w/bad consistency value."""
cli = InfluxDBClient()
with self.assertRaises(ValueError):
cli.write_points(
self.dummy_points,
consistency='boo'
) | Test write points w/bad consistency value. | test_write_points_bad_consistency | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_write_points_with_precision_fails(self):
"""Test write points w/precision fail for TestInfluxDBClient object."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
with _mocked_session(cli, 'post', 500):
cli.write_points_with_precision([]) | Test write points w/precision fail for TestInfluxDBClient object. | test_write_points_with_precision_fails | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_query(self):
"""Test query method for TestInfluxDBClient object."""
example_response = (
'{"results": [{"series": [{"measurement": "sdfsdfsdf", '
'"columns": ["time", "value"], "values": '
'[["2009-11-10T23:00:00Z", 0.64]]}]}, {"series": '
'[{"measurement": "cpu_load_short", "columns": ["time", "value"], '
'"values": [["2009-11-10T23:00:00Z", 0.64]]}]}]}'
)
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.GET,
"http://localhost:8086/query",
text=example_response
)
rs = self.cli.query('select * from foo')
self.assertListEqual(
list(rs[0].get_points()),
[{'value': 0.64, 'time': '2009-11-10T23:00:00Z'}]
) | Test query method for TestInfluxDBClient object. | test_query | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_query_msgpack(self):
"""Test query method with a messagepack response."""
example_response = bytes(bytearray.fromhex(
"81a7726573756c74739182ac73746174656d656e745f696400a673657269"
"65739183a46e616d65a161a7636f6c756d6e7392a474696d65a176a67661"
"6c7565739192c70c05000000005d26178a019096c8cb3ff0000000000000"
))
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.GET,
"http://localhost:8086/query",
request_headers={"Accept": "application/x-msgpack"},
headers={"Content-Type": "application/x-msgpack"},
content=example_response
)
rs = self.cli.query('select * from a')
self.assertListEqual(
list(rs.get_points()),
[{'v': 1.0, 'time': '2019-07-10T16:51:22.026253Z'}]
) | Test query method with a messagepack response. | test_query_msgpack | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_query_chunked(self):
"""Test chunked query for TestInfluxDBClient object."""
cli = InfluxDBClient(database='db')
example_object = {
'points': [
[1415206250119, 40001, 667],
[1415206244555, 30001, 7],
[1415206228241, 20001, 788],
[1415206212980, 10001, 555],
[1415197271586, 10001, 23]
],
'measurement': 'foo',
'columns': [
'time',
'sequence_number',
'val'
]
}
example_response = \
json.dumps(example_object) + json.dumps(example_object)
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.GET,
"http://localhost:8086/db/db/series",
text=example_response
)
self.assertListEqual(
cli.query('select * from foo', chunked=True),
[example_object, example_object]
) | Test chunked query for TestInfluxDBClient object. | test_query_chunked | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_query_fail(self):
"""Test query failed for TestInfluxDBClient object."""
with _mocked_session(self.cli, 'get', 401):
self.cli.query('select column_one from foo;') | Test query failed for TestInfluxDBClient object. | test_query_fail | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_create_database(self):
"""Test create database for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
"http://localhost:8086/query",
text='{"results":[{}]}'
)
self.cli.create_database('new_db')
self.assertEqual(
m.last_request.qs['q'][0],
'create database "new_db"'
) | Test create database for TestInfluxDBClient object. | test_create_database | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_create_numeric_named_database(self):
"""Test create db w/numeric name for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
"http://localhost:8086/query",
text='{"results":[{}]}'
)
self.cli.create_database('123')
self.assertEqual(
m.last_request.qs['q'][0],
'create database "123"'
) | Test create db w/numeric name for TestInfluxDBClient object. | test_create_numeric_named_database | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_drop_database(self):
"""Test drop database for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
"http://localhost:8086/query",
text='{"results":[{}]}'
)
self.cli.drop_database('new_db')
self.assertEqual(
m.last_request.qs['q'][0],
'drop database "new_db"'
) | Test drop database for TestInfluxDBClient object. | test_drop_database | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_drop_measurement(self):
"""Test drop measurement for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
"http://localhost:8086/query",
text='{"results":[{}]}'
)
self.cli.drop_measurement('new_measurement')
self.assertEqual(
m.last_request.qs['q'][0],
'drop measurement "new_measurement"'
) | Test drop measurement for TestInfluxDBClient object. | test_drop_measurement | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_drop_numeric_named_database(self):
"""Test drop numeric db for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
"http://localhost:8086/query",
text='{"results":[{}]}'
)
self.cli.drop_database('123')
self.assertEqual(
m.last_request.qs['q'][0],
'drop database "123"'
) | Test drop numeric db for TestInfluxDBClient object. | test_drop_numeric_named_database | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_get_list_database(self):
"""Test get list of databases for TestInfluxDBClient object."""
data = {'results': [
{'series': [
{'name': 'databases',
'values': [
['new_db_1'],
['new_db_2']],
'columns': ['name']}]}
]}
with _mocked_session(self.cli, 'get', 200, json.dumps(data)):
self.assertListEqual(
self.cli.get_list_database(),
[{'name': 'new_db_1'}, {'name': 'new_db_2'}]
) | Test get list of databases for TestInfluxDBClient object. | test_get_list_database | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
def test_get_list_database_fails(self):
"""Test get list of dbs fail for TestInfluxDBClient object."""
cli = InfluxDBClient('host', 8086, 'username', 'password')
with _mocked_session(cli, 'get', 401):
cli.get_list_database() | Test get list of dbs fail for TestInfluxDBClient object. | test_get_list_database_fails | python | influxdata/influxdb-python | influxdb/tests/client_test.py | https://github.com/influxdata/influxdb-python/blob/master/influxdb/tests/client_test.py | MIT |
Subsets and Splits
Django Code with Docstrings
Filters Python code examples from Django repository that contain Django-related code, helping identify relevant code snippets for understanding Django framework usage patterns.
SQL Console for Shuu12121/python-treesitter-filtered-datasetsV2
Retrieves specific code examples from the Flask repository but doesn't provide meaningful analysis or patterns beyond basic data retrieval.
HTTPX Repo Code and Docstrings
Retrieves specific code examples from the httpx repository, which is useful for understanding how particular libraries are used but doesn't provide broader analytical insights about the dataset.
Requests Repo Docstrings & Code
Retrieves code examples with their docstrings and file paths from the requests repository, providing basic filtering but limited analytical value beyond finding specific code samples.
Quart Repo Docstrings & Code
Retrieves code examples with their docstrings from the Quart repository, providing basic code samples but offering limited analytical value for understanding broader patterns or relationships in the dataset.