repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/mapmatching.py
|
match
|
python
|
def match(ctx, features, profile, gps_precision):
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
features = list(features)
if len(features) != 1:
raise click.BadParameter(
"Mapmatching requires a single LineString feature")
service = mapbox.MapMatcher(access_token=access_token)
try:
res = service.match(
features[0],
profile=profile,
gps_precision=gps_precision)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if res.status_code == 200:
stdout = click.open_file('-', 'w')
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
|
Mapbox Map Matching API lets you use snap your GPS traces
to the OpenStreetMap road and path network.
$ mapbox mapmatching trace.geojson
An access token is required, see `mapbox --help`.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/mapmatching.py#L15-L43
| null |
import click
import cligj
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.command('mapmatching', short_help="Snap GPS traces to OpenStreetMap")
@cligj.features_in_arg
@click.option("--gps-precision", default=4, type=int,
help="Assumed precision of tracking device (default 4 meters)")
@click.option('--profile', default="mapbox.driving",
type=click.Choice(mapbox.MapMatcher().valid_profiles),
help="Mapbox profile id")
@click.pass_context
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/static.py
|
staticmap
|
python
|
def staticmap(ctx, mapid, output, features, lat, lon, zoom, size):
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
if features:
features = list(
cligj.normalize_feature_inputs(None, 'features', [features]))
service = mapbox.Static(access_token=access_token)
try:
res = service.image(
mapid,
lon=lon, lat=lat, z=zoom,
width=size[0], height=size[1],
features=features, sort_keys=True)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if res.status_code == 200:
output.write(res.content)
else:
raise MapboxCLIException(res.text.strip())
|
Generate static map images from existing Mapbox map ids.
Optionally overlay with geojson features.
$ mapbox staticmap --features features.geojson mapbox.satellite out.png
$ mapbox staticmap --lon -61.7 --lat 12.1 --zoom 12 mapbox.satellite out2.png
An access token is required, see `mapbox --help`.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/static.py#L18-L47
| null |
import click
import cligj
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.command(short_help="Static map images.")
@click.argument('mapid', required=True)
@click.argument('output', type=click.File('wb'), required=True)
@click.option('--features', help="GeoJSON Features to render as overlay")
@click.option('--lat', type=float, help="Latitude")
@click.option('--lon', type=float, help="Longitude")
@click.option('--zoom', type=int, help="Zoom")
@click.option('--size', default=(600, 600), nargs=2, type=(int, int),
help="Image width and height in pixels")
@click.pass_context
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/cli.py
|
main_group
|
python
|
def main_group(ctx, verbose, quiet, access_token, config):
ctx.obj = {}
config = config or os.path.join(click.get_app_dir('mapbox'), 'mapbox.ini')
cfg = read_config(config)
if cfg:
ctx.obj['config_file'] = config
ctx.obj['cfg'] = cfg
ctx.default_map = cfg
verbosity = (os.environ.get('MAPBOX_VERBOSE') or
ctx.lookup_default('mapbox.verbosity') or 0)
if verbose or quiet:
verbosity = verbose - quiet
verbosity = int(verbosity)
configure_logging(verbosity)
access_token = (access_token or os.environ.get('MAPBOX_ACCESS_TOKEN') or
os.environ.get('MapboxAccessToken') or
ctx.lookup_default('mapbox.access-token'))
ctx.obj['verbosity'] = verbosity
ctx.obj['access_token'] = access_token
|
This is the command line interface to Mapbox web services.
Mapbox web services require an access token. Your token is shown
on the https://www.mapbox.com/studio/account/tokens/ page when you are
logged in. The token can be provided on the command line
$ mapbox --access-token MY_TOKEN ...
as an environment variable named MAPBOX_ACCESS_TOKEN (higher
precedence) or MapboxAccessToken (lower precedence).
\b
$ export MAPBOX_ACCESS_TOKEN=MY_TOKEN
$ mapbox ...
or in a config file
\b
; configuration file mapbox.ini
[mapbox]
access-token = MY_TOKEN
The OS-dependent default config file path is something like
\b
~/Library/Application Support/mapbox/mapbox.ini
~/.config/mapbox/mapbox.ini
~/.mapbox/mapbox.ini
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/cli.py#L43-L94
|
[
"def configure_logging(verbosity):\n log_level = max(10, 30 - 10 * verbosity)\n logging.basicConfig(stream=sys.stderr, level=log_level)\n",
"def read_config(cfg):\n parser = configparser.ConfigParser()\n parser.read(cfg)\n rv = {}\n for section in parser.sections():\n for key, value in parser.items(section):\n rv['{0}.{1}'.format(section, key)] = value\n return rv\n"
] |
"""
Main click group for CLI
"""
import logging
import os
import sys
import click
import cligj
import mapboxcli
from mapboxcli.compat import configparser
from mapboxcli.scripts import (
config, geocoding, directions, mapmatching, uploads, static, datasets)
def configure_logging(verbosity):
log_level = max(10, 30 - 10 * verbosity)
logging.basicConfig(stream=sys.stderr, level=log_level)
def read_config(cfg):
parser = configparser.ConfigParser()
parser.read(cfg)
rv = {}
for section in parser.sections():
for key, value in parser.items(section):
rv['{0}.{1}'.format(section, key)] = value
return rv
@click.group()
@click.version_option(version=mapboxcli.__version__, message='%(version)s')
@cligj.verbose_opt
@cligj.quiet_opt
@click.option('--access-token', help="Your Mapbox access token.")
@click.option('--config', '-c', type=click.Path(exists=True,
resolve_path=True),
help="Config file (default: '{0}/mapbox.ini'".format(
click.get_app_dir('mapbox')))
@click.pass_context
# mapbox commands are added here.
main_group.add_command(config.config)
main_group.add_command(geocoding.geocoding)
main_group.add_command(directions.directions)
main_group.add_command(mapmatching.match)
main_group.add_command(uploads.upload)
main_group.add_command(static.staticmap)
main_group.add_command(datasets.datasets)
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/config.py
|
config
|
python
|
def config(ctx):
ctx.default_map = ctx.obj['cfg']
click.echo("CLI:")
click.echo("access-token = {0}".format(ctx.obj['access_token']))
click.echo("verbosity = {0}".format(ctx.obj['verbosity']))
click.echo("")
click.echo("Environment:")
if 'MAPBOX_ACCESS_TOKEN' in os.environ:
click.echo("MAPBOX_ACCESS_TOKEN = {0}".format(
os.environ['MAPBOX_ACCESS_TOKEN']))
if 'MapboxAccessToken' in os.environ:
click.echo("MapboxAccessToken = {0}".format(
os.environ['MapboxAccessToken']))
if 'MAPBOX_VERBOSE' in os.environ:
click.echo("MAPBOX_VERBOSE = {0}".format(
os.environ['MAPBOX_VERBOSE']))
click.echo("")
if 'config_file' in ctx.obj:
click.echo("Config file {0}:".format(ctx.obj['config_file']))
for key, value in ctx.default_map.items():
click.echo("{0} = {1}".format(key, value))
click.echo("")
|
Show access token and other configuration settings.
The access token and command verbosity level can be set on the
command line, as environment variables, and in mapbox.ini config
files.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/config.py#L8-L37
| null |
import os
import click
@click.command(short_help="Show all config settings.")
@click.pass_context
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/geocoding.py
|
coords_from_query
|
python
|
def coords_from_query(query):
try:
coords = json.loads(query)
except ValueError:
vals = re.split(r'[,\s]+', query.strip())
coords = [float(v) for v in vals]
return tuple(coords[:2])
|
Transform a query line into a (lng, lat) pair of coordinates.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/geocoding.py#L24-L31
| null |
import logging
from itertools import chain
import json
import re
import click
import mapbox
from mapbox import Geocoder
from mapboxcli.compat import map
from mapboxcli.errors import MapboxCLIException
def iter_query(query):
"""Accept a filename, stream, or string.
Returns an iterator over lines of the query."""
try:
itr = click.open_file(query).readlines()
except IOError:
itr = [query]
return itr
def echo_headers(headers, file=None):
"""Echo headers, sorted."""
for k, v in sorted(headers.items()):
click.echo("{0}: {1}".format(k.title(), v), file=file)
click.echo(file=file)
@click.command(short_help="Geocode an address or coordinates.")
@click.argument('query', default='-', required=False)
@click.option(
'--forward/--reverse',
default=True,
help="Perform a forward or reverse geocode. [default: forward]")
@click.option('--include', '-i', 'include_headers',
is_flag=True, default=False,
help="Include HTTP headers in the output.")
@click.option(
'--lat', type=float, default=None,
help="Bias results toward this latitude (decimal degrees). --lon "
"is also required.")
@click.option(
'--lon', type=float, default=None,
help="Bias results toward this longitude (decimal degrees). --lat "
"is also required.")
@click.option(
'--place-type', '-t', multiple=True, metavar='NAME', default=None,
type=click.Choice(Geocoder().place_types.keys()),
help="Restrict results to one or more place types.")
@click.option('--output', '-o', default='-', help="Save output to a file.")
@click.option('--dataset', '-d', default='mapbox.places',
type=click.Choice(("mapbox.places", "mapbox.places-permanent")),
help="Source dataset for geocoding, [default: mapbox.places]")
@click.option('--country', default=None,
help="Restrict forward geocoding to specified country codes,"
"comma-separated")
@click.option('--bbox', default=None,
help="Restrict forward geocoding to specified bounding box,"
"given in minX,minY,maxX,maxY coordinates.")
@click.option('--features', is_flag=True, default=False,
help="Return results as line-delimited GeoJSON Feature sequence, "
"not a FeatureCollection")
@click.option('--limit', type=int, default=None,
help="Limit the number of returned features")
@click.pass_context
def geocoding(ctx, query, forward, include_headers, lat, lon,
place_type, output, dataset, country, bbox, features, limit):
"""This command returns places matching an address (forward mode) or
places matching coordinates (reverse mode).
In forward (the default) mode the query argument shall be an address
such as '1600 pennsylvania ave nw'.
$ mapbox geocoding '1600 pennsylvania ave nw'
In reverse mode the query argument shall be a JSON encoded array
of longitude and latitude (in that order) in decimal degrees.
$ mapbox geocoding --reverse '[-77.4371, 37.5227]'
An access token is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
stdout = click.open_file(output, 'w')
geocoder = Geocoder(name=dataset, access_token=access_token)
if forward:
if country:
country = [x.lower() for x in country.split(",")]
if bbox:
try:
bbox = tuple(map(float, bbox.split(',')))
except ValueError:
bbox = json.loads(bbox)
for q in iter_query(query):
try:
resp = geocoder.forward(
q, types=place_type, lat=lat, lon=lon,
country=country, bbox=bbox, limit=limit)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if include_headers:
echo_headers(resp.headers, file=stdout)
if resp.status_code == 200:
if features:
collection = json.loads(resp.text)
for feat in collection['features']:
click.echo(json.dumps(feat), file=stdout)
else:
click.echo(resp.text, file=stdout)
else:
raise MapboxCLIException(resp.text.strip())
else:
for lon, lat in map(coords_from_query, iter_query(query)):
try:
resp = geocoder.reverse(
lon=lon, lat=lat, types=place_type, limit=limit)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if include_headers:
echo_headers(resp.headers, file=stdout)
if resp.status_code == 200:
if features:
collection = json.loads(resp.text)
for feat in collection['features']:
click.echo(json.dumps(feat), file=stdout)
else:
click.echo(resp.text, file=stdout)
else:
raise MapboxCLIException(resp.text.strip())
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/geocoding.py
|
echo_headers
|
python
|
def echo_headers(headers, file=None):
for k, v in sorted(headers.items()):
click.echo("{0}: {1}".format(k.title(), v), file=file)
click.echo(file=file)
|
Echo headers, sorted.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/geocoding.py#L34-L38
| null |
import logging
from itertools import chain
import json
import re
import click
import mapbox
from mapbox import Geocoder
from mapboxcli.compat import map
from mapboxcli.errors import MapboxCLIException
def iter_query(query):
"""Accept a filename, stream, or string.
Returns an iterator over lines of the query."""
try:
itr = click.open_file(query).readlines()
except IOError:
itr = [query]
return itr
def coords_from_query(query):
"""Transform a query line into a (lng, lat) pair of coordinates."""
try:
coords = json.loads(query)
except ValueError:
vals = re.split(r'[,\s]+', query.strip())
coords = [float(v) for v in vals]
return tuple(coords[:2])
@click.command(short_help="Geocode an address or coordinates.")
@click.argument('query', default='-', required=False)
@click.option(
'--forward/--reverse',
default=True,
help="Perform a forward or reverse geocode. [default: forward]")
@click.option('--include', '-i', 'include_headers',
is_flag=True, default=False,
help="Include HTTP headers in the output.")
@click.option(
'--lat', type=float, default=None,
help="Bias results toward this latitude (decimal degrees). --lon "
"is also required.")
@click.option(
'--lon', type=float, default=None,
help="Bias results toward this longitude (decimal degrees). --lat "
"is also required.")
@click.option(
'--place-type', '-t', multiple=True, metavar='NAME', default=None,
type=click.Choice(Geocoder().place_types.keys()),
help="Restrict results to one or more place types.")
@click.option('--output', '-o', default='-', help="Save output to a file.")
@click.option('--dataset', '-d', default='mapbox.places',
type=click.Choice(("mapbox.places", "mapbox.places-permanent")),
help="Source dataset for geocoding, [default: mapbox.places]")
@click.option('--country', default=None,
help="Restrict forward geocoding to specified country codes,"
"comma-separated")
@click.option('--bbox', default=None,
help="Restrict forward geocoding to specified bounding box,"
"given in minX,minY,maxX,maxY coordinates.")
@click.option('--features', is_flag=True, default=False,
help="Return results as line-delimited GeoJSON Feature sequence, "
"not a FeatureCollection")
@click.option('--limit', type=int, default=None,
help="Limit the number of returned features")
@click.pass_context
def geocoding(ctx, query, forward, include_headers, lat, lon,
place_type, output, dataset, country, bbox, features, limit):
"""This command returns places matching an address (forward mode) or
places matching coordinates (reverse mode).
In forward (the default) mode the query argument shall be an address
such as '1600 pennsylvania ave nw'.
$ mapbox geocoding '1600 pennsylvania ave nw'
In reverse mode the query argument shall be a JSON encoded array
of longitude and latitude (in that order) in decimal degrees.
$ mapbox geocoding --reverse '[-77.4371, 37.5227]'
An access token is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
stdout = click.open_file(output, 'w')
geocoder = Geocoder(name=dataset, access_token=access_token)
if forward:
if country:
country = [x.lower() for x in country.split(",")]
if bbox:
try:
bbox = tuple(map(float, bbox.split(',')))
except ValueError:
bbox = json.loads(bbox)
for q in iter_query(query):
try:
resp = geocoder.forward(
q, types=place_type, lat=lat, lon=lon,
country=country, bbox=bbox, limit=limit)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if include_headers:
echo_headers(resp.headers, file=stdout)
if resp.status_code == 200:
if features:
collection = json.loads(resp.text)
for feat in collection['features']:
click.echo(json.dumps(feat), file=stdout)
else:
click.echo(resp.text, file=stdout)
else:
raise MapboxCLIException(resp.text.strip())
else:
for lon, lat in map(coords_from_query, iter_query(query)):
try:
resp = geocoder.reverse(
lon=lon, lat=lat, types=place_type, limit=limit)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if include_headers:
echo_headers(resp.headers, file=stdout)
if resp.status_code == 200:
if features:
collection = json.loads(resp.text)
for feat in collection['features']:
click.echo(json.dumps(feat), file=stdout)
else:
click.echo(resp.text, file=stdout)
else:
raise MapboxCLIException(resp.text.strip())
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/geocoding.py
|
geocoding
|
python
|
def geocoding(ctx, query, forward, include_headers, lat, lon,
place_type, output, dataset, country, bbox, features, limit):
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
stdout = click.open_file(output, 'w')
geocoder = Geocoder(name=dataset, access_token=access_token)
if forward:
if country:
country = [x.lower() for x in country.split(",")]
if bbox:
try:
bbox = tuple(map(float, bbox.split(',')))
except ValueError:
bbox = json.loads(bbox)
for q in iter_query(query):
try:
resp = geocoder.forward(
q, types=place_type, lat=lat, lon=lon,
country=country, bbox=bbox, limit=limit)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if include_headers:
echo_headers(resp.headers, file=stdout)
if resp.status_code == 200:
if features:
collection = json.loads(resp.text)
for feat in collection['features']:
click.echo(json.dumps(feat), file=stdout)
else:
click.echo(resp.text, file=stdout)
else:
raise MapboxCLIException(resp.text.strip())
else:
for lon, lat in map(coords_from_query, iter_query(query)):
try:
resp = geocoder.reverse(
lon=lon, lat=lat, types=place_type, limit=limit)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if include_headers:
echo_headers(resp.headers, file=stdout)
if resp.status_code == 200:
if features:
collection = json.loads(resp.text)
for feat in collection['features']:
click.echo(json.dumps(feat), file=stdout)
else:
click.echo(resp.text, file=stdout)
else:
raise MapboxCLIException(resp.text.strip())
|
This command returns places matching an address (forward mode) or
places matching coordinates (reverse mode).
In forward (the default) mode the query argument shall be an address
such as '1600 pennsylvania ave nw'.
$ mapbox geocoding '1600 pennsylvania ave nw'
In reverse mode the query argument shall be a JSON encoded array
of longitude and latitude (in that order) in decimal degrees.
$ mapbox geocoding --reverse '[-77.4371, 37.5227]'
An access token is required, see `mapbox --help`.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/geocoding.py#L78-L147
|
[
"def iter_query(query):\n \"\"\"Accept a filename, stream, or string.\n Returns an iterator over lines of the query.\"\"\"\n try:\n itr = click.open_file(query).readlines()\n except IOError:\n itr = [query]\n return itr\n",
"def echo_headers(headers, file=None):\n \"\"\"Echo headers, sorted.\"\"\"\n for k, v in sorted(headers.items()):\n click.echo(\"{0}: {1}\".format(k.title(), v), file=file)\n click.echo(file=file)\n"
] |
import logging
from itertools import chain
import json
import re
import click
import mapbox
from mapbox import Geocoder
from mapboxcli.compat import map
from mapboxcli.errors import MapboxCLIException
def iter_query(query):
"""Accept a filename, stream, or string.
Returns an iterator over lines of the query."""
try:
itr = click.open_file(query).readlines()
except IOError:
itr = [query]
return itr
def coords_from_query(query):
"""Transform a query line into a (lng, lat) pair of coordinates."""
try:
coords = json.loads(query)
except ValueError:
vals = re.split(r'[,\s]+', query.strip())
coords = [float(v) for v in vals]
return tuple(coords[:2])
def echo_headers(headers, file=None):
"""Echo headers, sorted."""
for k, v in sorted(headers.items()):
click.echo("{0}: {1}".format(k.title(), v), file=file)
click.echo(file=file)
@click.command(short_help="Geocode an address or coordinates.")
@click.argument('query', default='-', required=False)
@click.option(
'--forward/--reverse',
default=True,
help="Perform a forward or reverse geocode. [default: forward]")
@click.option('--include', '-i', 'include_headers',
is_flag=True, default=False,
help="Include HTTP headers in the output.")
@click.option(
'--lat', type=float, default=None,
help="Bias results toward this latitude (decimal degrees). --lon "
"is also required.")
@click.option(
'--lon', type=float, default=None,
help="Bias results toward this longitude (decimal degrees). --lat "
"is also required.")
@click.option(
'--place-type', '-t', multiple=True, metavar='NAME', default=None,
type=click.Choice(Geocoder().place_types.keys()),
help="Restrict results to one or more place types.")
@click.option('--output', '-o', default='-', help="Save output to a file.")
@click.option('--dataset', '-d', default='mapbox.places',
type=click.Choice(("mapbox.places", "mapbox.places-permanent")),
help="Source dataset for geocoding, [default: mapbox.places]")
@click.option('--country', default=None,
help="Restrict forward geocoding to specified country codes,"
"comma-separated")
@click.option('--bbox', default=None,
help="Restrict forward geocoding to specified bounding box,"
"given in minX,minY,maxX,maxY coordinates.")
@click.option('--features', is_flag=True, default=False,
help="Return results as line-delimited GeoJSON Feature sequence, "
"not a FeatureCollection")
@click.option('--limit', type=int, default=None,
help="Limit the number of returned features")
@click.pass_context
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/datasets.py
|
datasets
|
python
|
def datasets(ctx):
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Datasets(access_token=access_token)
ctx.obj['service'] = service
|
Read and write GeoJSON from Mapbox-hosted datasets
All endpoints require authentication. An access token with
appropriate dataset scopes is required, see `mapbox --help`.
Note that this API is currently a limited-access beta.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/datasets.py#L13-L24
| null |
# Datasets.
import json
import click
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.group(short_help="Read and write Mapbox datasets (has subcommands)")
@click.pass_context
@datasets.command(short_help="List datasets")
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def list(ctx, output):
"""List datasets.
Prints a list of objects describing datasets.
$ mapbox datasets list
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list()
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(short_help="Create an empty dataset")
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def create(ctx, name, description):
"""Create a new dataset.
Prints a JSON object containing the attributes
of the new dataset.
$ mapbox datasets create
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.create(name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-dataset",
short_help="Return information about a dataset")
@click.argument('dataset', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_dataset(ctx, dataset, output):
"""Read the attributes of a dataset.
Prints a JSON object containing the attributes
of a dataset. The attributes: owner (a Mapbox account),
id (dataset id), created (Unix timestamp), modified
(timestamp), name (string), and description (string).
$ mapbox datasets read-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_dataset(dataset)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="update-dataset",
short_help="Update information about a dataset")
@click.argument('dataset', required=True)
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def update_dataset(ctx, dataset, name, description):
"""Update the name and description of a dataset.
Prints a JSON object containing the updated dataset
attributes.
$ mapbox datasets update-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.update_dataset(dataset, name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-dataset", short_help="Delete a dataset")
@click.argument('dataset', required=True)
@click.pass_context
def delete_dataset(ctx, dataset):
"""Delete a dataset.
$ mapbox datasets delete-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_dataset(dataset)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="list-features",
short_help="List features in a dataset")
@click.argument('dataset', required=True)
@click.option('--reverse', '-r', default=False,
help="Read features in reverse")
@click.option('--start', '-s', default=None,
help="Feature id to begin reading from")
@click.option('--limit', '-l', default=None,
help="Maximum number of features to return")
@click.option('--output', '-o', default='-',
help="Save output to a file")
@click.pass_context
def list_features(ctx, dataset, reverse, start, limit, output):
"""Get features of a dataset.
Prints the features of the dataset as a GeoJSON feature collection.
$ mapbox datasets list-features dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list_features(dataset, reverse, start, limit)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-feature",
short_help="Read a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_feature(ctx, dataset, fid, output):
"""Read a dataset feature.
Prints a GeoJSON representation of the feature.
$ mapbox datasets read-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_feature(dataset, fid)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="put-feature",
short_help="Insert or update a single feature in a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.argument('feature', required=False, default=None)
@click.option('--input', '-i', default='-',
help="File containing a feature to put")
@click.pass_context
def put_feature(ctx, dataset, fid, feature, input):
"""Create or update a dataset feature.
The semantics of HTTP PUT apply: if the dataset has no feature
with the given `fid` a new feature will be created. Returns a
GeoJSON representation of the new or updated feature.
$ mapbox datasets put-feature dataset-id feature-id 'geojson-feature'
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
if feature is None:
stdin = click.open_file(input, 'r')
feature = stdin.read()
feature = json.loads(feature)
service = ctx.obj.get('service')
res = service.update_feature(dataset, fid, feature)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-feature",
short_help="Delete a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.pass_context
def delete_feature(ctx, dataset, fid):
"""Delete a feature.
$ mapbox datasets delete-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_feature(dataset, fid)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="create-tileset",
short_help="Generate a tileset from a dataset")
@click.argument('dataset', required=True)
@click.argument('tileset', required=True)
@click.option('--name', '-n', default=None, help="Name for the tileset")
@click.pass_context
def create_tileset(ctx, dataset, tileset, name):
"""Create a vector tileset from a dataset.
$ mapbox datasets create-tileset dataset-id username.data
Note that the tileset must start with your username and the dataset
must be one that you own. To view processing status, visit
https://www.mapbox.com/data/. You may not generate another tilesets
from the same dataset until the first processing job has completed.
All endpoints require authentication. An access token with
`uploads:write` scope is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Uploader(access_token=access_token)
uri = "mapbox://datasets/{username}/{dataset}".format(
username=tileset.split('.')[0], dataset=dataset)
res = service.create(uri, tileset, name)
if res.status_code == 201:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/datasets.py
|
create
|
python
|
def create(ctx, name, description):
service = ctx.obj.get('service')
res = service.create(name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
Create a new dataset.
Prints a JSON object containing the attributes
of the new dataset.
$ mapbox datasets create
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/datasets.py#L56-L74
| null |
# Datasets.
import json
import click
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.group(short_help="Read and write Mapbox datasets (has subcommands)")
@click.pass_context
def datasets(ctx):
"""Read and write GeoJSON from Mapbox-hosted datasets
All endpoints require authentication. An access token with
appropriate dataset scopes is required, see `mapbox --help`.
Note that this API is currently a limited-access beta.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Datasets(access_token=access_token)
ctx.obj['service'] = service
@datasets.command(short_help="List datasets")
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def list(ctx, output):
"""List datasets.
Prints a list of objects describing datasets.
$ mapbox datasets list
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list()
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(short_help="Create an empty dataset")
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
@datasets.command(name="read-dataset",
short_help="Return information about a dataset")
@click.argument('dataset', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_dataset(ctx, dataset, output):
"""Read the attributes of a dataset.
Prints a JSON object containing the attributes
of a dataset. The attributes: owner (a Mapbox account),
id (dataset id), created (Unix timestamp), modified
(timestamp), name (string), and description (string).
$ mapbox datasets read-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_dataset(dataset)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="update-dataset",
short_help="Update information about a dataset")
@click.argument('dataset', required=True)
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def update_dataset(ctx, dataset, name, description):
"""Update the name and description of a dataset.
Prints a JSON object containing the updated dataset
attributes.
$ mapbox datasets update-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.update_dataset(dataset, name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-dataset", short_help="Delete a dataset")
@click.argument('dataset', required=True)
@click.pass_context
def delete_dataset(ctx, dataset):
"""Delete a dataset.
$ mapbox datasets delete-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_dataset(dataset)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="list-features",
short_help="List features in a dataset")
@click.argument('dataset', required=True)
@click.option('--reverse', '-r', default=False,
help="Read features in reverse")
@click.option('--start', '-s', default=None,
help="Feature id to begin reading from")
@click.option('--limit', '-l', default=None,
help="Maximum number of features to return")
@click.option('--output', '-o', default='-',
help="Save output to a file")
@click.pass_context
def list_features(ctx, dataset, reverse, start, limit, output):
"""Get features of a dataset.
Prints the features of the dataset as a GeoJSON feature collection.
$ mapbox datasets list-features dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list_features(dataset, reverse, start, limit)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-feature",
short_help="Read a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_feature(ctx, dataset, fid, output):
"""Read a dataset feature.
Prints a GeoJSON representation of the feature.
$ mapbox datasets read-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_feature(dataset, fid)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="put-feature",
short_help="Insert or update a single feature in a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.argument('feature', required=False, default=None)
@click.option('--input', '-i', default='-',
help="File containing a feature to put")
@click.pass_context
def put_feature(ctx, dataset, fid, feature, input):
"""Create or update a dataset feature.
The semantics of HTTP PUT apply: if the dataset has no feature
with the given `fid` a new feature will be created. Returns a
GeoJSON representation of the new or updated feature.
$ mapbox datasets put-feature dataset-id feature-id 'geojson-feature'
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
if feature is None:
stdin = click.open_file(input, 'r')
feature = stdin.read()
feature = json.loads(feature)
service = ctx.obj.get('service')
res = service.update_feature(dataset, fid, feature)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-feature",
short_help="Delete a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.pass_context
def delete_feature(ctx, dataset, fid):
"""Delete a feature.
$ mapbox datasets delete-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_feature(dataset, fid)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="create-tileset",
short_help="Generate a tileset from a dataset")
@click.argument('dataset', required=True)
@click.argument('tileset', required=True)
@click.option('--name', '-n', default=None, help="Name for the tileset")
@click.pass_context
def create_tileset(ctx, dataset, tileset, name):
"""Create a vector tileset from a dataset.
$ mapbox datasets create-tileset dataset-id username.data
Note that the tileset must start with your username and the dataset
must be one that you own. To view processing status, visit
https://www.mapbox.com/data/. You may not generate another tilesets
from the same dataset until the first processing job has completed.
All endpoints require authentication. An access token with
`uploads:write` scope is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Uploader(access_token=access_token)
uri = "mapbox://datasets/{username}/{dataset}".format(
username=tileset.split('.')[0], dataset=dataset)
res = service.create(uri, tileset, name)
if res.status_code == 201:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/datasets.py
|
read_dataset
|
python
|
def read_dataset(ctx, dataset, output):
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_dataset(dataset)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
|
Read the attributes of a dataset.
Prints a JSON object containing the attributes
of a dataset. The attributes: owner (a Mapbox account),
id (dataset id), created (Unix timestamp), modified
(timestamp), name (string), and description (string).
$ mapbox datasets read-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/datasets.py#L82-L103
| null |
# Datasets.
import json
import click
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.group(short_help="Read and write Mapbox datasets (has subcommands)")
@click.pass_context
def datasets(ctx):
"""Read and write GeoJSON from Mapbox-hosted datasets
All endpoints require authentication. An access token with
appropriate dataset scopes is required, see `mapbox --help`.
Note that this API is currently a limited-access beta.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Datasets(access_token=access_token)
ctx.obj['service'] = service
@datasets.command(short_help="List datasets")
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def list(ctx, output):
"""List datasets.
Prints a list of objects describing datasets.
$ mapbox datasets list
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list()
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(short_help="Create an empty dataset")
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def create(ctx, name, description):
"""Create a new dataset.
Prints a JSON object containing the attributes
of the new dataset.
$ mapbox datasets create
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.create(name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-dataset",
short_help="Return information about a dataset")
@click.argument('dataset', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
@datasets.command(name="update-dataset",
short_help="Update information about a dataset")
@click.argument('dataset', required=True)
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def update_dataset(ctx, dataset, name, description):
"""Update the name and description of a dataset.
Prints a JSON object containing the updated dataset
attributes.
$ mapbox datasets update-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.update_dataset(dataset, name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-dataset", short_help="Delete a dataset")
@click.argument('dataset', required=True)
@click.pass_context
def delete_dataset(ctx, dataset):
"""Delete a dataset.
$ mapbox datasets delete-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_dataset(dataset)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="list-features",
short_help="List features in a dataset")
@click.argument('dataset', required=True)
@click.option('--reverse', '-r', default=False,
help="Read features in reverse")
@click.option('--start', '-s', default=None,
help="Feature id to begin reading from")
@click.option('--limit', '-l', default=None,
help="Maximum number of features to return")
@click.option('--output', '-o', default='-',
help="Save output to a file")
@click.pass_context
def list_features(ctx, dataset, reverse, start, limit, output):
"""Get features of a dataset.
Prints the features of the dataset as a GeoJSON feature collection.
$ mapbox datasets list-features dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list_features(dataset, reverse, start, limit)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-feature",
short_help="Read a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_feature(ctx, dataset, fid, output):
"""Read a dataset feature.
Prints a GeoJSON representation of the feature.
$ mapbox datasets read-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_feature(dataset, fid)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="put-feature",
short_help="Insert or update a single feature in a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.argument('feature', required=False, default=None)
@click.option('--input', '-i', default='-',
help="File containing a feature to put")
@click.pass_context
def put_feature(ctx, dataset, fid, feature, input):
"""Create or update a dataset feature.
The semantics of HTTP PUT apply: if the dataset has no feature
with the given `fid` a new feature will be created. Returns a
GeoJSON representation of the new or updated feature.
$ mapbox datasets put-feature dataset-id feature-id 'geojson-feature'
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
if feature is None:
stdin = click.open_file(input, 'r')
feature = stdin.read()
feature = json.loads(feature)
service = ctx.obj.get('service')
res = service.update_feature(dataset, fid, feature)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-feature",
short_help="Delete a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.pass_context
def delete_feature(ctx, dataset, fid):
"""Delete a feature.
$ mapbox datasets delete-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_feature(dataset, fid)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="create-tileset",
short_help="Generate a tileset from a dataset")
@click.argument('dataset', required=True)
@click.argument('tileset', required=True)
@click.option('--name', '-n', default=None, help="Name for the tileset")
@click.pass_context
def create_tileset(ctx, dataset, tileset, name):
"""Create a vector tileset from a dataset.
$ mapbox datasets create-tileset dataset-id username.data
Note that the tileset must start with your username and the dataset
must be one that you own. To view processing status, visit
https://www.mapbox.com/data/. You may not generate another tilesets
from the same dataset until the first processing job has completed.
All endpoints require authentication. An access token with
`uploads:write` scope is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Uploader(access_token=access_token)
uri = "mapbox://datasets/{username}/{dataset}".format(
username=tileset.split('.')[0], dataset=dataset)
res = service.create(uri, tileset, name)
if res.status_code == 201:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/datasets.py
|
delete_dataset
|
python
|
def delete_dataset(ctx, dataset):
service = ctx.obj.get('service')
res = service.delete_dataset(dataset)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
|
Delete a dataset.
$ mapbox datasets delete-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/datasets.py#L137-L150
| null |
# Datasets.
import json
import click
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.group(short_help="Read and write Mapbox datasets (has subcommands)")
@click.pass_context
def datasets(ctx):
"""Read and write GeoJSON from Mapbox-hosted datasets
All endpoints require authentication. An access token with
appropriate dataset scopes is required, see `mapbox --help`.
Note that this API is currently a limited-access beta.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Datasets(access_token=access_token)
ctx.obj['service'] = service
@datasets.command(short_help="List datasets")
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def list(ctx, output):
"""List datasets.
Prints a list of objects describing datasets.
$ mapbox datasets list
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list()
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(short_help="Create an empty dataset")
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def create(ctx, name, description):
"""Create a new dataset.
Prints a JSON object containing the attributes
of the new dataset.
$ mapbox datasets create
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.create(name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-dataset",
short_help="Return information about a dataset")
@click.argument('dataset', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_dataset(ctx, dataset, output):
"""Read the attributes of a dataset.
Prints a JSON object containing the attributes
of a dataset. The attributes: owner (a Mapbox account),
id (dataset id), created (Unix timestamp), modified
(timestamp), name (string), and description (string).
$ mapbox datasets read-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_dataset(dataset)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="update-dataset",
short_help="Update information about a dataset")
@click.argument('dataset', required=True)
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def update_dataset(ctx, dataset, name, description):
"""Update the name and description of a dataset.
Prints a JSON object containing the updated dataset
attributes.
$ mapbox datasets update-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.update_dataset(dataset, name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-dataset", short_help="Delete a dataset")
@click.argument('dataset', required=True)
@click.pass_context
@datasets.command(name="list-features",
short_help="List features in a dataset")
@click.argument('dataset', required=True)
@click.option('--reverse', '-r', default=False,
help="Read features in reverse")
@click.option('--start', '-s', default=None,
help="Feature id to begin reading from")
@click.option('--limit', '-l', default=None,
help="Maximum number of features to return")
@click.option('--output', '-o', default='-',
help="Save output to a file")
@click.pass_context
def list_features(ctx, dataset, reverse, start, limit, output):
"""Get features of a dataset.
Prints the features of the dataset as a GeoJSON feature collection.
$ mapbox datasets list-features dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list_features(dataset, reverse, start, limit)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-feature",
short_help="Read a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_feature(ctx, dataset, fid, output):
"""Read a dataset feature.
Prints a GeoJSON representation of the feature.
$ mapbox datasets read-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_feature(dataset, fid)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="put-feature",
short_help="Insert or update a single feature in a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.argument('feature', required=False, default=None)
@click.option('--input', '-i', default='-',
help="File containing a feature to put")
@click.pass_context
def put_feature(ctx, dataset, fid, feature, input):
"""Create or update a dataset feature.
The semantics of HTTP PUT apply: if the dataset has no feature
with the given `fid` a new feature will be created. Returns a
GeoJSON representation of the new or updated feature.
$ mapbox datasets put-feature dataset-id feature-id 'geojson-feature'
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
if feature is None:
stdin = click.open_file(input, 'r')
feature = stdin.read()
feature = json.loads(feature)
service = ctx.obj.get('service')
res = service.update_feature(dataset, fid, feature)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-feature",
short_help="Delete a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.pass_context
def delete_feature(ctx, dataset, fid):
"""Delete a feature.
$ mapbox datasets delete-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_feature(dataset, fid)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="create-tileset",
short_help="Generate a tileset from a dataset")
@click.argument('dataset', required=True)
@click.argument('tileset', required=True)
@click.option('--name', '-n', default=None, help="Name for the tileset")
@click.pass_context
def create_tileset(ctx, dataset, tileset, name):
"""Create a vector tileset from a dataset.
$ mapbox datasets create-tileset dataset-id username.data
Note that the tileset must start with your username and the dataset
must be one that you own. To view processing status, visit
https://www.mapbox.com/data/. You may not generate another tilesets
from the same dataset until the first processing job has completed.
All endpoints require authentication. An access token with
`uploads:write` scope is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Uploader(access_token=access_token)
uri = "mapbox://datasets/{username}/{dataset}".format(
username=tileset.split('.')[0], dataset=dataset)
res = service.create(uri, tileset, name)
if res.status_code == 201:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/datasets.py
|
list_features
|
python
|
def list_features(ctx, dataset, reverse, start, limit, output):
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list_features(dataset, reverse, start, limit)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
|
Get features of a dataset.
Prints the features of the dataset as a GeoJSON feature collection.
$ mapbox datasets list-features dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/datasets.py#L165-L183
| null |
# Datasets.
import json
import click
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.group(short_help="Read and write Mapbox datasets (has subcommands)")
@click.pass_context
def datasets(ctx):
"""Read and write GeoJSON from Mapbox-hosted datasets
All endpoints require authentication. An access token with
appropriate dataset scopes is required, see `mapbox --help`.
Note that this API is currently a limited-access beta.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Datasets(access_token=access_token)
ctx.obj['service'] = service
@datasets.command(short_help="List datasets")
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def list(ctx, output):
"""List datasets.
Prints a list of objects describing datasets.
$ mapbox datasets list
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list()
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(short_help="Create an empty dataset")
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def create(ctx, name, description):
"""Create a new dataset.
Prints a JSON object containing the attributes
of the new dataset.
$ mapbox datasets create
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.create(name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-dataset",
short_help="Return information about a dataset")
@click.argument('dataset', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_dataset(ctx, dataset, output):
"""Read the attributes of a dataset.
Prints a JSON object containing the attributes
of a dataset. The attributes: owner (a Mapbox account),
id (dataset id), created (Unix timestamp), modified
(timestamp), name (string), and description (string).
$ mapbox datasets read-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_dataset(dataset)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="update-dataset",
short_help="Update information about a dataset")
@click.argument('dataset', required=True)
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def update_dataset(ctx, dataset, name, description):
"""Update the name and description of a dataset.
Prints a JSON object containing the updated dataset
attributes.
$ mapbox datasets update-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.update_dataset(dataset, name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-dataset", short_help="Delete a dataset")
@click.argument('dataset', required=True)
@click.pass_context
def delete_dataset(ctx, dataset):
"""Delete a dataset.
$ mapbox datasets delete-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_dataset(dataset)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="list-features",
short_help="List features in a dataset")
@click.argument('dataset', required=True)
@click.option('--reverse', '-r', default=False,
help="Read features in reverse")
@click.option('--start', '-s', default=None,
help="Feature id to begin reading from")
@click.option('--limit', '-l', default=None,
help="Maximum number of features to return")
@click.option('--output', '-o', default='-',
help="Save output to a file")
@click.pass_context
@datasets.command(name="read-feature",
short_help="Read a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_feature(ctx, dataset, fid, output):
"""Read a dataset feature.
Prints a GeoJSON representation of the feature.
$ mapbox datasets read-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_feature(dataset, fid)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="put-feature",
short_help="Insert or update a single feature in a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.argument('feature', required=False, default=None)
@click.option('--input', '-i', default='-',
help="File containing a feature to put")
@click.pass_context
def put_feature(ctx, dataset, fid, feature, input):
"""Create or update a dataset feature.
The semantics of HTTP PUT apply: if the dataset has no feature
with the given `fid` a new feature will be created. Returns a
GeoJSON representation of the new or updated feature.
$ mapbox datasets put-feature dataset-id feature-id 'geojson-feature'
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
if feature is None:
stdin = click.open_file(input, 'r')
feature = stdin.read()
feature = json.loads(feature)
service = ctx.obj.get('service')
res = service.update_feature(dataset, fid, feature)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-feature",
short_help="Delete a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.pass_context
def delete_feature(ctx, dataset, fid):
"""Delete a feature.
$ mapbox datasets delete-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_feature(dataset, fid)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="create-tileset",
short_help="Generate a tileset from a dataset")
@click.argument('dataset', required=True)
@click.argument('tileset', required=True)
@click.option('--name', '-n', default=None, help="Name for the tileset")
@click.pass_context
def create_tileset(ctx, dataset, tileset, name):
"""Create a vector tileset from a dataset.
$ mapbox datasets create-tileset dataset-id username.data
Note that the tileset must start with your username and the dataset
must be one that you own. To view processing status, visit
https://www.mapbox.com/data/. You may not generate another tilesets
from the same dataset until the first processing job has completed.
All endpoints require authentication. An access token with
`uploads:write` scope is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Uploader(access_token=access_token)
uri = "mapbox://datasets/{username}/{dataset}".format(
username=tileset.split('.')[0], dataset=dataset)
res = service.create(uri, tileset, name)
if res.status_code == 201:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/datasets.py
|
put_feature
|
python
|
def put_feature(ctx, dataset, fid, feature, input):
if feature is None:
stdin = click.open_file(input, 'r')
feature = stdin.read()
feature = json.loads(feature)
service = ctx.obj.get('service')
res = service.update_feature(dataset, fid, feature)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
Create or update a dataset feature.
The semantics of HTTP PUT apply: if the dataset has no feature
with the given `fid` a new feature will be created. Returns a
GeoJSON representation of the new or updated feature.
$ mapbox datasets put-feature dataset-id feature-id 'geojson-feature'
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/datasets.py#L221-L246
| null |
# Datasets.
import json
import click
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.group(short_help="Read and write Mapbox datasets (has subcommands)")
@click.pass_context
def datasets(ctx):
"""Read and write GeoJSON from Mapbox-hosted datasets
All endpoints require authentication. An access token with
appropriate dataset scopes is required, see `mapbox --help`.
Note that this API is currently a limited-access beta.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Datasets(access_token=access_token)
ctx.obj['service'] = service
@datasets.command(short_help="List datasets")
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def list(ctx, output):
"""List datasets.
Prints a list of objects describing datasets.
$ mapbox datasets list
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list()
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(short_help="Create an empty dataset")
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def create(ctx, name, description):
"""Create a new dataset.
Prints a JSON object containing the attributes
of the new dataset.
$ mapbox datasets create
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.create(name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-dataset",
short_help="Return information about a dataset")
@click.argument('dataset', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_dataset(ctx, dataset, output):
"""Read the attributes of a dataset.
Prints a JSON object containing the attributes
of a dataset. The attributes: owner (a Mapbox account),
id (dataset id), created (Unix timestamp), modified
(timestamp), name (string), and description (string).
$ mapbox datasets read-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_dataset(dataset)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="update-dataset",
short_help="Update information about a dataset")
@click.argument('dataset', required=True)
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def update_dataset(ctx, dataset, name, description):
"""Update the name and description of a dataset.
Prints a JSON object containing the updated dataset
attributes.
$ mapbox datasets update-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.update_dataset(dataset, name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-dataset", short_help="Delete a dataset")
@click.argument('dataset', required=True)
@click.pass_context
def delete_dataset(ctx, dataset):
"""Delete a dataset.
$ mapbox datasets delete-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_dataset(dataset)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="list-features",
short_help="List features in a dataset")
@click.argument('dataset', required=True)
@click.option('--reverse', '-r', default=False,
help="Read features in reverse")
@click.option('--start', '-s', default=None,
help="Feature id to begin reading from")
@click.option('--limit', '-l', default=None,
help="Maximum number of features to return")
@click.option('--output', '-o', default='-',
help="Save output to a file")
@click.pass_context
def list_features(ctx, dataset, reverse, start, limit, output):
"""Get features of a dataset.
Prints the features of the dataset as a GeoJSON feature collection.
$ mapbox datasets list-features dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list_features(dataset, reverse, start, limit)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-feature",
short_help="Read a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_feature(ctx, dataset, fid, output):
"""Read a dataset feature.
Prints a GeoJSON representation of the feature.
$ mapbox datasets read-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_feature(dataset, fid)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="put-feature",
short_help="Insert or update a single feature in a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.argument('feature', required=False, default=None)
@click.option('--input', '-i', default='-',
help="File containing a feature to put")
@click.pass_context
@datasets.command(name="delete-feature",
short_help="Delete a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.pass_context
def delete_feature(ctx, dataset, fid):
"""Delete a feature.
$ mapbox datasets delete-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_feature(dataset, fid)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="create-tileset",
short_help="Generate a tileset from a dataset")
@click.argument('dataset', required=True)
@click.argument('tileset', required=True)
@click.option('--name', '-n', default=None, help="Name for the tileset")
@click.pass_context
def create_tileset(ctx, dataset, tileset, name):
"""Create a vector tileset from a dataset.
$ mapbox datasets create-tileset dataset-id username.data
Note that the tileset must start with your username and the dataset
must be one that you own. To view processing status, visit
https://www.mapbox.com/data/. You may not generate another tilesets
from the same dataset until the first processing job has completed.
All endpoints require authentication. An access token with
`uploads:write` scope is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Uploader(access_token=access_token)
uri = "mapbox://datasets/{username}/{dataset}".format(
username=tileset.split('.')[0], dataset=dataset)
res = service.create(uri, tileset, name)
if res.status_code == 201:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/datasets.py
|
delete_feature
|
python
|
def delete_feature(ctx, dataset, fid):
service = ctx.obj.get('service')
res = service.delete_feature(dataset, fid)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
|
Delete a feature.
$ mapbox datasets delete-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/datasets.py#L254-L267
| null |
# Datasets.
import json
import click
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.group(short_help="Read and write Mapbox datasets (has subcommands)")
@click.pass_context
def datasets(ctx):
"""Read and write GeoJSON from Mapbox-hosted datasets
All endpoints require authentication. An access token with
appropriate dataset scopes is required, see `mapbox --help`.
Note that this API is currently a limited-access beta.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Datasets(access_token=access_token)
ctx.obj['service'] = service
@datasets.command(short_help="List datasets")
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def list(ctx, output):
"""List datasets.
Prints a list of objects describing datasets.
$ mapbox datasets list
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list()
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(short_help="Create an empty dataset")
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def create(ctx, name, description):
"""Create a new dataset.
Prints a JSON object containing the attributes
of the new dataset.
$ mapbox datasets create
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.create(name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-dataset",
short_help="Return information about a dataset")
@click.argument('dataset', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_dataset(ctx, dataset, output):
"""Read the attributes of a dataset.
Prints a JSON object containing the attributes
of a dataset. The attributes: owner (a Mapbox account),
id (dataset id), created (Unix timestamp), modified
(timestamp), name (string), and description (string).
$ mapbox datasets read-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_dataset(dataset)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="update-dataset",
short_help="Update information about a dataset")
@click.argument('dataset', required=True)
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def update_dataset(ctx, dataset, name, description):
"""Update the name and description of a dataset.
Prints a JSON object containing the updated dataset
attributes.
$ mapbox datasets update-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.update_dataset(dataset, name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-dataset", short_help="Delete a dataset")
@click.argument('dataset', required=True)
@click.pass_context
def delete_dataset(ctx, dataset):
"""Delete a dataset.
$ mapbox datasets delete-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_dataset(dataset)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="list-features",
short_help="List features in a dataset")
@click.argument('dataset', required=True)
@click.option('--reverse', '-r', default=False,
help="Read features in reverse")
@click.option('--start', '-s', default=None,
help="Feature id to begin reading from")
@click.option('--limit', '-l', default=None,
help="Maximum number of features to return")
@click.option('--output', '-o', default='-',
help="Save output to a file")
@click.pass_context
def list_features(ctx, dataset, reverse, start, limit, output):
"""Get features of a dataset.
Prints the features of the dataset as a GeoJSON feature collection.
$ mapbox datasets list-features dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list_features(dataset, reverse, start, limit)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-feature",
short_help="Read a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_feature(ctx, dataset, fid, output):
"""Read a dataset feature.
Prints a GeoJSON representation of the feature.
$ mapbox datasets read-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_feature(dataset, fid)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="put-feature",
short_help="Insert or update a single feature in a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.argument('feature', required=False, default=None)
@click.option('--input', '-i', default='-',
help="File containing a feature to put")
@click.pass_context
def put_feature(ctx, dataset, fid, feature, input):
"""Create or update a dataset feature.
The semantics of HTTP PUT apply: if the dataset has no feature
with the given `fid` a new feature will be created. Returns a
GeoJSON representation of the new or updated feature.
$ mapbox datasets put-feature dataset-id feature-id 'geojson-feature'
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
if feature is None:
stdin = click.open_file(input, 'r')
feature = stdin.read()
feature = json.loads(feature)
service = ctx.obj.get('service')
res = service.update_feature(dataset, fid, feature)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-feature",
short_help="Delete a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.pass_context
@datasets.command(name="create-tileset",
short_help="Generate a tileset from a dataset")
@click.argument('dataset', required=True)
@click.argument('tileset', required=True)
@click.option('--name', '-n', default=None, help="Name for the tileset")
@click.pass_context
def create_tileset(ctx, dataset, tileset, name):
"""Create a vector tileset from a dataset.
$ mapbox datasets create-tileset dataset-id username.data
Note that the tileset must start with your username and the dataset
must be one that you own. To view processing status, visit
https://www.mapbox.com/data/. You may not generate another tilesets
from the same dataset until the first processing job has completed.
All endpoints require authentication. An access token with
`uploads:write` scope is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Uploader(access_token=access_token)
uri = "mapbox://datasets/{username}/{dataset}".format(
username=tileset.split('.')[0], dataset=dataset)
res = service.create(uri, tileset, name)
if res.status_code == 201:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/datasets.py
|
create_tileset
|
python
|
def create_tileset(ctx, dataset, tileset, name):
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Uploader(access_token=access_token)
uri = "mapbox://datasets/{username}/{dataset}".format(
username=tileset.split('.')[0], dataset=dataset)
res = service.create(uri, tileset, name)
if res.status_code == 201:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
Create a vector tileset from a dataset.
$ mapbox datasets create-tileset dataset-id username.data
Note that the tileset must start with your username and the dataset
must be one that you own. To view processing status, visit
https://www.mapbox.com/data/. You may not generate another tilesets
from the same dataset until the first processing job has completed.
All endpoints require authentication. An access token with
`uploads:write` scope is required, see `mapbox --help`.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/datasets.py#L276-L301
| null |
# Datasets.
import json
import click
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.group(short_help="Read and write Mapbox datasets (has subcommands)")
@click.pass_context
def datasets(ctx):
"""Read and write GeoJSON from Mapbox-hosted datasets
All endpoints require authentication. An access token with
appropriate dataset scopes is required, see `mapbox --help`.
Note that this API is currently a limited-access beta.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Datasets(access_token=access_token)
ctx.obj['service'] = service
@datasets.command(short_help="List datasets")
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def list(ctx, output):
"""List datasets.
Prints a list of objects describing datasets.
$ mapbox datasets list
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list()
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(short_help="Create an empty dataset")
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def create(ctx, name, description):
"""Create a new dataset.
Prints a JSON object containing the attributes
of the new dataset.
$ mapbox datasets create
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.create(name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-dataset",
short_help="Return information about a dataset")
@click.argument('dataset', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_dataset(ctx, dataset, output):
"""Read the attributes of a dataset.
Prints a JSON object containing the attributes
of a dataset. The attributes: owner (a Mapbox account),
id (dataset id), created (Unix timestamp), modified
(timestamp), name (string), and description (string).
$ mapbox datasets read-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_dataset(dataset)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="update-dataset",
short_help="Update information about a dataset")
@click.argument('dataset', required=True)
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def update_dataset(ctx, dataset, name, description):
"""Update the name and description of a dataset.
Prints a JSON object containing the updated dataset
attributes.
$ mapbox datasets update-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.update_dataset(dataset, name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-dataset", short_help="Delete a dataset")
@click.argument('dataset', required=True)
@click.pass_context
def delete_dataset(ctx, dataset):
"""Delete a dataset.
$ mapbox datasets delete-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_dataset(dataset)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="list-features",
short_help="List features in a dataset")
@click.argument('dataset', required=True)
@click.option('--reverse', '-r', default=False,
help="Read features in reverse")
@click.option('--start', '-s', default=None,
help="Feature id to begin reading from")
@click.option('--limit', '-l', default=None,
help="Maximum number of features to return")
@click.option('--output', '-o', default='-',
help="Save output to a file")
@click.pass_context
def list_features(ctx, dataset, reverse, start, limit, output):
"""Get features of a dataset.
Prints the features of the dataset as a GeoJSON feature collection.
$ mapbox datasets list-features dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list_features(dataset, reverse, start, limit)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-feature",
short_help="Read a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_feature(ctx, dataset, fid, output):
"""Read a dataset feature.
Prints a GeoJSON representation of the feature.
$ mapbox datasets read-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_feature(dataset, fid)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="put-feature",
short_help="Insert or update a single feature in a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.argument('feature', required=False, default=None)
@click.option('--input', '-i', default='-',
help="File containing a feature to put")
@click.pass_context
def put_feature(ctx, dataset, fid, feature, input):
"""Create or update a dataset feature.
The semantics of HTTP PUT apply: if the dataset has no feature
with the given `fid` a new feature will be created. Returns a
GeoJSON representation of the new or updated feature.
$ mapbox datasets put-feature dataset-id feature-id 'geojson-feature'
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
if feature is None:
stdin = click.open_file(input, 'r')
feature = stdin.read()
feature = json.loads(feature)
service = ctx.obj.get('service')
res = service.update_feature(dataset, fid, feature)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-feature",
short_help="Delete a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.pass_context
def delete_feature(ctx, dataset, fid):
"""Delete a feature.
$ mapbox datasets delete-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_feature(dataset, fid)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="create-tileset",
short_help="Generate a tileset from a dataset")
@click.argument('dataset', required=True)
@click.argument('tileset', required=True)
@click.option('--name', '-n', default=None, help="Name for the tileset")
@click.pass_context
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/directions.py
|
directions
|
python
|
def directions(ctx, features, profile, alternatives,
geometries, overview, steps, continue_straight,
waypoint_snapping, annotations, language, output):
access_token = (ctx.obj and ctx.obj.get("access_token")) or None
service = mapbox.Directions(access_token=access_token)
# The Directions SDK expects False to be
# a bool, not a str.
if overview == "False":
overview = False
# When using waypoint snapping, the
# Directions SDK expects features to be
# a list, not a generator.
if waypoint_snapping is not None:
features = list(features)
if annotations:
annotations = annotations.split(",")
stdout = click.open_file(output, "w")
try:
res = service.directions(
features,
profile=profile,
alternatives=alternatives,
geometries=geometries,
overview=overview,
steps=steps,
continue_straight=continue_straight,
waypoint_snapping=waypoint_snapping,
annotations=annotations,
language=language
)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if res.status_code == 200:
if geometries == "geojson":
click.echo(json.dumps(res.geojson()), file=stdout)
else:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
|
The Mapbox Directions API will show you how to get
where you're going.
mapbox directions "[0, 0]" "[1, 1]"
An access token is required. See "mapbox --help".
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/directions.py#L163-L218
| null |
import json
import re
import click
import cligj
import mapbox
from mapboxcli.errors import MapboxCLIException
def waypoint_snapping_callback(ctx, param, value):
results = []
tuple_pattern = re.compile("[,]")
int_pattern = re.compile("[0-9]")
# value is an n-tuple, each element of
# which contains input from the user.
#
# Iterate over each element, determining
# whether to convert it to a tuple,
# convert it to an int, or leave it as
# a str.
#
# Append each element to results, which
# the Directions SDK will attempt to
# validate.
if len(value) == 0:
return None
for element in value:
# If the element contains a comma, then assume
# that the user intended to pass in a tuple.
#
# Convert each item in the element to an int,
# and create a tuple containing all items.
#
# Raise an error if the item is not a valid int.
#
# (The SDK accepts a three-tuple with ints for
# radius, angle, and range.)
if re.search(tuple_pattern, element):
element = re.split(tuple_pattern, element)
for index in range(0, len(element)):
try:
element[index] = int(element[index])
except ValueError as exc:
raise mapbox.errors.ValidationError(str(exc))
element = tuple(element)
results.append(element)
# If the element contains a decimal number but not
# a comma, then assume that the user intended to
# pass in an int.
#
# Convert the element to an int.
#
# Raise an error if the item is not a valid int.
#
# (The Directions SDK accepts an int for radius.)
elif re.search(int_pattern, element):
try:
element = int(element)
except ValueError as exc:
raise mapbox.errors.ValidationError(str(exc))
results.append(element)
# If the element contains neither a decimal number
# nor a comma, then assume that the user intended
# to pass in a str.
#
# Do nothing since the element is already a str.
#
# (The Directions SDK accepts a str for unlimited radius.)
else:
results.append(element)
return results
@click.command(short_help="Routing between waypoints")
@cligj.features_in_arg
@click.option(
"--profile",
type=click.Choice(mapbox.Directions.valid_profiles),
default="mapbox/driving",
help="Routing profile"
)
@click.option(
"--alternatives/--no-alternatives",
default=True,
help="Whether to try to return alternative routes"
)
@click.option(
"--geometries",
type=click.Choice(mapbox.Directions.valid_geom_encoding),
default="geojson",
help="Format of returned geometry"
)
# Directions.valid_geom_overview contains two
# elements of type str and one element of type bool.
# This causes the Directions CLI's --help option to
# raise a TypeError. To prevent this, we convert
# the bool to a str.
@click.option(
"--overview",
type=click.Choice(str(item) for item in mapbox.Directions.valid_geom_overview),
help="Type of returned overview geometry"
)
@click.option(
"--steps/--no-steps",
default=True,
help="Whether to return steps and turn-by-turn instructions"
)
@click.option(
"--continue-straight/--no-continue-straight",
default=True,
help="Whether to see the allowed direction of travel when departing the original waypoint"
)
@click.option(
"--waypoint-snapping",
multiple=True,
callback=waypoint_snapping_callback,
help="Controls waypoint snapping"
)
@click.option(
"--annotations",
help="Additional metadata along the route"
)
@click.option(
"--language",
help="Language of returned turn-by-turn instructions"
)
@click.option(
"-o",
"--output",
default="-",
help="Save output to a file"
)
@click.pass_context
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/uploads.py
|
upload
|
python
|
def upload(ctx, tileset, datasource, name, patch):
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Uploader(access_token=access_token)
if name is None:
name = tileset.split(".")[-1]
if datasource.startswith('https://'):
# Skip staging. Note this this only works for specific buckets.
res = service.create(datasource, tileset, name=name, patch=patch)
else:
sourcefile = click.File('rb')(datasource)
if hasattr(sourcefile, 'name'):
filelen = (
1 if sourcefile.name == '<stdin>'
else os.stat(sourcefile.name).st_size)
else:
filelen = (len(sourcefile.getbuffer())
if hasattr(sourcefile, 'getbuffer') else 1)
with click.progressbar(length=filelen, label='Uploading data source',
fill_char="#", empty_char='-',
file=sys.stderr) as bar:
def callback(num_bytes):
"""Update the progress bar"""
bar.update(num_bytes)
res = service.upload(sourcefile, tileset, name, patch=patch,
callback=callback)
if res.status_code == 201:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
|
Upload data to Mapbox accounts.
Uploaded data lands at https://www.mapbox.com/data/ and can be used
in new or existing projects. All endpoints require authentication.
You can specify the tileset id and input file
$ mapbox upload username.data mydata.geojson
Or specify just the tileset id and take an input file on stdin
$ cat mydata.geojson | mapbox upload username.data
The --name option defines the title as it appears in Studio and
defaults to the last part of the tileset id, e.g. "data"
Note that the tileset must start with your username. An access
token with upload scope is required, see `mapbox --help`.
Your account must be flagged in order to use the patch mode
feature.
|
train
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/uploads.py#L17-L76
| null |
from io import BytesIO
import os
import sys
import click
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.command(short_help="Upload datasets to Mapbox accounts")
@click.argument('tileset', required=True, type=str, metavar='TILESET')
@click.argument('datasource', type=str, default='-', metavar='[SOURCE]')
@click.option('--name', default=None, help="Name for the data upload")
@click.option('--patch', is_flag=True, default=False, help="Enable patch mode")
@click.pass_context
|
rq/Flask-RQ2
|
src/flask_rq2/cli.py
|
shared_options
|
python
|
def shared_options(rq):
"Default class options to pass to the CLI commands."
return {
'url': rq.redis_url,
'config': None,
'worker_class': rq.worker_class,
'job_class': rq.job_class,
'queue_class': rq.queue_class,
'connection_class': rq.connection_class,
}
|
Default class options to pass to the CLI commands.
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/cli.py#L35-L44
| null |
# -*- coding: utf-8 -*-
"""
flask_rq2.cli
~~~~~~~~~~~~~
Support for the Click based Flask CLI via Flask-CLI.
"""
import operator
import os
from functools import update_wrapper
import click
from rq.cli import cli as rq_cli
from rq.defaults import DEFAULT_RESULT_TTL, DEFAULT_WORKER_TTL
try:
from flask.cli import AppGroup, ScriptInfo
except ImportError: # pragma: no cover
try:
from flask_cli import AppGroup, ScriptInfo
except ImportError:
raise RuntimeError('Cannot import Flask CLI. Is it installed?')
try:
from rq_scheduler import Scheduler
from rq_scheduler.utils import setup_loghandlers
except ImportError: # pragma: no cover
Scheduler = None
_commands = {}
def rq_command(condition=True):
def wrapper(func):
"""Marks a callback as wanting to receive the RQ object we've added
to the context
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
rq = ctx.obj.data.get('rq')
return func(rq, ctx, *args, **kwargs)
updated_wrapper = update_wrapper(new_func, func)
if condition:
_commands[updated_wrapper.__name__] = updated_wrapper
return updated_wrapper
return wrapper
@click.option('--all', '-a', is_flag=True, help='Empty all queues')
@click.argument('queues', nargs=-1)
@rq_command()
def empty(rq, ctx, all, queues):
"Empty given queues."
return ctx.invoke(
rq_cli.empty,
all=all,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--all', '-a', is_flag=True, help='Requeue all failed jobs')
@click.argument('job_ids', nargs=-1)
@rq_command()
def requeue(rq, ctx, all, job_ids):
"Requeue failed jobs."
return ctx.invoke(
rq_cli.requeue,
all=all,
job_ids=job_ids,
**shared_options(rq)
)
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--interval', '-i', type=float,
help='Updates stats every N seconds (default: don\'t poll)')
@click.option('--raw', '-r', is_flag=True,
help='Print only the raw numbers, no bar charts')
@click.option('--only-queues', '-Q', is_flag=True, help='Show only queue info')
@click.option('--only-workers', '-W', is_flag=True,
help='Show only worker info')
@click.option('--by-queue', '-R', is_flag=True, help='Shows workers by queue')
@click.argument('queues', nargs=-1)
@rq_command()
def info(rq, ctx, path, interval, raw, only_queues, only_workers, by_queue,
queues):
"RQ command-line monitor."
return ctx.invoke(
rq_cli.info,
path=path,
interval=interval,
raw=raw,
only_queues=only_queues,
only_workers=only_workers,
by_queue=by_queue,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('--logging_level', type=str, default="INFO",
help='Set logging level')
@click.option('--name', '-n', help='Specify a different name')
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--results-ttl', type=int, default=DEFAULT_RESULT_TTL,
help='Default results timeout to be used')
@click.option('--worker-ttl', type=int, default=DEFAULT_WORKER_TTL,
help='Default worker timeout to be used (default: 420)')
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--quiet', '-q', is_flag=True, help='Show less output')
@click.option('--sentry-dsn', default=None, help='Sentry DSN address')
@click.option('--exception-handler', help='Exception handler(s) to use',
multiple=True)
@click.option('--pid',
help='Write the process ID number to a file at '
'the specified path')
@click.argument('queues', nargs=-1)
@rq_command()
def worker(rq, ctx, burst, logging_level, name, path, results_ttl,
worker_ttl, verbose, quiet, sentry_dsn, exception_handler, pid,
queues):
"Starts an RQ worker."
ctx.invoke(
rq_cli.worker,
burst=burst,
logging_level=logging_level,
name=name,
path=path,
results_ttl=results_ttl,
worker_ttl=worker_ttl,
verbose=verbose,
quiet=quiet,
sentry_dsn=sentry_dsn,
exception_handler=exception_handler or rq._exception_handlers,
pid=pid,
queues=queues or rq.queues,
**shared_options(rq)
)
@rq_command()
@click.option('--duration', type=int,
help='Seconds you want the workers to be suspended. '
'Default is forever.')
def suspend(rq, ctx, duration):
"Suspends all workers."
ctx.invoke(
rq_cli.suspend,
duration=duration,
**shared_options(rq)
)
@rq_command()
def resume(rq, ctx):
"Resumes all workers."
ctx.invoke(
rq_cli.resume,
**shared_options(rq)
)
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('-q', '--queue', metavar='QUEUE',
help='The name of the queue to run the scheduler with.')
@click.option('-i', '--interval', metavar='SECONDS', type=int,
help='How often the scheduler checks for new jobs to add to '
'the queue (in seconds, can be floating-point for more '
'precision).')
@click.option('--pid', metavar='FILE',
help='Write the process ID number '
'to a file at the specified path')
@rq_command(Scheduler is not None)
def scheduler(rq, ctx, verbose, burst, queue, interval, pid):
"Periodically checks for scheduled jobs."
scheduler = rq.get_scheduler(interval=interval, queue=queue)
if pid:
with open(os.path.expanduser(pid), 'w') as fp:
fp.write(str(os.getpid()))
if verbose:
level = 'DEBUG'
else:
level = 'INFO'
setup_loghandlers(level)
scheduler.run(burst=burst)
def add_commands(cli, rq):
@click.group(cls=AppGroup, help='Runs RQ commands with app context.')
@click.pass_context
def rq_group(ctx):
ctx.ensure_object(ScriptInfo).data['rq'] = rq
sorted_commands = sorted(_commands.items(), key=operator.itemgetter(0))
for name, func in sorted_commands:
rq_group.command(name=name)(func)
cli.add_command(rq_group, name='rq')
|
rq/Flask-RQ2
|
src/flask_rq2/cli.py
|
empty
|
python
|
def empty(rq, ctx, all, queues):
"Empty given queues."
return ctx.invoke(
rq_cli.empty,
all=all,
queues=queues or rq.queues,
**shared_options(rq)
)
|
Empty given queues.
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/cli.py#L66-L73
|
[
"def shared_options(rq):\n \"Default class options to pass to the CLI commands.\"\n return {\n 'url': rq.redis_url,\n 'config': None,\n 'worker_class': rq.worker_class,\n 'job_class': rq.job_class,\n 'queue_class': rq.queue_class,\n 'connection_class': rq.connection_class,\n }\n"
] |
# -*- coding: utf-8 -*-
"""
flask_rq2.cli
~~~~~~~~~~~~~
Support for the Click based Flask CLI via Flask-CLI.
"""
import operator
import os
from functools import update_wrapper
import click
from rq.cli import cli as rq_cli
from rq.defaults import DEFAULT_RESULT_TTL, DEFAULT_WORKER_TTL
try:
from flask.cli import AppGroup, ScriptInfo
except ImportError: # pragma: no cover
try:
from flask_cli import AppGroup, ScriptInfo
except ImportError:
raise RuntimeError('Cannot import Flask CLI. Is it installed?')
try:
from rq_scheduler import Scheduler
from rq_scheduler.utils import setup_loghandlers
except ImportError: # pragma: no cover
Scheduler = None
_commands = {}
def shared_options(rq):
"Default class options to pass to the CLI commands."
return {
'url': rq.redis_url,
'config': None,
'worker_class': rq.worker_class,
'job_class': rq.job_class,
'queue_class': rq.queue_class,
'connection_class': rq.connection_class,
}
def rq_command(condition=True):
def wrapper(func):
"""Marks a callback as wanting to receive the RQ object we've added
to the context
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
rq = ctx.obj.data.get('rq')
return func(rq, ctx, *args, **kwargs)
updated_wrapper = update_wrapper(new_func, func)
if condition:
_commands[updated_wrapper.__name__] = updated_wrapper
return updated_wrapper
return wrapper
@click.option('--all', '-a', is_flag=True, help='Empty all queues')
@click.argument('queues', nargs=-1)
@rq_command()
@click.option('--all', '-a', is_flag=True, help='Requeue all failed jobs')
@click.argument('job_ids', nargs=-1)
@rq_command()
def requeue(rq, ctx, all, job_ids):
"Requeue failed jobs."
return ctx.invoke(
rq_cli.requeue,
all=all,
job_ids=job_ids,
**shared_options(rq)
)
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--interval', '-i', type=float,
help='Updates stats every N seconds (default: don\'t poll)')
@click.option('--raw', '-r', is_flag=True,
help='Print only the raw numbers, no bar charts')
@click.option('--only-queues', '-Q', is_flag=True, help='Show only queue info')
@click.option('--only-workers', '-W', is_flag=True,
help='Show only worker info')
@click.option('--by-queue', '-R', is_flag=True, help='Shows workers by queue')
@click.argument('queues', nargs=-1)
@rq_command()
def info(rq, ctx, path, interval, raw, only_queues, only_workers, by_queue,
queues):
"RQ command-line monitor."
return ctx.invoke(
rq_cli.info,
path=path,
interval=interval,
raw=raw,
only_queues=only_queues,
only_workers=only_workers,
by_queue=by_queue,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('--logging_level', type=str, default="INFO",
help='Set logging level')
@click.option('--name', '-n', help='Specify a different name')
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--results-ttl', type=int, default=DEFAULT_RESULT_TTL,
help='Default results timeout to be used')
@click.option('--worker-ttl', type=int, default=DEFAULT_WORKER_TTL,
help='Default worker timeout to be used (default: 420)')
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--quiet', '-q', is_flag=True, help='Show less output')
@click.option('--sentry-dsn', default=None, help='Sentry DSN address')
@click.option('--exception-handler', help='Exception handler(s) to use',
multiple=True)
@click.option('--pid',
help='Write the process ID number to a file at '
'the specified path')
@click.argument('queues', nargs=-1)
@rq_command()
def worker(rq, ctx, burst, logging_level, name, path, results_ttl,
worker_ttl, verbose, quiet, sentry_dsn, exception_handler, pid,
queues):
"Starts an RQ worker."
ctx.invoke(
rq_cli.worker,
burst=burst,
logging_level=logging_level,
name=name,
path=path,
results_ttl=results_ttl,
worker_ttl=worker_ttl,
verbose=verbose,
quiet=quiet,
sentry_dsn=sentry_dsn,
exception_handler=exception_handler or rq._exception_handlers,
pid=pid,
queues=queues or rq.queues,
**shared_options(rq)
)
@rq_command()
@click.option('--duration', type=int,
help='Seconds you want the workers to be suspended. '
'Default is forever.')
def suspend(rq, ctx, duration):
"Suspends all workers."
ctx.invoke(
rq_cli.suspend,
duration=duration,
**shared_options(rq)
)
@rq_command()
def resume(rq, ctx):
"Resumes all workers."
ctx.invoke(
rq_cli.resume,
**shared_options(rq)
)
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('-q', '--queue', metavar='QUEUE',
help='The name of the queue to run the scheduler with.')
@click.option('-i', '--interval', metavar='SECONDS', type=int,
help='How often the scheduler checks for new jobs to add to '
'the queue (in seconds, can be floating-point for more '
'precision).')
@click.option('--pid', metavar='FILE',
help='Write the process ID number '
'to a file at the specified path')
@rq_command(Scheduler is not None)
def scheduler(rq, ctx, verbose, burst, queue, interval, pid):
"Periodically checks for scheduled jobs."
scheduler = rq.get_scheduler(interval=interval, queue=queue)
if pid:
with open(os.path.expanduser(pid), 'w') as fp:
fp.write(str(os.getpid()))
if verbose:
level = 'DEBUG'
else:
level = 'INFO'
setup_loghandlers(level)
scheduler.run(burst=burst)
def add_commands(cli, rq):
@click.group(cls=AppGroup, help='Runs RQ commands with app context.')
@click.pass_context
def rq_group(ctx):
ctx.ensure_object(ScriptInfo).data['rq'] = rq
sorted_commands = sorted(_commands.items(), key=operator.itemgetter(0))
for name, func in sorted_commands:
rq_group.command(name=name)(func)
cli.add_command(rq_group, name='rq')
|
rq/Flask-RQ2
|
src/flask_rq2/cli.py
|
requeue
|
python
|
def requeue(rq, ctx, all, job_ids):
"Requeue failed jobs."
return ctx.invoke(
rq_cli.requeue,
all=all,
job_ids=job_ids,
**shared_options(rq)
)
|
Requeue failed jobs.
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/cli.py#L79-L86
|
[
"def shared_options(rq):\n \"Default class options to pass to the CLI commands.\"\n return {\n 'url': rq.redis_url,\n 'config': None,\n 'worker_class': rq.worker_class,\n 'job_class': rq.job_class,\n 'queue_class': rq.queue_class,\n 'connection_class': rq.connection_class,\n }\n"
] |
# -*- coding: utf-8 -*-
"""
flask_rq2.cli
~~~~~~~~~~~~~
Support for the Click based Flask CLI via Flask-CLI.
"""
import operator
import os
from functools import update_wrapper
import click
from rq.cli import cli as rq_cli
from rq.defaults import DEFAULT_RESULT_TTL, DEFAULT_WORKER_TTL
try:
from flask.cli import AppGroup, ScriptInfo
except ImportError: # pragma: no cover
try:
from flask_cli import AppGroup, ScriptInfo
except ImportError:
raise RuntimeError('Cannot import Flask CLI. Is it installed?')
try:
from rq_scheduler import Scheduler
from rq_scheduler.utils import setup_loghandlers
except ImportError: # pragma: no cover
Scheduler = None
_commands = {}
def shared_options(rq):
"Default class options to pass to the CLI commands."
return {
'url': rq.redis_url,
'config': None,
'worker_class': rq.worker_class,
'job_class': rq.job_class,
'queue_class': rq.queue_class,
'connection_class': rq.connection_class,
}
def rq_command(condition=True):
def wrapper(func):
"""Marks a callback as wanting to receive the RQ object we've added
to the context
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
rq = ctx.obj.data.get('rq')
return func(rq, ctx, *args, **kwargs)
updated_wrapper = update_wrapper(new_func, func)
if condition:
_commands[updated_wrapper.__name__] = updated_wrapper
return updated_wrapper
return wrapper
@click.option('--all', '-a', is_flag=True, help='Empty all queues')
@click.argument('queues', nargs=-1)
@rq_command()
def empty(rq, ctx, all, queues):
"Empty given queues."
return ctx.invoke(
rq_cli.empty,
all=all,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--all', '-a', is_flag=True, help='Requeue all failed jobs')
@click.argument('job_ids', nargs=-1)
@rq_command()
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--interval', '-i', type=float,
help='Updates stats every N seconds (default: don\'t poll)')
@click.option('--raw', '-r', is_flag=True,
help='Print only the raw numbers, no bar charts')
@click.option('--only-queues', '-Q', is_flag=True, help='Show only queue info')
@click.option('--only-workers', '-W', is_flag=True,
help='Show only worker info')
@click.option('--by-queue', '-R', is_flag=True, help='Shows workers by queue')
@click.argument('queues', nargs=-1)
@rq_command()
def info(rq, ctx, path, interval, raw, only_queues, only_workers, by_queue,
queues):
"RQ command-line monitor."
return ctx.invoke(
rq_cli.info,
path=path,
interval=interval,
raw=raw,
only_queues=only_queues,
only_workers=only_workers,
by_queue=by_queue,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('--logging_level', type=str, default="INFO",
help='Set logging level')
@click.option('--name', '-n', help='Specify a different name')
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--results-ttl', type=int, default=DEFAULT_RESULT_TTL,
help='Default results timeout to be used')
@click.option('--worker-ttl', type=int, default=DEFAULT_WORKER_TTL,
help='Default worker timeout to be used (default: 420)')
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--quiet', '-q', is_flag=True, help='Show less output')
@click.option('--sentry-dsn', default=None, help='Sentry DSN address')
@click.option('--exception-handler', help='Exception handler(s) to use',
multiple=True)
@click.option('--pid',
help='Write the process ID number to a file at '
'the specified path')
@click.argument('queues', nargs=-1)
@rq_command()
def worker(rq, ctx, burst, logging_level, name, path, results_ttl,
worker_ttl, verbose, quiet, sentry_dsn, exception_handler, pid,
queues):
"Starts an RQ worker."
ctx.invoke(
rq_cli.worker,
burst=burst,
logging_level=logging_level,
name=name,
path=path,
results_ttl=results_ttl,
worker_ttl=worker_ttl,
verbose=verbose,
quiet=quiet,
sentry_dsn=sentry_dsn,
exception_handler=exception_handler or rq._exception_handlers,
pid=pid,
queues=queues or rq.queues,
**shared_options(rq)
)
@rq_command()
@click.option('--duration', type=int,
help='Seconds you want the workers to be suspended. '
'Default is forever.')
def suspend(rq, ctx, duration):
"Suspends all workers."
ctx.invoke(
rq_cli.suspend,
duration=duration,
**shared_options(rq)
)
@rq_command()
def resume(rq, ctx):
"Resumes all workers."
ctx.invoke(
rq_cli.resume,
**shared_options(rq)
)
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('-q', '--queue', metavar='QUEUE',
help='The name of the queue to run the scheduler with.')
@click.option('-i', '--interval', metavar='SECONDS', type=int,
help='How often the scheduler checks for new jobs to add to '
'the queue (in seconds, can be floating-point for more '
'precision).')
@click.option('--pid', metavar='FILE',
help='Write the process ID number '
'to a file at the specified path')
@rq_command(Scheduler is not None)
def scheduler(rq, ctx, verbose, burst, queue, interval, pid):
"Periodically checks for scheduled jobs."
scheduler = rq.get_scheduler(interval=interval, queue=queue)
if pid:
with open(os.path.expanduser(pid), 'w') as fp:
fp.write(str(os.getpid()))
if verbose:
level = 'DEBUG'
else:
level = 'INFO'
setup_loghandlers(level)
scheduler.run(burst=burst)
def add_commands(cli, rq):
@click.group(cls=AppGroup, help='Runs RQ commands with app context.')
@click.pass_context
def rq_group(ctx):
ctx.ensure_object(ScriptInfo).data['rq'] = rq
sorted_commands = sorted(_commands.items(), key=operator.itemgetter(0))
for name, func in sorted_commands:
rq_group.command(name=name)(func)
cli.add_command(rq_group, name='rq')
|
rq/Flask-RQ2
|
src/flask_rq2/cli.py
|
info
|
python
|
def info(rq, ctx, path, interval, raw, only_queues, only_workers, by_queue,
queues):
"RQ command-line monitor."
return ctx.invoke(
rq_cli.info,
path=path,
interval=interval,
raw=raw,
only_queues=only_queues,
only_workers=only_workers,
by_queue=by_queue,
queues=queues or rq.queues,
**shared_options(rq)
)
|
RQ command-line monitor.
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/cli.py#L100-L113
|
[
"def shared_options(rq):\n \"Default class options to pass to the CLI commands.\"\n return {\n 'url': rq.redis_url,\n 'config': None,\n 'worker_class': rq.worker_class,\n 'job_class': rq.job_class,\n 'queue_class': rq.queue_class,\n 'connection_class': rq.connection_class,\n }\n"
] |
# -*- coding: utf-8 -*-
"""
flask_rq2.cli
~~~~~~~~~~~~~
Support for the Click based Flask CLI via Flask-CLI.
"""
import operator
import os
from functools import update_wrapper
import click
from rq.cli import cli as rq_cli
from rq.defaults import DEFAULT_RESULT_TTL, DEFAULT_WORKER_TTL
try:
from flask.cli import AppGroup, ScriptInfo
except ImportError: # pragma: no cover
try:
from flask_cli import AppGroup, ScriptInfo
except ImportError:
raise RuntimeError('Cannot import Flask CLI. Is it installed?')
try:
from rq_scheduler import Scheduler
from rq_scheduler.utils import setup_loghandlers
except ImportError: # pragma: no cover
Scheduler = None
_commands = {}
def shared_options(rq):
"Default class options to pass to the CLI commands."
return {
'url': rq.redis_url,
'config': None,
'worker_class': rq.worker_class,
'job_class': rq.job_class,
'queue_class': rq.queue_class,
'connection_class': rq.connection_class,
}
def rq_command(condition=True):
def wrapper(func):
"""Marks a callback as wanting to receive the RQ object we've added
to the context
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
rq = ctx.obj.data.get('rq')
return func(rq, ctx, *args, **kwargs)
updated_wrapper = update_wrapper(new_func, func)
if condition:
_commands[updated_wrapper.__name__] = updated_wrapper
return updated_wrapper
return wrapper
@click.option('--all', '-a', is_flag=True, help='Empty all queues')
@click.argument('queues', nargs=-1)
@rq_command()
def empty(rq, ctx, all, queues):
"Empty given queues."
return ctx.invoke(
rq_cli.empty,
all=all,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--all', '-a', is_flag=True, help='Requeue all failed jobs')
@click.argument('job_ids', nargs=-1)
@rq_command()
def requeue(rq, ctx, all, job_ids):
"Requeue failed jobs."
return ctx.invoke(
rq_cli.requeue,
all=all,
job_ids=job_ids,
**shared_options(rq)
)
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--interval', '-i', type=float,
help='Updates stats every N seconds (default: don\'t poll)')
@click.option('--raw', '-r', is_flag=True,
help='Print only the raw numbers, no bar charts')
@click.option('--only-queues', '-Q', is_flag=True, help='Show only queue info')
@click.option('--only-workers', '-W', is_flag=True,
help='Show only worker info')
@click.option('--by-queue', '-R', is_flag=True, help='Shows workers by queue')
@click.argument('queues', nargs=-1)
@rq_command()
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('--logging_level', type=str, default="INFO",
help='Set logging level')
@click.option('--name', '-n', help='Specify a different name')
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--results-ttl', type=int, default=DEFAULT_RESULT_TTL,
help='Default results timeout to be used')
@click.option('--worker-ttl', type=int, default=DEFAULT_WORKER_TTL,
help='Default worker timeout to be used (default: 420)')
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--quiet', '-q', is_flag=True, help='Show less output')
@click.option('--sentry-dsn', default=None, help='Sentry DSN address')
@click.option('--exception-handler', help='Exception handler(s) to use',
multiple=True)
@click.option('--pid',
help='Write the process ID number to a file at '
'the specified path')
@click.argument('queues', nargs=-1)
@rq_command()
def worker(rq, ctx, burst, logging_level, name, path, results_ttl,
worker_ttl, verbose, quiet, sentry_dsn, exception_handler, pid,
queues):
"Starts an RQ worker."
ctx.invoke(
rq_cli.worker,
burst=burst,
logging_level=logging_level,
name=name,
path=path,
results_ttl=results_ttl,
worker_ttl=worker_ttl,
verbose=verbose,
quiet=quiet,
sentry_dsn=sentry_dsn,
exception_handler=exception_handler or rq._exception_handlers,
pid=pid,
queues=queues or rq.queues,
**shared_options(rq)
)
@rq_command()
@click.option('--duration', type=int,
help='Seconds you want the workers to be suspended. '
'Default is forever.')
def suspend(rq, ctx, duration):
"Suspends all workers."
ctx.invoke(
rq_cli.suspend,
duration=duration,
**shared_options(rq)
)
@rq_command()
def resume(rq, ctx):
"Resumes all workers."
ctx.invoke(
rq_cli.resume,
**shared_options(rq)
)
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('-q', '--queue', metavar='QUEUE',
help='The name of the queue to run the scheduler with.')
@click.option('-i', '--interval', metavar='SECONDS', type=int,
help='How often the scheduler checks for new jobs to add to '
'the queue (in seconds, can be floating-point for more '
'precision).')
@click.option('--pid', metavar='FILE',
help='Write the process ID number '
'to a file at the specified path')
@rq_command(Scheduler is not None)
def scheduler(rq, ctx, verbose, burst, queue, interval, pid):
"Periodically checks for scheduled jobs."
scheduler = rq.get_scheduler(interval=interval, queue=queue)
if pid:
with open(os.path.expanduser(pid), 'w') as fp:
fp.write(str(os.getpid()))
if verbose:
level = 'DEBUG'
else:
level = 'INFO'
setup_loghandlers(level)
scheduler.run(burst=burst)
def add_commands(cli, rq):
@click.group(cls=AppGroup, help='Runs RQ commands with app context.')
@click.pass_context
def rq_group(ctx):
ctx.ensure_object(ScriptInfo).data['rq'] = rq
sorted_commands = sorted(_commands.items(), key=operator.itemgetter(0))
for name, func in sorted_commands:
rq_group.command(name=name)(func)
cli.add_command(rq_group, name='rq')
|
rq/Flask-RQ2
|
src/flask_rq2/cli.py
|
worker
|
python
|
def worker(rq, ctx, burst, logging_level, name, path, results_ttl,
worker_ttl, verbose, quiet, sentry_dsn, exception_handler, pid,
queues):
"Starts an RQ worker."
ctx.invoke(
rq_cli.worker,
burst=burst,
logging_level=logging_level,
name=name,
path=path,
results_ttl=results_ttl,
worker_ttl=worker_ttl,
verbose=verbose,
quiet=quiet,
sentry_dsn=sentry_dsn,
exception_handler=exception_handler or rq._exception_handlers,
pid=pid,
queues=queues or rq.queues,
**shared_options(rq)
)
|
Starts an RQ worker.
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/cli.py#L136-L155
|
[
"def shared_options(rq):\n \"Default class options to pass to the CLI commands.\"\n return {\n 'url': rq.redis_url,\n 'config': None,\n 'worker_class': rq.worker_class,\n 'job_class': rq.job_class,\n 'queue_class': rq.queue_class,\n 'connection_class': rq.connection_class,\n }\n"
] |
# -*- coding: utf-8 -*-
"""
flask_rq2.cli
~~~~~~~~~~~~~
Support for the Click based Flask CLI via Flask-CLI.
"""
import operator
import os
from functools import update_wrapper
import click
from rq.cli import cli as rq_cli
from rq.defaults import DEFAULT_RESULT_TTL, DEFAULT_WORKER_TTL
try:
from flask.cli import AppGroup, ScriptInfo
except ImportError: # pragma: no cover
try:
from flask_cli import AppGroup, ScriptInfo
except ImportError:
raise RuntimeError('Cannot import Flask CLI. Is it installed?')
try:
from rq_scheduler import Scheduler
from rq_scheduler.utils import setup_loghandlers
except ImportError: # pragma: no cover
Scheduler = None
_commands = {}
def shared_options(rq):
"Default class options to pass to the CLI commands."
return {
'url': rq.redis_url,
'config': None,
'worker_class': rq.worker_class,
'job_class': rq.job_class,
'queue_class': rq.queue_class,
'connection_class': rq.connection_class,
}
def rq_command(condition=True):
def wrapper(func):
"""Marks a callback as wanting to receive the RQ object we've added
to the context
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
rq = ctx.obj.data.get('rq')
return func(rq, ctx, *args, **kwargs)
updated_wrapper = update_wrapper(new_func, func)
if condition:
_commands[updated_wrapper.__name__] = updated_wrapper
return updated_wrapper
return wrapper
@click.option('--all', '-a', is_flag=True, help='Empty all queues')
@click.argument('queues', nargs=-1)
@rq_command()
def empty(rq, ctx, all, queues):
"Empty given queues."
return ctx.invoke(
rq_cli.empty,
all=all,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--all', '-a', is_flag=True, help='Requeue all failed jobs')
@click.argument('job_ids', nargs=-1)
@rq_command()
def requeue(rq, ctx, all, job_ids):
"Requeue failed jobs."
return ctx.invoke(
rq_cli.requeue,
all=all,
job_ids=job_ids,
**shared_options(rq)
)
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--interval', '-i', type=float,
help='Updates stats every N seconds (default: don\'t poll)')
@click.option('--raw', '-r', is_flag=True,
help='Print only the raw numbers, no bar charts')
@click.option('--only-queues', '-Q', is_flag=True, help='Show only queue info')
@click.option('--only-workers', '-W', is_flag=True,
help='Show only worker info')
@click.option('--by-queue', '-R', is_flag=True, help='Shows workers by queue')
@click.argument('queues', nargs=-1)
@rq_command()
def info(rq, ctx, path, interval, raw, only_queues, only_workers, by_queue,
queues):
"RQ command-line monitor."
return ctx.invoke(
rq_cli.info,
path=path,
interval=interval,
raw=raw,
only_queues=only_queues,
only_workers=only_workers,
by_queue=by_queue,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('--logging_level', type=str, default="INFO",
help='Set logging level')
@click.option('--name', '-n', help='Specify a different name')
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--results-ttl', type=int, default=DEFAULT_RESULT_TTL,
help='Default results timeout to be used')
@click.option('--worker-ttl', type=int, default=DEFAULT_WORKER_TTL,
help='Default worker timeout to be used (default: 420)')
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--quiet', '-q', is_flag=True, help='Show less output')
@click.option('--sentry-dsn', default=None, help='Sentry DSN address')
@click.option('--exception-handler', help='Exception handler(s) to use',
multiple=True)
@click.option('--pid',
help='Write the process ID number to a file at '
'the specified path')
@click.argument('queues', nargs=-1)
@rq_command()
@rq_command()
@click.option('--duration', type=int,
help='Seconds you want the workers to be suspended. '
'Default is forever.')
def suspend(rq, ctx, duration):
"Suspends all workers."
ctx.invoke(
rq_cli.suspend,
duration=duration,
**shared_options(rq)
)
@rq_command()
def resume(rq, ctx):
"Resumes all workers."
ctx.invoke(
rq_cli.resume,
**shared_options(rq)
)
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('-q', '--queue', metavar='QUEUE',
help='The name of the queue to run the scheduler with.')
@click.option('-i', '--interval', metavar='SECONDS', type=int,
help='How often the scheduler checks for new jobs to add to '
'the queue (in seconds, can be floating-point for more '
'precision).')
@click.option('--pid', metavar='FILE',
help='Write the process ID number '
'to a file at the specified path')
@rq_command(Scheduler is not None)
def scheduler(rq, ctx, verbose, burst, queue, interval, pid):
"Periodically checks for scheduled jobs."
scheduler = rq.get_scheduler(interval=interval, queue=queue)
if pid:
with open(os.path.expanduser(pid), 'w') as fp:
fp.write(str(os.getpid()))
if verbose:
level = 'DEBUG'
else:
level = 'INFO'
setup_loghandlers(level)
scheduler.run(burst=burst)
def add_commands(cli, rq):
@click.group(cls=AppGroup, help='Runs RQ commands with app context.')
@click.pass_context
def rq_group(ctx):
ctx.ensure_object(ScriptInfo).data['rq'] = rq
sorted_commands = sorted(_commands.items(), key=operator.itemgetter(0))
for name, func in sorted_commands:
rq_group.command(name=name)(func)
cli.add_command(rq_group, name='rq')
|
rq/Flask-RQ2
|
src/flask_rq2/cli.py
|
suspend
|
python
|
def suspend(rq, ctx, duration):
"Suspends all workers."
ctx.invoke(
rq_cli.suspend,
duration=duration,
**shared_options(rq)
)
|
Suspends all workers.
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/cli.py#L162-L168
|
[
"def shared_options(rq):\n \"Default class options to pass to the CLI commands.\"\n return {\n 'url': rq.redis_url,\n 'config': None,\n 'worker_class': rq.worker_class,\n 'job_class': rq.job_class,\n 'queue_class': rq.queue_class,\n 'connection_class': rq.connection_class,\n }\n"
] |
# -*- coding: utf-8 -*-
"""
flask_rq2.cli
~~~~~~~~~~~~~
Support for the Click based Flask CLI via Flask-CLI.
"""
import operator
import os
from functools import update_wrapper
import click
from rq.cli import cli as rq_cli
from rq.defaults import DEFAULT_RESULT_TTL, DEFAULT_WORKER_TTL
try:
from flask.cli import AppGroup, ScriptInfo
except ImportError: # pragma: no cover
try:
from flask_cli import AppGroup, ScriptInfo
except ImportError:
raise RuntimeError('Cannot import Flask CLI. Is it installed?')
try:
from rq_scheduler import Scheduler
from rq_scheduler.utils import setup_loghandlers
except ImportError: # pragma: no cover
Scheduler = None
_commands = {}
def shared_options(rq):
"Default class options to pass to the CLI commands."
return {
'url': rq.redis_url,
'config': None,
'worker_class': rq.worker_class,
'job_class': rq.job_class,
'queue_class': rq.queue_class,
'connection_class': rq.connection_class,
}
def rq_command(condition=True):
def wrapper(func):
"""Marks a callback as wanting to receive the RQ object we've added
to the context
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
rq = ctx.obj.data.get('rq')
return func(rq, ctx, *args, **kwargs)
updated_wrapper = update_wrapper(new_func, func)
if condition:
_commands[updated_wrapper.__name__] = updated_wrapper
return updated_wrapper
return wrapper
@click.option('--all', '-a', is_flag=True, help='Empty all queues')
@click.argument('queues', nargs=-1)
@rq_command()
def empty(rq, ctx, all, queues):
"Empty given queues."
return ctx.invoke(
rq_cli.empty,
all=all,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--all', '-a', is_flag=True, help='Requeue all failed jobs')
@click.argument('job_ids', nargs=-1)
@rq_command()
def requeue(rq, ctx, all, job_ids):
"Requeue failed jobs."
return ctx.invoke(
rq_cli.requeue,
all=all,
job_ids=job_ids,
**shared_options(rq)
)
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--interval', '-i', type=float,
help='Updates stats every N seconds (default: don\'t poll)')
@click.option('--raw', '-r', is_flag=True,
help='Print only the raw numbers, no bar charts')
@click.option('--only-queues', '-Q', is_flag=True, help='Show only queue info')
@click.option('--only-workers', '-W', is_flag=True,
help='Show only worker info')
@click.option('--by-queue', '-R', is_flag=True, help='Shows workers by queue')
@click.argument('queues', nargs=-1)
@rq_command()
def info(rq, ctx, path, interval, raw, only_queues, only_workers, by_queue,
queues):
"RQ command-line monitor."
return ctx.invoke(
rq_cli.info,
path=path,
interval=interval,
raw=raw,
only_queues=only_queues,
only_workers=only_workers,
by_queue=by_queue,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('--logging_level', type=str, default="INFO",
help='Set logging level')
@click.option('--name', '-n', help='Specify a different name')
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--results-ttl', type=int, default=DEFAULT_RESULT_TTL,
help='Default results timeout to be used')
@click.option('--worker-ttl', type=int, default=DEFAULT_WORKER_TTL,
help='Default worker timeout to be used (default: 420)')
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--quiet', '-q', is_flag=True, help='Show less output')
@click.option('--sentry-dsn', default=None, help='Sentry DSN address')
@click.option('--exception-handler', help='Exception handler(s) to use',
multiple=True)
@click.option('--pid',
help='Write the process ID number to a file at '
'the specified path')
@click.argument('queues', nargs=-1)
@rq_command()
def worker(rq, ctx, burst, logging_level, name, path, results_ttl,
worker_ttl, verbose, quiet, sentry_dsn, exception_handler, pid,
queues):
"Starts an RQ worker."
ctx.invoke(
rq_cli.worker,
burst=burst,
logging_level=logging_level,
name=name,
path=path,
results_ttl=results_ttl,
worker_ttl=worker_ttl,
verbose=verbose,
quiet=quiet,
sentry_dsn=sentry_dsn,
exception_handler=exception_handler or rq._exception_handlers,
pid=pid,
queues=queues or rq.queues,
**shared_options(rq)
)
@rq_command()
@click.option('--duration', type=int,
help='Seconds you want the workers to be suspended. '
'Default is forever.')
@rq_command()
def resume(rq, ctx):
"Resumes all workers."
ctx.invoke(
rq_cli.resume,
**shared_options(rq)
)
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('-q', '--queue', metavar='QUEUE',
help='The name of the queue to run the scheduler with.')
@click.option('-i', '--interval', metavar='SECONDS', type=int,
help='How often the scheduler checks for new jobs to add to '
'the queue (in seconds, can be floating-point for more '
'precision).')
@click.option('--pid', metavar='FILE',
help='Write the process ID number '
'to a file at the specified path')
@rq_command(Scheduler is not None)
def scheduler(rq, ctx, verbose, burst, queue, interval, pid):
"Periodically checks for scheduled jobs."
scheduler = rq.get_scheduler(interval=interval, queue=queue)
if pid:
with open(os.path.expanduser(pid), 'w') as fp:
fp.write(str(os.getpid()))
if verbose:
level = 'DEBUG'
else:
level = 'INFO'
setup_loghandlers(level)
scheduler.run(burst=burst)
def add_commands(cli, rq):
@click.group(cls=AppGroup, help='Runs RQ commands with app context.')
@click.pass_context
def rq_group(ctx):
ctx.ensure_object(ScriptInfo).data['rq'] = rq
sorted_commands = sorted(_commands.items(), key=operator.itemgetter(0))
for name, func in sorted_commands:
rq_group.command(name=name)(func)
cli.add_command(rq_group, name='rq')
|
rq/Flask-RQ2
|
src/flask_rq2/cli.py
|
scheduler
|
python
|
def scheduler(rq, ctx, verbose, burst, queue, interval, pid):
"Periodically checks for scheduled jobs."
scheduler = rq.get_scheduler(interval=interval, queue=queue)
if pid:
with open(os.path.expanduser(pid), 'w') as fp:
fp.write(str(os.getpid()))
if verbose:
level = 'DEBUG'
else:
level = 'INFO'
setup_loghandlers(level)
scheduler.run(burst=burst)
|
Periodically checks for scheduled jobs.
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/cli.py#L193-L204
| null |
# -*- coding: utf-8 -*-
"""
flask_rq2.cli
~~~~~~~~~~~~~
Support for the Click based Flask CLI via Flask-CLI.
"""
import operator
import os
from functools import update_wrapper
import click
from rq.cli import cli as rq_cli
from rq.defaults import DEFAULT_RESULT_TTL, DEFAULT_WORKER_TTL
try:
from flask.cli import AppGroup, ScriptInfo
except ImportError: # pragma: no cover
try:
from flask_cli import AppGroup, ScriptInfo
except ImportError:
raise RuntimeError('Cannot import Flask CLI. Is it installed?')
try:
from rq_scheduler import Scheduler
from rq_scheduler.utils import setup_loghandlers
except ImportError: # pragma: no cover
Scheduler = None
_commands = {}
def shared_options(rq):
"Default class options to pass to the CLI commands."
return {
'url': rq.redis_url,
'config': None,
'worker_class': rq.worker_class,
'job_class': rq.job_class,
'queue_class': rq.queue_class,
'connection_class': rq.connection_class,
}
def rq_command(condition=True):
def wrapper(func):
"""Marks a callback as wanting to receive the RQ object we've added
to the context
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
rq = ctx.obj.data.get('rq')
return func(rq, ctx, *args, **kwargs)
updated_wrapper = update_wrapper(new_func, func)
if condition:
_commands[updated_wrapper.__name__] = updated_wrapper
return updated_wrapper
return wrapper
@click.option('--all', '-a', is_flag=True, help='Empty all queues')
@click.argument('queues', nargs=-1)
@rq_command()
def empty(rq, ctx, all, queues):
"Empty given queues."
return ctx.invoke(
rq_cli.empty,
all=all,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--all', '-a', is_flag=True, help='Requeue all failed jobs')
@click.argument('job_ids', nargs=-1)
@rq_command()
def requeue(rq, ctx, all, job_ids):
"Requeue failed jobs."
return ctx.invoke(
rq_cli.requeue,
all=all,
job_ids=job_ids,
**shared_options(rq)
)
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--interval', '-i', type=float,
help='Updates stats every N seconds (default: don\'t poll)')
@click.option('--raw', '-r', is_flag=True,
help='Print only the raw numbers, no bar charts')
@click.option('--only-queues', '-Q', is_flag=True, help='Show only queue info')
@click.option('--only-workers', '-W', is_flag=True,
help='Show only worker info')
@click.option('--by-queue', '-R', is_flag=True, help='Shows workers by queue')
@click.argument('queues', nargs=-1)
@rq_command()
def info(rq, ctx, path, interval, raw, only_queues, only_workers, by_queue,
queues):
"RQ command-line monitor."
return ctx.invoke(
rq_cli.info,
path=path,
interval=interval,
raw=raw,
only_queues=only_queues,
only_workers=only_workers,
by_queue=by_queue,
queues=queues or rq.queues,
**shared_options(rq)
)
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('--logging_level', type=str, default="INFO",
help='Set logging level')
@click.option('--name', '-n', help='Specify a different name')
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--results-ttl', type=int, default=DEFAULT_RESULT_TTL,
help='Default results timeout to be used')
@click.option('--worker-ttl', type=int, default=DEFAULT_WORKER_TTL,
help='Default worker timeout to be used (default: 420)')
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--quiet', '-q', is_flag=True, help='Show less output')
@click.option('--sentry-dsn', default=None, help='Sentry DSN address')
@click.option('--exception-handler', help='Exception handler(s) to use',
multiple=True)
@click.option('--pid',
help='Write the process ID number to a file at '
'the specified path')
@click.argument('queues', nargs=-1)
@rq_command()
def worker(rq, ctx, burst, logging_level, name, path, results_ttl,
worker_ttl, verbose, quiet, sentry_dsn, exception_handler, pid,
queues):
"Starts an RQ worker."
ctx.invoke(
rq_cli.worker,
burst=burst,
logging_level=logging_level,
name=name,
path=path,
results_ttl=results_ttl,
worker_ttl=worker_ttl,
verbose=verbose,
quiet=quiet,
sentry_dsn=sentry_dsn,
exception_handler=exception_handler or rq._exception_handlers,
pid=pid,
queues=queues or rq.queues,
**shared_options(rq)
)
@rq_command()
@click.option('--duration', type=int,
help='Seconds you want the workers to be suspended. '
'Default is forever.')
def suspend(rq, ctx, duration):
"Suspends all workers."
ctx.invoke(
rq_cli.suspend,
duration=duration,
**shared_options(rq)
)
@rq_command()
def resume(rq, ctx):
"Resumes all workers."
ctx.invoke(
rq_cli.resume,
**shared_options(rq)
)
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--burst', '-b', is_flag=True,
help='Run in burst mode (quit after all work is done)')
@click.option('-q', '--queue', metavar='QUEUE',
help='The name of the queue to run the scheduler with.')
@click.option('-i', '--interval', metavar='SECONDS', type=int,
help='How often the scheduler checks for new jobs to add to '
'the queue (in seconds, can be floating-point for more '
'precision).')
@click.option('--pid', metavar='FILE',
help='Write the process ID number '
'to a file at the specified path')
@rq_command(Scheduler is not None)
def add_commands(cli, rq):
@click.group(cls=AppGroup, help='Runs RQ commands with app context.')
@click.pass_context
def rq_group(ctx):
ctx.ensure_object(ScriptInfo).data['rq'] = rq
sorted_commands = sorted(_commands.items(), key=operator.itemgetter(0))
for name, func in sorted_commands:
rq_group.command(name=name)(func)
cli.add_command(rq_group, name='rq')
|
rq/Flask-RQ2
|
src/flask_rq2/functions.py
|
JobFunctions.queue
|
python
|
def queue(self, *args, **kwargs):
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
result_ttl = kwargs.pop('result_ttl', self.result_ttl)
ttl = kwargs.pop('ttl', self.ttl)
depends_on = kwargs.pop('depends_on', self._depends_on)
job_id = kwargs.pop('job_id', None)
at_front = kwargs.pop('at_front', self._at_front)
meta = kwargs.pop('meta', self._meta)
description = kwargs.pop('description', self._description)
return self.rq.get_queue(queue_name).enqueue_call(
self.wrapped,
args=args,
kwargs=kwargs,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
job_id=job_id,
at_front=at_front,
meta=meta,
description=description,
)
|
A function to queue a RQ job, e.g.::
@rq.job(timeout=60)
def add(x, y):
return x + y
add.queue(1, 2, timeout=30)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param job_id: A custom ID for the new job. Defaults to an
:mod:`UUID <uuid>`.
:type job_id: str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/functions.py#L65-L139
| null |
class JobFunctions(object):
"""
Some helper functions that are added to a function decorated
with a :meth:`~flask_rq2.app.RQ.job` decorator.
"""
#: the methods to add to jobs automatically
functions = ['queue', 'schedule', 'cron']
def __init__(self, rq, wrapped, queue_name, timeout, result_ttl, ttl,
depends_on, at_front, meta, description):
self.rq = rq
self.wrapped = wrapped
self._queue_name = queue_name
self._timeout = timeout
self._result_ttl = result_ttl
# job TTLs don't have a default value
# https://github.com/nvie/rq/issues/873
self.ttl = ttl
self._depends_on = depends_on
self._at_front = at_front
self._meta = meta
self._description = description
def __repr__(self):
full_name = '.'.join([self.wrapped.__module__, self.wrapped.__name__])
return '<JobFunctions %s>' % full_name
@property
def queue_name(self):
# Catch empty strings and None
return self._queue_name or self.rq.default_queue
@queue_name.setter
def queue_name(self, value):
self._queue_name = value
@property
def timeout(self):
return self._timeout or self.rq.default_timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
@property
def result_ttl(self):
# Allow a result TTL of 0
if self._result_ttl is None:
return self.rq.default_result_ttl
else:
return self._result_ttl
@result_ttl.setter
def result_ttl(self, value):
self._result_ttl = value
def schedule(self, time_or_delta, *args, **kwargs):
"""
A function to schedule running a RQ job at a given time
or after a given timespan::
@rq.job
def add(x, y):
return x + y
add.schedule(timedelta(hours=2), 1, 2, timeout=10)
add.schedule(datetime(2016, 12, 31, 23, 59, 59), 1, 2)
add.schedule(timedelta(days=14), 1, 2, repeat=1)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param repeat: The number of times the job needs to be repeatedly
queued. Requires setting the ``interval`` parameter.
:type repeat: int
:param interval: The interval of repetition as defined by the
``repeat`` parameter in seconds.
:type interval: int
:param job_id: A custom ID for the new job. Defaults to a UUID.
:type job_id: str
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
description = kwargs.pop('description', None)
result_ttl = kwargs.pop('result_ttl', self.result_ttl)
ttl = kwargs.pop('ttl', self.ttl)
repeat = kwargs.pop('repeat', None)
interval = kwargs.pop('interval', None)
job_id = kwargs.pop('job_id', None)
if isinstance(time_or_delta, timedelta):
time = datetime.utcnow() + time_or_delta
else:
time = time_or_delta
return self.rq.get_scheduler().schedule(
time,
self.wrapped,
args=args,
kwargs=kwargs,
interval=interval,
repeat=repeat,
result_ttl=result_ttl,
ttl=ttl,
timeout=timeout,
id=job_id,
description=description,
queue_name=queue_name,
)
def cron(self, pattern, name, *args, **kwargs):
"""
A function to setup a RQ job as a cronjob::
@rq.job('low', timeout=60)
def add(x, y):
return x + y
add.cron('* * * * *', 'add-some-numbers', 1, 2, timeout=10)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param pattern: A Crontab pattern.
:type pattern: str
:param name: The name of the cronjob.
:type name: str
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param repeat: The number of times the job needs to be repeatedly
queued via the cronjob. Take care only using this for
cronjob that don't already repeat themselves natively
due to their crontab.
:type repeat: int
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
description = kwargs.pop('description', None)
repeat = kwargs.pop('repeat', None)
return self.rq.get_scheduler().cron(
pattern,
self.wrapped,
args=args,
kwargs=kwargs,
repeat=repeat,
queue_name=queue_name,
id='cron-%s' % name,
timeout=timeout,
description=description,
)
|
rq/Flask-RQ2
|
src/flask_rq2/functions.py
|
JobFunctions.schedule
|
python
|
def schedule(self, time_or_delta, *args, **kwargs):
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
description = kwargs.pop('description', None)
result_ttl = kwargs.pop('result_ttl', self.result_ttl)
ttl = kwargs.pop('ttl', self.ttl)
repeat = kwargs.pop('repeat', None)
interval = kwargs.pop('interval', None)
job_id = kwargs.pop('job_id', None)
if isinstance(time_or_delta, timedelta):
time = datetime.utcnow() + time_or_delta
else:
time = time_or_delta
return self.rq.get_scheduler().schedule(
time,
self.wrapped,
args=args,
kwargs=kwargs,
interval=interval,
repeat=repeat,
result_ttl=result_ttl,
ttl=ttl,
timeout=timeout,
id=job_id,
description=description,
queue_name=queue_name,
)
|
A function to schedule running a RQ job at a given time
or after a given timespan::
@rq.job
def add(x, y):
return x + y
add.schedule(timedelta(hours=2), 1, 2, timeout=10)
add.schedule(datetime(2016, 12, 31, 23, 59, 59), 1, 2)
add.schedule(timedelta(days=14), 1, 2, repeat=1)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param repeat: The number of times the job needs to be repeatedly
queued. Requires setting the ``interval`` parameter.
:type repeat: int
:param interval: The interval of repetition as defined by the
``repeat`` parameter in seconds.
:type interval: int
:param job_id: A custom ID for the new job. Defaults to a UUID.
:type job_id: str
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/functions.py#L141-L221
| null |
class JobFunctions(object):
"""
Some helper functions that are added to a function decorated
with a :meth:`~flask_rq2.app.RQ.job` decorator.
"""
#: the methods to add to jobs automatically
functions = ['queue', 'schedule', 'cron']
def __init__(self, rq, wrapped, queue_name, timeout, result_ttl, ttl,
depends_on, at_front, meta, description):
self.rq = rq
self.wrapped = wrapped
self._queue_name = queue_name
self._timeout = timeout
self._result_ttl = result_ttl
# job TTLs don't have a default value
# https://github.com/nvie/rq/issues/873
self.ttl = ttl
self._depends_on = depends_on
self._at_front = at_front
self._meta = meta
self._description = description
def __repr__(self):
full_name = '.'.join([self.wrapped.__module__, self.wrapped.__name__])
return '<JobFunctions %s>' % full_name
@property
def queue_name(self):
# Catch empty strings and None
return self._queue_name or self.rq.default_queue
@queue_name.setter
def queue_name(self, value):
self._queue_name = value
@property
def timeout(self):
return self._timeout or self.rq.default_timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
@property
def result_ttl(self):
# Allow a result TTL of 0
if self._result_ttl is None:
return self.rq.default_result_ttl
else:
return self._result_ttl
@result_ttl.setter
def result_ttl(self, value):
self._result_ttl = value
def queue(self, *args, **kwargs):
"""
A function to queue a RQ job, e.g.::
@rq.job(timeout=60)
def add(x, y):
return x + y
add.queue(1, 2, timeout=30)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param job_id: A custom ID for the new job. Defaults to an
:mod:`UUID <uuid>`.
:type job_id: str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
result_ttl = kwargs.pop('result_ttl', self.result_ttl)
ttl = kwargs.pop('ttl', self.ttl)
depends_on = kwargs.pop('depends_on', self._depends_on)
job_id = kwargs.pop('job_id', None)
at_front = kwargs.pop('at_front', self._at_front)
meta = kwargs.pop('meta', self._meta)
description = kwargs.pop('description', self._description)
return self.rq.get_queue(queue_name).enqueue_call(
self.wrapped,
args=args,
kwargs=kwargs,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
job_id=job_id,
at_front=at_front,
meta=meta,
description=description,
)
def cron(self, pattern, name, *args, **kwargs):
"""
A function to setup a RQ job as a cronjob::
@rq.job('low', timeout=60)
def add(x, y):
return x + y
add.cron('* * * * *', 'add-some-numbers', 1, 2, timeout=10)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param pattern: A Crontab pattern.
:type pattern: str
:param name: The name of the cronjob.
:type name: str
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param repeat: The number of times the job needs to be repeatedly
queued via the cronjob. Take care only using this for
cronjob that don't already repeat themselves natively
due to their crontab.
:type repeat: int
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
description = kwargs.pop('description', None)
repeat = kwargs.pop('repeat', None)
return self.rq.get_scheduler().cron(
pattern,
self.wrapped,
args=args,
kwargs=kwargs,
repeat=repeat,
queue_name=queue_name,
id='cron-%s' % name,
timeout=timeout,
description=description,
)
|
rq/Flask-RQ2
|
src/flask_rq2/functions.py
|
JobFunctions.cron
|
python
|
def cron(self, pattern, name, *args, **kwargs):
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
description = kwargs.pop('description', None)
repeat = kwargs.pop('repeat', None)
return self.rq.get_scheduler().cron(
pattern,
self.wrapped,
args=args,
kwargs=kwargs,
repeat=repeat,
queue_name=queue_name,
id='cron-%s' % name,
timeout=timeout,
description=description,
)
|
A function to setup a RQ job as a cronjob::
@rq.job('low', timeout=60)
def add(x, y):
return x + y
add.cron('* * * * *', 'add-some-numbers', 1, 2, timeout=10)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param pattern: A Crontab pattern.
:type pattern: str
:param name: The name of the cronjob.
:type name: str
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param repeat: The number of times the job needs to be repeatedly
queued via the cronjob. Take care only using this for
cronjob that don't already repeat themselves natively
due to their crontab.
:type repeat: int
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/functions.py#L223-L279
| null |
class JobFunctions(object):
"""
Some helper functions that are added to a function decorated
with a :meth:`~flask_rq2.app.RQ.job` decorator.
"""
#: the methods to add to jobs automatically
functions = ['queue', 'schedule', 'cron']
def __init__(self, rq, wrapped, queue_name, timeout, result_ttl, ttl,
depends_on, at_front, meta, description):
self.rq = rq
self.wrapped = wrapped
self._queue_name = queue_name
self._timeout = timeout
self._result_ttl = result_ttl
# job TTLs don't have a default value
# https://github.com/nvie/rq/issues/873
self.ttl = ttl
self._depends_on = depends_on
self._at_front = at_front
self._meta = meta
self._description = description
def __repr__(self):
full_name = '.'.join([self.wrapped.__module__, self.wrapped.__name__])
return '<JobFunctions %s>' % full_name
@property
def queue_name(self):
# Catch empty strings and None
return self._queue_name or self.rq.default_queue
@queue_name.setter
def queue_name(self, value):
self._queue_name = value
@property
def timeout(self):
return self._timeout or self.rq.default_timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
@property
def result_ttl(self):
# Allow a result TTL of 0
if self._result_ttl is None:
return self.rq.default_result_ttl
else:
return self._result_ttl
@result_ttl.setter
def result_ttl(self, value):
self._result_ttl = value
def queue(self, *args, **kwargs):
"""
A function to queue a RQ job, e.g.::
@rq.job(timeout=60)
def add(x, y):
return x + y
add.queue(1, 2, timeout=30)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param job_id: A custom ID for the new job. Defaults to an
:mod:`UUID <uuid>`.
:type job_id: str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
result_ttl = kwargs.pop('result_ttl', self.result_ttl)
ttl = kwargs.pop('ttl', self.ttl)
depends_on = kwargs.pop('depends_on', self._depends_on)
job_id = kwargs.pop('job_id', None)
at_front = kwargs.pop('at_front', self._at_front)
meta = kwargs.pop('meta', self._meta)
description = kwargs.pop('description', self._description)
return self.rq.get_queue(queue_name).enqueue_call(
self.wrapped,
args=args,
kwargs=kwargs,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
job_id=job_id,
at_front=at_front,
meta=meta,
description=description,
)
def schedule(self, time_or_delta, *args, **kwargs):
"""
A function to schedule running a RQ job at a given time
or after a given timespan::
@rq.job
def add(x, y):
return x + y
add.schedule(timedelta(hours=2), 1, 2, timeout=10)
add.schedule(datetime(2016, 12, 31, 23, 59, 59), 1, 2)
add.schedule(timedelta(days=14), 1, 2, repeat=1)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param repeat: The number of times the job needs to be repeatedly
queued. Requires setting the ``interval`` parameter.
:type repeat: int
:param interval: The interval of repetition as defined by the
``repeat`` parameter in seconds.
:type interval: int
:param job_id: A custom ID for the new job. Defaults to a UUID.
:type job_id: str
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
description = kwargs.pop('description', None)
result_ttl = kwargs.pop('result_ttl', self.result_ttl)
ttl = kwargs.pop('ttl', self.ttl)
repeat = kwargs.pop('repeat', None)
interval = kwargs.pop('interval', None)
job_id = kwargs.pop('job_id', None)
if isinstance(time_or_delta, timedelta):
time = datetime.utcnow() + time_or_delta
else:
time = time_or_delta
return self.rq.get_scheduler().schedule(
time,
self.wrapped,
args=args,
kwargs=kwargs,
interval=interval,
repeat=repeat,
result_ttl=result_ttl,
ttl=ttl,
timeout=timeout,
id=job_id,
description=description,
queue_name=queue_name,
)
|
rq/Flask-RQ2
|
src/flask_rq2/app.py
|
RQ.init_app
|
python
|
def init_app(self, app):
# The connection related config values
self.redis_url = app.config.setdefault(
'RQ_REDIS_URL',
self.redis_url,
)
self.connection_class = app.config.setdefault(
'RQ_CONNECTION_CLASS',
self.connection_class,
)
# all infos to create a Redis connection are now avaiable.
self._ready_to_connect = True
self.queues = app.config.setdefault(
'RQ_QUEUES',
self.queues,
)
self.queue_class = app.config.setdefault(
'RQ_QUEUE_CLASS',
self.queue_class,
)
self.worker_class = app.config.setdefault(
'RQ_WORKER_CLASS',
self.worker_class,
)
self.job_class = app.config.setdefault(
'RQ_JOB_CLASS',
self.job_class,
)
self.scheduler_class = app.config.setdefault(
'RQ_SCHEDULER_CLASS',
self.scheduler_class,
)
self.scheduler_queue = app.config.setdefault(
'RQ_SCHEDULER_QUEUE',
self.scheduler_queue,
)
self.scheduler_interval = app.config.setdefault(
'RQ_SCHEDULER_INTERVAL',
self.scheduler_interval,
)
#: Whether or not to run RQ jobs asynchronously or not,
#: defaults to asynchronous
_async = app.config.setdefault('RQ_ASYNC', True)
if self._is_async is None:
self._is_async = _async
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['rq2'] = self
if hasattr(app, 'cli'):
self.init_cli(app)
|
Initialize the app, e.g. can be used if factory pattern is used.
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/app.py#L137-L193
|
[
"def init_cli(self, app):\n \"\"\"\n Initialize the Flask CLI support in case it was enabled for the\n app.\n\n Works with both Flask>=1.0's CLI support as well as the backport\n in the Flask-CLI package for Flask<1.0.\n \"\"\"\n # in case click isn't installed after all\n if click is None:\n raise RuntimeError('Cannot import click. Is it installed?')\n # only add commands if we have a click context available\n from .cli import add_commands\n add_commands(app.cli, self)\n"
] |
class RQ(object):
"""
The main RQ object to be used in user apps.
"""
#: Name of the default queue.
default_queue = 'default'
#: The fallback default timeout value.
default_timeout = Queue.DEFAULT_TIMEOUT
#: The fallback default result TTL.
#:
#: .. versionadded:: 17.1
default_result_ttl = DEFAULT_RESULT_TTL
#: The DSN (URL) of the Redis connection.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``url`` to ``redis_url``.
redis_url = 'redis://localhost:6379/0'
#: The Redis client class to use.
#:
#: .. versionadded:: 17.1
connection_class = 'redis.StrictRedis'
#: List of queue names for RQ to work on.
queues = [default_queue]
#: Dotted import path to RQ Queue class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``queue_path`` to ``queue_class``.
queue_class = 'rq.queue.Queue'
#: Dotted import path to RQ Workers class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``worker_path`` to ``worker_class``.
worker_class = 'rq.worker.Worker'
#: Dotted import path to RQ Job class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``job_path`` to ``job_class``.
job_class = 'flask_rq2.job.FlaskJob'
#: Dotted import path to RQ Scheduler class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``scheduler_path`` to ``scheduler_class``.
#:
#: .. versionchanged:: 18.0
#: Changed to use own scheduler class.
scheduler_class = 'flask_rq2.scheduler.FlaskScheduler'
#: Name of RQ queue to schedule jobs in by rq-scheduler.
scheduler_queue = default_queue
#: Time in seconds the scheduler checks for scheduled jobs
#: periodicically.
scheduler_interval = 60
#: The default job functions class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``functions_path`` to ``functions_class``.
#: Moved from ``flask_rq2.helpers.JobFunctions`` to
# ``flask_rq2.functions.JobFunctions``.
functions_class = 'flask_rq2.functions.JobFunctions'
def __init__(self, app=None, default_timeout=None, is_async=None,
**kwargs):
"""
Initialize the RQ interface.
:param app: Flask application
:type app: :class:`flask.Flask`
:param default_timeout: The default timeout in seconds to use for jobs,
defaults to RQ's default of 180 seconds per job
:type default_timeout: int
:param is_async: Whether or not to run jobs asynchronously or
in-process, defaults to ``True``
:type is_async: bool
"""
if default_timeout is not None:
self.default_timeout = default_timeout
self._is_async = is_async
if 'async' in kwargs:
self._is_async = kwargs['async']
warnings.warn('The `async` keyword is deprecated. '
'Use `is_async` instead', DeprecationWarning)
self._jobs = []
self._exception_handlers = []
self._queue_instances = {}
self._functions_cls = import_attribute(self.functions_class)
self._ready_to_connect = False
self._connection = None
if app is not None:
self.init_app(app)
@property
def connection(self):
if not self._ready_to_connect:
raise RuntimeError('Flask-RQ2 is not ready yet to connect to '
'Redis. Was it initialized with a Flask app?')
if self._connection is None:
self._connection = self._connect()
return self._connection
def _connect(self):
connection_class = import_attribute(self.connection_class)
return connection_class.from_url(self.redis_url)
def init_cli(self, app):
"""
Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0.
"""
# in case click isn't installed after all
if click is None:
raise RuntimeError('Cannot import click. Is it installed?')
# only add commands if we have a click context available
from .cli import add_commands
add_commands(app.cli, self)
def exception_handler(self, callback):
"""
Decorator to add an exception handler to the worker, e.g.::
rq = RQ()
@rq.exception_handler
def my_custom_handler(job, *exc_info):
# do custom things here
...
"""
path = '.'.join([callback.__module__, callback.__name__])
self._exception_handlers.append(path)
return callback
def job(self, func_or_queue=None, timeout=None, result_ttl=None, ttl=None,
depends_on=None, at_front=None, meta=None, description=None):
"""
Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str
"""
if callable(func_or_queue):
func = func_or_queue
queue_name = None
else:
func = None
queue_name = func_or_queue
def wrapper(wrapped):
self._jobs.append(wrapped)
helper = self._functions_cls(
rq=self,
wrapped=wrapped,
queue_name=queue_name,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
at_front=at_front,
meta=meta,
description=description,
)
wrapped.helper = helper
for function in helper.functions:
callback = getattr(helper, function, None)
setattr(wrapped, function, callback)
return wrapped
if func is None:
return wrapper
else:
return wrapper(func)
def get_scheduler(self, interval=None, queue=None):
"""
When installed returns a ``rq_scheduler.Scheduler`` instance to
schedule job execution, e.g.::
scheduler = rq.get_scheduler(interval=10)
:param interval: Time in seconds of the periodic check for scheduled
jobs.
:type interval: int
:param queue: Name of the queue to enqueue in, defaults to
:attr:`~flask_rq2.RQ.scheduler_queue`.
:type queue: str
"""
if interval is None:
interval = self.scheduler_interval
if not queue:
queue = self.scheduler_queue
scheduler_cls = import_attribute(self.scheduler_class)
scheduler = scheduler_cls(
queue_name=queue,
interval=interval,
connection=self.connection,
)
return scheduler
def get_queue(self, name=None):
"""
Returns an RQ queue instance with the given name, e.g.::
default_queue = rq.get_queue()
low_queue = rq.get_queue('low')
:param name: Name of the queue to return, defaults to
:attr:`~flask_rq2.RQ.default_queue`.
:type name: str
:return: An RQ queue instance.
:rtype: ``rq.queue.Queue``
"""
if not name:
name = self.default_queue
queue = self._queue_instances.get(name)
if queue is None:
queue_cls = import_attribute(self.queue_class)
queue = queue_cls(
name=name,
default_timeout=self.default_timeout,
is_async=self._is_async,
connection=self.connection,
job_class=self.job_class
)
self._queue_instances[name] = queue
return queue
def get_worker(self, *queues):
"""
Returns an RQ worker instance for the given queue names, e.g.::
configured_worker = rq.get_worker()
default_worker = rq.get_worker('default')
default_low_worker = rq.get_worker('default', 'low')
:param \\*queues: Names of queues the worker should act on, falls back
to the configured queues.
"""
if not queues:
queues = self.queues
queues = [self.get_queue(name) for name in queues]
worker_cls = import_attribute(self.worker_class)
worker = worker_cls(
queues,
connection=self.connection,
job_class=self.job_class,
queue_class=self.queue_class,
)
for exception_handler in self._exception_handlers:
worker.push_exc_handler(import_attribute(exception_handler))
return worker
|
rq/Flask-RQ2
|
src/flask_rq2/app.py
|
RQ.init_cli
|
python
|
def init_cli(self, app):
# in case click isn't installed after all
if click is None:
raise RuntimeError('Cannot import click. Is it installed?')
# only add commands if we have a click context available
from .cli import add_commands
add_commands(app.cli, self)
|
Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0.
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/app.py#L195-L208
|
[
"def add_commands(cli, rq):\n @click.group(cls=AppGroup, help='Runs RQ commands with app context.')\n @click.pass_context\n def rq_group(ctx):\n ctx.ensure_object(ScriptInfo).data['rq'] = rq\n\n sorted_commands = sorted(_commands.items(), key=operator.itemgetter(0))\n for name, func in sorted_commands:\n rq_group.command(name=name)(func)\n\n cli.add_command(rq_group, name='rq')\n"
] |
class RQ(object):
"""
The main RQ object to be used in user apps.
"""
#: Name of the default queue.
default_queue = 'default'
#: The fallback default timeout value.
default_timeout = Queue.DEFAULT_TIMEOUT
#: The fallback default result TTL.
#:
#: .. versionadded:: 17.1
default_result_ttl = DEFAULT_RESULT_TTL
#: The DSN (URL) of the Redis connection.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``url`` to ``redis_url``.
redis_url = 'redis://localhost:6379/0'
#: The Redis client class to use.
#:
#: .. versionadded:: 17.1
connection_class = 'redis.StrictRedis'
#: List of queue names for RQ to work on.
queues = [default_queue]
#: Dotted import path to RQ Queue class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``queue_path`` to ``queue_class``.
queue_class = 'rq.queue.Queue'
#: Dotted import path to RQ Workers class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``worker_path`` to ``worker_class``.
worker_class = 'rq.worker.Worker'
#: Dotted import path to RQ Job class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``job_path`` to ``job_class``.
job_class = 'flask_rq2.job.FlaskJob'
#: Dotted import path to RQ Scheduler class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``scheduler_path`` to ``scheduler_class``.
#:
#: .. versionchanged:: 18.0
#: Changed to use own scheduler class.
scheduler_class = 'flask_rq2.scheduler.FlaskScheduler'
#: Name of RQ queue to schedule jobs in by rq-scheduler.
scheduler_queue = default_queue
#: Time in seconds the scheduler checks for scheduled jobs
#: periodicically.
scheduler_interval = 60
#: The default job functions class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``functions_path`` to ``functions_class``.
#: Moved from ``flask_rq2.helpers.JobFunctions`` to
# ``flask_rq2.functions.JobFunctions``.
functions_class = 'flask_rq2.functions.JobFunctions'
def __init__(self, app=None, default_timeout=None, is_async=None,
**kwargs):
"""
Initialize the RQ interface.
:param app: Flask application
:type app: :class:`flask.Flask`
:param default_timeout: The default timeout in seconds to use for jobs,
defaults to RQ's default of 180 seconds per job
:type default_timeout: int
:param is_async: Whether or not to run jobs asynchronously or
in-process, defaults to ``True``
:type is_async: bool
"""
if default_timeout is not None:
self.default_timeout = default_timeout
self._is_async = is_async
if 'async' in kwargs:
self._is_async = kwargs['async']
warnings.warn('The `async` keyword is deprecated. '
'Use `is_async` instead', DeprecationWarning)
self._jobs = []
self._exception_handlers = []
self._queue_instances = {}
self._functions_cls = import_attribute(self.functions_class)
self._ready_to_connect = False
self._connection = None
if app is not None:
self.init_app(app)
@property
def connection(self):
if not self._ready_to_connect:
raise RuntimeError('Flask-RQ2 is not ready yet to connect to '
'Redis. Was it initialized with a Flask app?')
if self._connection is None:
self._connection = self._connect()
return self._connection
def _connect(self):
connection_class = import_attribute(self.connection_class)
return connection_class.from_url(self.redis_url)
def init_app(self, app):
"""
Initialize the app, e.g. can be used if factory pattern is used.
"""
# The connection related config values
self.redis_url = app.config.setdefault(
'RQ_REDIS_URL',
self.redis_url,
)
self.connection_class = app.config.setdefault(
'RQ_CONNECTION_CLASS',
self.connection_class,
)
# all infos to create a Redis connection are now avaiable.
self._ready_to_connect = True
self.queues = app.config.setdefault(
'RQ_QUEUES',
self.queues,
)
self.queue_class = app.config.setdefault(
'RQ_QUEUE_CLASS',
self.queue_class,
)
self.worker_class = app.config.setdefault(
'RQ_WORKER_CLASS',
self.worker_class,
)
self.job_class = app.config.setdefault(
'RQ_JOB_CLASS',
self.job_class,
)
self.scheduler_class = app.config.setdefault(
'RQ_SCHEDULER_CLASS',
self.scheduler_class,
)
self.scheduler_queue = app.config.setdefault(
'RQ_SCHEDULER_QUEUE',
self.scheduler_queue,
)
self.scheduler_interval = app.config.setdefault(
'RQ_SCHEDULER_INTERVAL',
self.scheduler_interval,
)
#: Whether or not to run RQ jobs asynchronously or not,
#: defaults to asynchronous
_async = app.config.setdefault('RQ_ASYNC', True)
if self._is_async is None:
self._is_async = _async
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['rq2'] = self
if hasattr(app, 'cli'):
self.init_cli(app)
def exception_handler(self, callback):
"""
Decorator to add an exception handler to the worker, e.g.::
rq = RQ()
@rq.exception_handler
def my_custom_handler(job, *exc_info):
# do custom things here
...
"""
path = '.'.join([callback.__module__, callback.__name__])
self._exception_handlers.append(path)
return callback
def job(self, func_or_queue=None, timeout=None, result_ttl=None, ttl=None,
depends_on=None, at_front=None, meta=None, description=None):
"""
Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str
"""
if callable(func_or_queue):
func = func_or_queue
queue_name = None
else:
func = None
queue_name = func_or_queue
def wrapper(wrapped):
self._jobs.append(wrapped)
helper = self._functions_cls(
rq=self,
wrapped=wrapped,
queue_name=queue_name,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
at_front=at_front,
meta=meta,
description=description,
)
wrapped.helper = helper
for function in helper.functions:
callback = getattr(helper, function, None)
setattr(wrapped, function, callback)
return wrapped
if func is None:
return wrapper
else:
return wrapper(func)
def get_scheduler(self, interval=None, queue=None):
"""
When installed returns a ``rq_scheduler.Scheduler`` instance to
schedule job execution, e.g.::
scheduler = rq.get_scheduler(interval=10)
:param interval: Time in seconds of the periodic check for scheduled
jobs.
:type interval: int
:param queue: Name of the queue to enqueue in, defaults to
:attr:`~flask_rq2.RQ.scheduler_queue`.
:type queue: str
"""
if interval is None:
interval = self.scheduler_interval
if not queue:
queue = self.scheduler_queue
scheduler_cls = import_attribute(self.scheduler_class)
scheduler = scheduler_cls(
queue_name=queue,
interval=interval,
connection=self.connection,
)
return scheduler
def get_queue(self, name=None):
"""
Returns an RQ queue instance with the given name, e.g.::
default_queue = rq.get_queue()
low_queue = rq.get_queue('low')
:param name: Name of the queue to return, defaults to
:attr:`~flask_rq2.RQ.default_queue`.
:type name: str
:return: An RQ queue instance.
:rtype: ``rq.queue.Queue``
"""
if not name:
name = self.default_queue
queue = self._queue_instances.get(name)
if queue is None:
queue_cls = import_attribute(self.queue_class)
queue = queue_cls(
name=name,
default_timeout=self.default_timeout,
is_async=self._is_async,
connection=self.connection,
job_class=self.job_class
)
self._queue_instances[name] = queue
return queue
def get_worker(self, *queues):
"""
Returns an RQ worker instance for the given queue names, e.g.::
configured_worker = rq.get_worker()
default_worker = rq.get_worker('default')
default_low_worker = rq.get_worker('default', 'low')
:param \\*queues: Names of queues the worker should act on, falls back
to the configured queues.
"""
if not queues:
queues = self.queues
queues = [self.get_queue(name) for name in queues]
worker_cls = import_attribute(self.worker_class)
worker = worker_cls(
queues,
connection=self.connection,
job_class=self.job_class,
queue_class=self.queue_class,
)
for exception_handler in self._exception_handlers:
worker.push_exc_handler(import_attribute(exception_handler))
return worker
|
rq/Flask-RQ2
|
src/flask_rq2/app.py
|
RQ.exception_handler
|
python
|
def exception_handler(self, callback):
path = '.'.join([callback.__module__, callback.__name__])
self._exception_handlers.append(path)
return callback
|
Decorator to add an exception handler to the worker, e.g.::
rq = RQ()
@rq.exception_handler
def my_custom_handler(job, *exc_info):
# do custom things here
...
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/app.py#L210-L224
| null |
class RQ(object):
"""
The main RQ object to be used in user apps.
"""
#: Name of the default queue.
default_queue = 'default'
#: The fallback default timeout value.
default_timeout = Queue.DEFAULT_TIMEOUT
#: The fallback default result TTL.
#:
#: .. versionadded:: 17.1
default_result_ttl = DEFAULT_RESULT_TTL
#: The DSN (URL) of the Redis connection.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``url`` to ``redis_url``.
redis_url = 'redis://localhost:6379/0'
#: The Redis client class to use.
#:
#: .. versionadded:: 17.1
connection_class = 'redis.StrictRedis'
#: List of queue names for RQ to work on.
queues = [default_queue]
#: Dotted import path to RQ Queue class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``queue_path`` to ``queue_class``.
queue_class = 'rq.queue.Queue'
#: Dotted import path to RQ Workers class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``worker_path`` to ``worker_class``.
worker_class = 'rq.worker.Worker'
#: Dotted import path to RQ Job class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``job_path`` to ``job_class``.
job_class = 'flask_rq2.job.FlaskJob'
#: Dotted import path to RQ Scheduler class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``scheduler_path`` to ``scheduler_class``.
#:
#: .. versionchanged:: 18.0
#: Changed to use own scheduler class.
scheduler_class = 'flask_rq2.scheduler.FlaskScheduler'
#: Name of RQ queue to schedule jobs in by rq-scheduler.
scheduler_queue = default_queue
#: Time in seconds the scheduler checks for scheduled jobs
#: periodicically.
scheduler_interval = 60
#: The default job functions class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``functions_path`` to ``functions_class``.
#: Moved from ``flask_rq2.helpers.JobFunctions`` to
# ``flask_rq2.functions.JobFunctions``.
functions_class = 'flask_rq2.functions.JobFunctions'
def __init__(self, app=None, default_timeout=None, is_async=None,
**kwargs):
"""
Initialize the RQ interface.
:param app: Flask application
:type app: :class:`flask.Flask`
:param default_timeout: The default timeout in seconds to use for jobs,
defaults to RQ's default of 180 seconds per job
:type default_timeout: int
:param is_async: Whether or not to run jobs asynchronously or
in-process, defaults to ``True``
:type is_async: bool
"""
if default_timeout is not None:
self.default_timeout = default_timeout
self._is_async = is_async
if 'async' in kwargs:
self._is_async = kwargs['async']
warnings.warn('The `async` keyword is deprecated. '
'Use `is_async` instead', DeprecationWarning)
self._jobs = []
self._exception_handlers = []
self._queue_instances = {}
self._functions_cls = import_attribute(self.functions_class)
self._ready_to_connect = False
self._connection = None
if app is not None:
self.init_app(app)
@property
def connection(self):
if not self._ready_to_connect:
raise RuntimeError('Flask-RQ2 is not ready yet to connect to '
'Redis. Was it initialized with a Flask app?')
if self._connection is None:
self._connection = self._connect()
return self._connection
def _connect(self):
connection_class = import_attribute(self.connection_class)
return connection_class.from_url(self.redis_url)
def init_app(self, app):
"""
Initialize the app, e.g. can be used if factory pattern is used.
"""
# The connection related config values
self.redis_url = app.config.setdefault(
'RQ_REDIS_URL',
self.redis_url,
)
self.connection_class = app.config.setdefault(
'RQ_CONNECTION_CLASS',
self.connection_class,
)
# all infos to create a Redis connection are now avaiable.
self._ready_to_connect = True
self.queues = app.config.setdefault(
'RQ_QUEUES',
self.queues,
)
self.queue_class = app.config.setdefault(
'RQ_QUEUE_CLASS',
self.queue_class,
)
self.worker_class = app.config.setdefault(
'RQ_WORKER_CLASS',
self.worker_class,
)
self.job_class = app.config.setdefault(
'RQ_JOB_CLASS',
self.job_class,
)
self.scheduler_class = app.config.setdefault(
'RQ_SCHEDULER_CLASS',
self.scheduler_class,
)
self.scheduler_queue = app.config.setdefault(
'RQ_SCHEDULER_QUEUE',
self.scheduler_queue,
)
self.scheduler_interval = app.config.setdefault(
'RQ_SCHEDULER_INTERVAL',
self.scheduler_interval,
)
#: Whether or not to run RQ jobs asynchronously or not,
#: defaults to asynchronous
_async = app.config.setdefault('RQ_ASYNC', True)
if self._is_async is None:
self._is_async = _async
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['rq2'] = self
if hasattr(app, 'cli'):
self.init_cli(app)
def init_cli(self, app):
"""
Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0.
"""
# in case click isn't installed after all
if click is None:
raise RuntimeError('Cannot import click. Is it installed?')
# only add commands if we have a click context available
from .cli import add_commands
add_commands(app.cli, self)
def job(self, func_or_queue=None, timeout=None, result_ttl=None, ttl=None,
depends_on=None, at_front=None, meta=None, description=None):
"""
Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str
"""
if callable(func_or_queue):
func = func_or_queue
queue_name = None
else:
func = None
queue_name = func_or_queue
def wrapper(wrapped):
self._jobs.append(wrapped)
helper = self._functions_cls(
rq=self,
wrapped=wrapped,
queue_name=queue_name,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
at_front=at_front,
meta=meta,
description=description,
)
wrapped.helper = helper
for function in helper.functions:
callback = getattr(helper, function, None)
setattr(wrapped, function, callback)
return wrapped
if func is None:
return wrapper
else:
return wrapper(func)
def get_scheduler(self, interval=None, queue=None):
"""
When installed returns a ``rq_scheduler.Scheduler`` instance to
schedule job execution, e.g.::
scheduler = rq.get_scheduler(interval=10)
:param interval: Time in seconds of the periodic check for scheduled
jobs.
:type interval: int
:param queue: Name of the queue to enqueue in, defaults to
:attr:`~flask_rq2.RQ.scheduler_queue`.
:type queue: str
"""
if interval is None:
interval = self.scheduler_interval
if not queue:
queue = self.scheduler_queue
scheduler_cls = import_attribute(self.scheduler_class)
scheduler = scheduler_cls(
queue_name=queue,
interval=interval,
connection=self.connection,
)
return scheduler
def get_queue(self, name=None):
"""
Returns an RQ queue instance with the given name, e.g.::
default_queue = rq.get_queue()
low_queue = rq.get_queue('low')
:param name: Name of the queue to return, defaults to
:attr:`~flask_rq2.RQ.default_queue`.
:type name: str
:return: An RQ queue instance.
:rtype: ``rq.queue.Queue``
"""
if not name:
name = self.default_queue
queue = self._queue_instances.get(name)
if queue is None:
queue_cls = import_attribute(self.queue_class)
queue = queue_cls(
name=name,
default_timeout=self.default_timeout,
is_async=self._is_async,
connection=self.connection,
job_class=self.job_class
)
self._queue_instances[name] = queue
return queue
def get_worker(self, *queues):
"""
Returns an RQ worker instance for the given queue names, e.g.::
configured_worker = rq.get_worker()
default_worker = rq.get_worker('default')
default_low_worker = rq.get_worker('default', 'low')
:param \\*queues: Names of queues the worker should act on, falls back
to the configured queues.
"""
if not queues:
queues = self.queues
queues = [self.get_queue(name) for name in queues]
worker_cls = import_attribute(self.worker_class)
worker = worker_cls(
queues,
connection=self.connection,
job_class=self.job_class,
queue_class=self.queue_class,
)
for exception_handler in self._exception_handlers:
worker.push_exc_handler(import_attribute(exception_handler))
return worker
|
rq/Flask-RQ2
|
src/flask_rq2/app.py
|
RQ.job
|
python
|
def job(self, func_or_queue=None, timeout=None, result_ttl=None, ttl=None,
depends_on=None, at_front=None, meta=None, description=None):
if callable(func_or_queue):
func = func_or_queue
queue_name = None
else:
func = None
queue_name = func_or_queue
def wrapper(wrapped):
self._jobs.append(wrapped)
helper = self._functions_cls(
rq=self,
wrapped=wrapped,
queue_name=queue_name,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
at_front=at_front,
meta=meta,
description=description,
)
wrapped.helper = helper
for function in helper.functions:
callback = getattr(helper, function, None)
setattr(wrapped, function, callback)
return wrapped
if func is None:
return wrapper
else:
return wrapper(func)
|
Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/app.py#L226-L308
|
[
"def wrapper(wrapped):\n self._jobs.append(wrapped)\n helper = self._functions_cls(\n rq=self,\n wrapped=wrapped,\n queue_name=queue_name,\n timeout=timeout,\n result_ttl=result_ttl,\n ttl=ttl,\n depends_on=depends_on,\n at_front=at_front,\n meta=meta,\n description=description,\n )\n wrapped.helper = helper\n for function in helper.functions:\n callback = getattr(helper, function, None)\n setattr(wrapped, function, callback)\n return wrapped\n"
] |
class RQ(object):
"""
The main RQ object to be used in user apps.
"""
#: Name of the default queue.
default_queue = 'default'
#: The fallback default timeout value.
default_timeout = Queue.DEFAULT_TIMEOUT
#: The fallback default result TTL.
#:
#: .. versionadded:: 17.1
default_result_ttl = DEFAULT_RESULT_TTL
#: The DSN (URL) of the Redis connection.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``url`` to ``redis_url``.
redis_url = 'redis://localhost:6379/0'
#: The Redis client class to use.
#:
#: .. versionadded:: 17.1
connection_class = 'redis.StrictRedis'
#: List of queue names for RQ to work on.
queues = [default_queue]
#: Dotted import path to RQ Queue class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``queue_path`` to ``queue_class``.
queue_class = 'rq.queue.Queue'
#: Dotted import path to RQ Workers class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``worker_path`` to ``worker_class``.
worker_class = 'rq.worker.Worker'
#: Dotted import path to RQ Job class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``job_path`` to ``job_class``.
job_class = 'flask_rq2.job.FlaskJob'
#: Dotted import path to RQ Scheduler class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``scheduler_path`` to ``scheduler_class``.
#:
#: .. versionchanged:: 18.0
#: Changed to use own scheduler class.
scheduler_class = 'flask_rq2.scheduler.FlaskScheduler'
#: Name of RQ queue to schedule jobs in by rq-scheduler.
scheduler_queue = default_queue
#: Time in seconds the scheduler checks for scheduled jobs
#: periodicically.
scheduler_interval = 60
#: The default job functions class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``functions_path`` to ``functions_class``.
#: Moved from ``flask_rq2.helpers.JobFunctions`` to
# ``flask_rq2.functions.JobFunctions``.
functions_class = 'flask_rq2.functions.JobFunctions'
def __init__(self, app=None, default_timeout=None, is_async=None,
**kwargs):
"""
Initialize the RQ interface.
:param app: Flask application
:type app: :class:`flask.Flask`
:param default_timeout: The default timeout in seconds to use for jobs,
defaults to RQ's default of 180 seconds per job
:type default_timeout: int
:param is_async: Whether or not to run jobs asynchronously or
in-process, defaults to ``True``
:type is_async: bool
"""
if default_timeout is not None:
self.default_timeout = default_timeout
self._is_async = is_async
if 'async' in kwargs:
self._is_async = kwargs['async']
warnings.warn('The `async` keyword is deprecated. '
'Use `is_async` instead', DeprecationWarning)
self._jobs = []
self._exception_handlers = []
self._queue_instances = {}
self._functions_cls = import_attribute(self.functions_class)
self._ready_to_connect = False
self._connection = None
if app is not None:
self.init_app(app)
@property
def connection(self):
if not self._ready_to_connect:
raise RuntimeError('Flask-RQ2 is not ready yet to connect to '
'Redis. Was it initialized with a Flask app?')
if self._connection is None:
self._connection = self._connect()
return self._connection
def _connect(self):
connection_class = import_attribute(self.connection_class)
return connection_class.from_url(self.redis_url)
def init_app(self, app):
"""
Initialize the app, e.g. can be used if factory pattern is used.
"""
# The connection related config values
self.redis_url = app.config.setdefault(
'RQ_REDIS_URL',
self.redis_url,
)
self.connection_class = app.config.setdefault(
'RQ_CONNECTION_CLASS',
self.connection_class,
)
# all infos to create a Redis connection are now avaiable.
self._ready_to_connect = True
self.queues = app.config.setdefault(
'RQ_QUEUES',
self.queues,
)
self.queue_class = app.config.setdefault(
'RQ_QUEUE_CLASS',
self.queue_class,
)
self.worker_class = app.config.setdefault(
'RQ_WORKER_CLASS',
self.worker_class,
)
self.job_class = app.config.setdefault(
'RQ_JOB_CLASS',
self.job_class,
)
self.scheduler_class = app.config.setdefault(
'RQ_SCHEDULER_CLASS',
self.scheduler_class,
)
self.scheduler_queue = app.config.setdefault(
'RQ_SCHEDULER_QUEUE',
self.scheduler_queue,
)
self.scheduler_interval = app.config.setdefault(
'RQ_SCHEDULER_INTERVAL',
self.scheduler_interval,
)
#: Whether or not to run RQ jobs asynchronously or not,
#: defaults to asynchronous
_async = app.config.setdefault('RQ_ASYNC', True)
if self._is_async is None:
self._is_async = _async
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['rq2'] = self
if hasattr(app, 'cli'):
self.init_cli(app)
def init_cli(self, app):
"""
Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0.
"""
# in case click isn't installed after all
if click is None:
raise RuntimeError('Cannot import click. Is it installed?')
# only add commands if we have a click context available
from .cli import add_commands
add_commands(app.cli, self)
def exception_handler(self, callback):
"""
Decorator to add an exception handler to the worker, e.g.::
rq = RQ()
@rq.exception_handler
def my_custom_handler(job, *exc_info):
# do custom things here
...
"""
path = '.'.join([callback.__module__, callback.__name__])
self._exception_handlers.append(path)
return callback
def get_scheduler(self, interval=None, queue=None):
"""
When installed returns a ``rq_scheduler.Scheduler`` instance to
schedule job execution, e.g.::
scheduler = rq.get_scheduler(interval=10)
:param interval: Time in seconds of the periodic check for scheduled
jobs.
:type interval: int
:param queue: Name of the queue to enqueue in, defaults to
:attr:`~flask_rq2.RQ.scheduler_queue`.
:type queue: str
"""
if interval is None:
interval = self.scheduler_interval
if not queue:
queue = self.scheduler_queue
scheduler_cls = import_attribute(self.scheduler_class)
scheduler = scheduler_cls(
queue_name=queue,
interval=interval,
connection=self.connection,
)
return scheduler
def get_queue(self, name=None):
"""
Returns an RQ queue instance with the given name, e.g.::
default_queue = rq.get_queue()
low_queue = rq.get_queue('low')
:param name: Name of the queue to return, defaults to
:attr:`~flask_rq2.RQ.default_queue`.
:type name: str
:return: An RQ queue instance.
:rtype: ``rq.queue.Queue``
"""
if not name:
name = self.default_queue
queue = self._queue_instances.get(name)
if queue is None:
queue_cls = import_attribute(self.queue_class)
queue = queue_cls(
name=name,
default_timeout=self.default_timeout,
is_async=self._is_async,
connection=self.connection,
job_class=self.job_class
)
self._queue_instances[name] = queue
return queue
def get_worker(self, *queues):
"""
Returns an RQ worker instance for the given queue names, e.g.::
configured_worker = rq.get_worker()
default_worker = rq.get_worker('default')
default_low_worker = rq.get_worker('default', 'low')
:param \\*queues: Names of queues the worker should act on, falls back
to the configured queues.
"""
if not queues:
queues = self.queues
queues = [self.get_queue(name) for name in queues]
worker_cls = import_attribute(self.worker_class)
worker = worker_cls(
queues,
connection=self.connection,
job_class=self.job_class,
queue_class=self.queue_class,
)
for exception_handler in self._exception_handlers:
worker.push_exc_handler(import_attribute(exception_handler))
return worker
|
rq/Flask-RQ2
|
src/flask_rq2/app.py
|
RQ.get_scheduler
|
python
|
def get_scheduler(self, interval=None, queue=None):
if interval is None:
interval = self.scheduler_interval
if not queue:
queue = self.scheduler_queue
scheduler_cls = import_attribute(self.scheduler_class)
scheduler = scheduler_cls(
queue_name=queue,
interval=interval,
connection=self.connection,
)
return scheduler
|
When installed returns a ``rq_scheduler.Scheduler`` instance to
schedule job execution, e.g.::
scheduler = rq.get_scheduler(interval=10)
:param interval: Time in seconds of the periodic check for scheduled
jobs.
:type interval: int
:param queue: Name of the queue to enqueue in, defaults to
:attr:`~flask_rq2.RQ.scheduler_queue`.
:type queue: str
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/app.py#L310-L337
| null |
class RQ(object):
"""
The main RQ object to be used in user apps.
"""
#: Name of the default queue.
default_queue = 'default'
#: The fallback default timeout value.
default_timeout = Queue.DEFAULT_TIMEOUT
#: The fallback default result TTL.
#:
#: .. versionadded:: 17.1
default_result_ttl = DEFAULT_RESULT_TTL
#: The DSN (URL) of the Redis connection.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``url`` to ``redis_url``.
redis_url = 'redis://localhost:6379/0'
#: The Redis client class to use.
#:
#: .. versionadded:: 17.1
connection_class = 'redis.StrictRedis'
#: List of queue names for RQ to work on.
queues = [default_queue]
#: Dotted import path to RQ Queue class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``queue_path`` to ``queue_class``.
queue_class = 'rq.queue.Queue'
#: Dotted import path to RQ Workers class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``worker_path`` to ``worker_class``.
worker_class = 'rq.worker.Worker'
#: Dotted import path to RQ Job class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``job_path`` to ``job_class``.
job_class = 'flask_rq2.job.FlaskJob'
#: Dotted import path to RQ Scheduler class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``scheduler_path`` to ``scheduler_class``.
#:
#: .. versionchanged:: 18.0
#: Changed to use own scheduler class.
scheduler_class = 'flask_rq2.scheduler.FlaskScheduler'
#: Name of RQ queue to schedule jobs in by rq-scheduler.
scheduler_queue = default_queue
#: Time in seconds the scheduler checks for scheduled jobs
#: periodicically.
scheduler_interval = 60
#: The default job functions class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``functions_path`` to ``functions_class``.
#: Moved from ``flask_rq2.helpers.JobFunctions`` to
# ``flask_rq2.functions.JobFunctions``.
functions_class = 'flask_rq2.functions.JobFunctions'
def __init__(self, app=None, default_timeout=None, is_async=None,
**kwargs):
"""
Initialize the RQ interface.
:param app: Flask application
:type app: :class:`flask.Flask`
:param default_timeout: The default timeout in seconds to use for jobs,
defaults to RQ's default of 180 seconds per job
:type default_timeout: int
:param is_async: Whether or not to run jobs asynchronously or
in-process, defaults to ``True``
:type is_async: bool
"""
if default_timeout is not None:
self.default_timeout = default_timeout
self._is_async = is_async
if 'async' in kwargs:
self._is_async = kwargs['async']
warnings.warn('The `async` keyword is deprecated. '
'Use `is_async` instead', DeprecationWarning)
self._jobs = []
self._exception_handlers = []
self._queue_instances = {}
self._functions_cls = import_attribute(self.functions_class)
self._ready_to_connect = False
self._connection = None
if app is not None:
self.init_app(app)
@property
def connection(self):
if not self._ready_to_connect:
raise RuntimeError('Flask-RQ2 is not ready yet to connect to '
'Redis. Was it initialized with a Flask app?')
if self._connection is None:
self._connection = self._connect()
return self._connection
def _connect(self):
connection_class = import_attribute(self.connection_class)
return connection_class.from_url(self.redis_url)
def init_app(self, app):
"""
Initialize the app, e.g. can be used if factory pattern is used.
"""
# The connection related config values
self.redis_url = app.config.setdefault(
'RQ_REDIS_URL',
self.redis_url,
)
self.connection_class = app.config.setdefault(
'RQ_CONNECTION_CLASS',
self.connection_class,
)
# all infos to create a Redis connection are now avaiable.
self._ready_to_connect = True
self.queues = app.config.setdefault(
'RQ_QUEUES',
self.queues,
)
self.queue_class = app.config.setdefault(
'RQ_QUEUE_CLASS',
self.queue_class,
)
self.worker_class = app.config.setdefault(
'RQ_WORKER_CLASS',
self.worker_class,
)
self.job_class = app.config.setdefault(
'RQ_JOB_CLASS',
self.job_class,
)
self.scheduler_class = app.config.setdefault(
'RQ_SCHEDULER_CLASS',
self.scheduler_class,
)
self.scheduler_queue = app.config.setdefault(
'RQ_SCHEDULER_QUEUE',
self.scheduler_queue,
)
self.scheduler_interval = app.config.setdefault(
'RQ_SCHEDULER_INTERVAL',
self.scheduler_interval,
)
#: Whether or not to run RQ jobs asynchronously or not,
#: defaults to asynchronous
_async = app.config.setdefault('RQ_ASYNC', True)
if self._is_async is None:
self._is_async = _async
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['rq2'] = self
if hasattr(app, 'cli'):
self.init_cli(app)
def init_cli(self, app):
"""
Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0.
"""
# in case click isn't installed after all
if click is None:
raise RuntimeError('Cannot import click. Is it installed?')
# only add commands if we have a click context available
from .cli import add_commands
add_commands(app.cli, self)
def exception_handler(self, callback):
"""
Decorator to add an exception handler to the worker, e.g.::
rq = RQ()
@rq.exception_handler
def my_custom_handler(job, *exc_info):
# do custom things here
...
"""
path = '.'.join([callback.__module__, callback.__name__])
self._exception_handlers.append(path)
return callback
def job(self, func_or_queue=None, timeout=None, result_ttl=None, ttl=None,
depends_on=None, at_front=None, meta=None, description=None):
"""
Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str
"""
if callable(func_or_queue):
func = func_or_queue
queue_name = None
else:
func = None
queue_name = func_or_queue
def wrapper(wrapped):
self._jobs.append(wrapped)
helper = self._functions_cls(
rq=self,
wrapped=wrapped,
queue_name=queue_name,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
at_front=at_front,
meta=meta,
description=description,
)
wrapped.helper = helper
for function in helper.functions:
callback = getattr(helper, function, None)
setattr(wrapped, function, callback)
return wrapped
if func is None:
return wrapper
else:
return wrapper(func)
def get_queue(self, name=None):
"""
Returns an RQ queue instance with the given name, e.g.::
default_queue = rq.get_queue()
low_queue = rq.get_queue('low')
:param name: Name of the queue to return, defaults to
:attr:`~flask_rq2.RQ.default_queue`.
:type name: str
:return: An RQ queue instance.
:rtype: ``rq.queue.Queue``
"""
if not name:
name = self.default_queue
queue = self._queue_instances.get(name)
if queue is None:
queue_cls = import_attribute(self.queue_class)
queue = queue_cls(
name=name,
default_timeout=self.default_timeout,
is_async=self._is_async,
connection=self.connection,
job_class=self.job_class
)
self._queue_instances[name] = queue
return queue
def get_worker(self, *queues):
"""
Returns an RQ worker instance for the given queue names, e.g.::
configured_worker = rq.get_worker()
default_worker = rq.get_worker('default')
default_low_worker = rq.get_worker('default', 'low')
:param \\*queues: Names of queues the worker should act on, falls back
to the configured queues.
"""
if not queues:
queues = self.queues
queues = [self.get_queue(name) for name in queues]
worker_cls = import_attribute(self.worker_class)
worker = worker_cls(
queues,
connection=self.connection,
job_class=self.job_class,
queue_class=self.queue_class,
)
for exception_handler in self._exception_handlers:
worker.push_exc_handler(import_attribute(exception_handler))
return worker
|
rq/Flask-RQ2
|
src/flask_rq2/app.py
|
RQ.get_queue
|
python
|
def get_queue(self, name=None):
if not name:
name = self.default_queue
queue = self._queue_instances.get(name)
if queue is None:
queue_cls = import_attribute(self.queue_class)
queue = queue_cls(
name=name,
default_timeout=self.default_timeout,
is_async=self._is_async,
connection=self.connection,
job_class=self.job_class
)
self._queue_instances[name] = queue
return queue
|
Returns an RQ queue instance with the given name, e.g.::
default_queue = rq.get_queue()
low_queue = rq.get_queue('low')
:param name: Name of the queue to return, defaults to
:attr:`~flask_rq2.RQ.default_queue`.
:type name: str
:return: An RQ queue instance.
:rtype: ``rq.queue.Queue``
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/app.py#L339-L365
| null |
class RQ(object):
"""
The main RQ object to be used in user apps.
"""
#: Name of the default queue.
default_queue = 'default'
#: The fallback default timeout value.
default_timeout = Queue.DEFAULT_TIMEOUT
#: The fallback default result TTL.
#:
#: .. versionadded:: 17.1
default_result_ttl = DEFAULT_RESULT_TTL
#: The DSN (URL) of the Redis connection.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``url`` to ``redis_url``.
redis_url = 'redis://localhost:6379/0'
#: The Redis client class to use.
#:
#: .. versionadded:: 17.1
connection_class = 'redis.StrictRedis'
#: List of queue names for RQ to work on.
queues = [default_queue]
#: Dotted import path to RQ Queue class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``queue_path`` to ``queue_class``.
queue_class = 'rq.queue.Queue'
#: Dotted import path to RQ Workers class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``worker_path`` to ``worker_class``.
worker_class = 'rq.worker.Worker'
#: Dotted import path to RQ Job class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``job_path`` to ``job_class``.
job_class = 'flask_rq2.job.FlaskJob'
#: Dotted import path to RQ Scheduler class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``scheduler_path`` to ``scheduler_class``.
#:
#: .. versionchanged:: 18.0
#: Changed to use own scheduler class.
scheduler_class = 'flask_rq2.scheduler.FlaskScheduler'
#: Name of RQ queue to schedule jobs in by rq-scheduler.
scheduler_queue = default_queue
#: Time in seconds the scheduler checks for scheduled jobs
#: periodicically.
scheduler_interval = 60
#: The default job functions class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``functions_path`` to ``functions_class``.
#: Moved from ``flask_rq2.helpers.JobFunctions`` to
# ``flask_rq2.functions.JobFunctions``.
functions_class = 'flask_rq2.functions.JobFunctions'
def __init__(self, app=None, default_timeout=None, is_async=None,
**kwargs):
"""
Initialize the RQ interface.
:param app: Flask application
:type app: :class:`flask.Flask`
:param default_timeout: The default timeout in seconds to use for jobs,
defaults to RQ's default of 180 seconds per job
:type default_timeout: int
:param is_async: Whether or not to run jobs asynchronously or
in-process, defaults to ``True``
:type is_async: bool
"""
if default_timeout is not None:
self.default_timeout = default_timeout
self._is_async = is_async
if 'async' in kwargs:
self._is_async = kwargs['async']
warnings.warn('The `async` keyword is deprecated. '
'Use `is_async` instead', DeprecationWarning)
self._jobs = []
self._exception_handlers = []
self._queue_instances = {}
self._functions_cls = import_attribute(self.functions_class)
self._ready_to_connect = False
self._connection = None
if app is not None:
self.init_app(app)
@property
def connection(self):
if not self._ready_to_connect:
raise RuntimeError('Flask-RQ2 is not ready yet to connect to '
'Redis. Was it initialized with a Flask app?')
if self._connection is None:
self._connection = self._connect()
return self._connection
def _connect(self):
connection_class = import_attribute(self.connection_class)
return connection_class.from_url(self.redis_url)
def init_app(self, app):
"""
Initialize the app, e.g. can be used if factory pattern is used.
"""
# The connection related config values
self.redis_url = app.config.setdefault(
'RQ_REDIS_URL',
self.redis_url,
)
self.connection_class = app.config.setdefault(
'RQ_CONNECTION_CLASS',
self.connection_class,
)
# all infos to create a Redis connection are now avaiable.
self._ready_to_connect = True
self.queues = app.config.setdefault(
'RQ_QUEUES',
self.queues,
)
self.queue_class = app.config.setdefault(
'RQ_QUEUE_CLASS',
self.queue_class,
)
self.worker_class = app.config.setdefault(
'RQ_WORKER_CLASS',
self.worker_class,
)
self.job_class = app.config.setdefault(
'RQ_JOB_CLASS',
self.job_class,
)
self.scheduler_class = app.config.setdefault(
'RQ_SCHEDULER_CLASS',
self.scheduler_class,
)
self.scheduler_queue = app.config.setdefault(
'RQ_SCHEDULER_QUEUE',
self.scheduler_queue,
)
self.scheduler_interval = app.config.setdefault(
'RQ_SCHEDULER_INTERVAL',
self.scheduler_interval,
)
#: Whether or not to run RQ jobs asynchronously or not,
#: defaults to asynchronous
_async = app.config.setdefault('RQ_ASYNC', True)
if self._is_async is None:
self._is_async = _async
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['rq2'] = self
if hasattr(app, 'cli'):
self.init_cli(app)
def init_cli(self, app):
"""
Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0.
"""
# in case click isn't installed after all
if click is None:
raise RuntimeError('Cannot import click. Is it installed?')
# only add commands if we have a click context available
from .cli import add_commands
add_commands(app.cli, self)
def exception_handler(self, callback):
"""
Decorator to add an exception handler to the worker, e.g.::
rq = RQ()
@rq.exception_handler
def my_custom_handler(job, *exc_info):
# do custom things here
...
"""
path = '.'.join([callback.__module__, callback.__name__])
self._exception_handlers.append(path)
return callback
def job(self, func_or_queue=None, timeout=None, result_ttl=None, ttl=None,
depends_on=None, at_front=None, meta=None, description=None):
"""
Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str
"""
if callable(func_or_queue):
func = func_or_queue
queue_name = None
else:
func = None
queue_name = func_or_queue
def wrapper(wrapped):
self._jobs.append(wrapped)
helper = self._functions_cls(
rq=self,
wrapped=wrapped,
queue_name=queue_name,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
at_front=at_front,
meta=meta,
description=description,
)
wrapped.helper = helper
for function in helper.functions:
callback = getattr(helper, function, None)
setattr(wrapped, function, callback)
return wrapped
if func is None:
return wrapper
else:
return wrapper(func)
def get_scheduler(self, interval=None, queue=None):
"""
When installed returns a ``rq_scheduler.Scheduler`` instance to
schedule job execution, e.g.::
scheduler = rq.get_scheduler(interval=10)
:param interval: Time in seconds of the periodic check for scheduled
jobs.
:type interval: int
:param queue: Name of the queue to enqueue in, defaults to
:attr:`~flask_rq2.RQ.scheduler_queue`.
:type queue: str
"""
if interval is None:
interval = self.scheduler_interval
if not queue:
queue = self.scheduler_queue
scheduler_cls = import_attribute(self.scheduler_class)
scheduler = scheduler_cls(
queue_name=queue,
interval=interval,
connection=self.connection,
)
return scheduler
def get_worker(self, *queues):
"""
Returns an RQ worker instance for the given queue names, e.g.::
configured_worker = rq.get_worker()
default_worker = rq.get_worker('default')
default_low_worker = rq.get_worker('default', 'low')
:param \\*queues: Names of queues the worker should act on, falls back
to the configured queues.
"""
if not queues:
queues = self.queues
queues = [self.get_queue(name) for name in queues]
worker_cls = import_attribute(self.worker_class)
worker = worker_cls(
queues,
connection=self.connection,
job_class=self.job_class,
queue_class=self.queue_class,
)
for exception_handler in self._exception_handlers:
worker.push_exc_handler(import_attribute(exception_handler))
return worker
|
rq/Flask-RQ2
|
src/flask_rq2/app.py
|
RQ.get_worker
|
python
|
def get_worker(self, *queues):
if not queues:
queues = self.queues
queues = [self.get_queue(name) for name in queues]
worker_cls = import_attribute(self.worker_class)
worker = worker_cls(
queues,
connection=self.connection,
job_class=self.job_class,
queue_class=self.queue_class,
)
for exception_handler in self._exception_handlers:
worker.push_exc_handler(import_attribute(exception_handler))
return worker
|
Returns an RQ worker instance for the given queue names, e.g.::
configured_worker = rq.get_worker()
default_worker = rq.get_worker('default')
default_low_worker = rq.get_worker('default', 'low')
:param \\*queues: Names of queues the worker should act on, falls back
to the configured queues.
|
train
|
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/app.py#L367-L390
| null |
class RQ(object):
"""
The main RQ object to be used in user apps.
"""
#: Name of the default queue.
default_queue = 'default'
#: The fallback default timeout value.
default_timeout = Queue.DEFAULT_TIMEOUT
#: The fallback default result TTL.
#:
#: .. versionadded:: 17.1
default_result_ttl = DEFAULT_RESULT_TTL
#: The DSN (URL) of the Redis connection.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``url`` to ``redis_url``.
redis_url = 'redis://localhost:6379/0'
#: The Redis client class to use.
#:
#: .. versionadded:: 17.1
connection_class = 'redis.StrictRedis'
#: List of queue names for RQ to work on.
queues = [default_queue]
#: Dotted import path to RQ Queue class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``queue_path`` to ``queue_class``.
queue_class = 'rq.queue.Queue'
#: Dotted import path to RQ Workers class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``worker_path`` to ``worker_class``.
worker_class = 'rq.worker.Worker'
#: Dotted import path to RQ Job class to use as base class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``job_path`` to ``job_class``.
job_class = 'flask_rq2.job.FlaskJob'
#: Dotted import path to RQ Scheduler class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``scheduler_path`` to ``scheduler_class``.
#:
#: .. versionchanged:: 18.0
#: Changed to use own scheduler class.
scheduler_class = 'flask_rq2.scheduler.FlaskScheduler'
#: Name of RQ queue to schedule jobs in by rq-scheduler.
scheduler_queue = default_queue
#: Time in seconds the scheduler checks for scheduled jobs
#: periodicically.
scheduler_interval = 60
#: The default job functions class.
#:
#: .. versionchanged:: 17.1
#: Renamed from ``functions_path`` to ``functions_class``.
#: Moved from ``flask_rq2.helpers.JobFunctions`` to
# ``flask_rq2.functions.JobFunctions``.
functions_class = 'flask_rq2.functions.JobFunctions'
def __init__(self, app=None, default_timeout=None, is_async=None,
**kwargs):
"""
Initialize the RQ interface.
:param app: Flask application
:type app: :class:`flask.Flask`
:param default_timeout: The default timeout in seconds to use for jobs,
defaults to RQ's default of 180 seconds per job
:type default_timeout: int
:param is_async: Whether or not to run jobs asynchronously or
in-process, defaults to ``True``
:type is_async: bool
"""
if default_timeout is not None:
self.default_timeout = default_timeout
self._is_async = is_async
if 'async' in kwargs:
self._is_async = kwargs['async']
warnings.warn('The `async` keyword is deprecated. '
'Use `is_async` instead', DeprecationWarning)
self._jobs = []
self._exception_handlers = []
self._queue_instances = {}
self._functions_cls = import_attribute(self.functions_class)
self._ready_to_connect = False
self._connection = None
if app is not None:
self.init_app(app)
@property
def connection(self):
if not self._ready_to_connect:
raise RuntimeError('Flask-RQ2 is not ready yet to connect to '
'Redis. Was it initialized with a Flask app?')
if self._connection is None:
self._connection = self._connect()
return self._connection
def _connect(self):
connection_class = import_attribute(self.connection_class)
return connection_class.from_url(self.redis_url)
def init_app(self, app):
"""
Initialize the app, e.g. can be used if factory pattern is used.
"""
# The connection related config values
self.redis_url = app.config.setdefault(
'RQ_REDIS_URL',
self.redis_url,
)
self.connection_class = app.config.setdefault(
'RQ_CONNECTION_CLASS',
self.connection_class,
)
# all infos to create a Redis connection are now avaiable.
self._ready_to_connect = True
self.queues = app.config.setdefault(
'RQ_QUEUES',
self.queues,
)
self.queue_class = app.config.setdefault(
'RQ_QUEUE_CLASS',
self.queue_class,
)
self.worker_class = app.config.setdefault(
'RQ_WORKER_CLASS',
self.worker_class,
)
self.job_class = app.config.setdefault(
'RQ_JOB_CLASS',
self.job_class,
)
self.scheduler_class = app.config.setdefault(
'RQ_SCHEDULER_CLASS',
self.scheduler_class,
)
self.scheduler_queue = app.config.setdefault(
'RQ_SCHEDULER_QUEUE',
self.scheduler_queue,
)
self.scheduler_interval = app.config.setdefault(
'RQ_SCHEDULER_INTERVAL',
self.scheduler_interval,
)
#: Whether or not to run RQ jobs asynchronously or not,
#: defaults to asynchronous
_async = app.config.setdefault('RQ_ASYNC', True)
if self._is_async is None:
self._is_async = _async
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['rq2'] = self
if hasattr(app, 'cli'):
self.init_cli(app)
def init_cli(self, app):
"""
Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0.
"""
# in case click isn't installed after all
if click is None:
raise RuntimeError('Cannot import click. Is it installed?')
# only add commands if we have a click context available
from .cli import add_commands
add_commands(app.cli, self)
def exception_handler(self, callback):
"""
Decorator to add an exception handler to the worker, e.g.::
rq = RQ()
@rq.exception_handler
def my_custom_handler(job, *exc_info):
# do custom things here
...
"""
path = '.'.join([callback.__module__, callback.__name__])
self._exception_handlers.append(path)
return callback
def job(self, func_or_queue=None, timeout=None, result_ttl=None, ttl=None,
depends_on=None, at_front=None, meta=None, description=None):
"""
Decorator to mark functions for queuing via RQ, e.g.::
rq = RQ()
@rq.job
def add(x, y):
return x + y
or::
@rq.job(timeout=60, result_ttl=60 * 60)
def add(x, y):
return x + y
Adds various functions to the job as documented in
:class:`~flask_rq2.functions.JobFunctions`.
.. versionchanged:: 18.0
Adds ``depends_on``, ``at_front``, ``meta`` and ``description``
parameters.
:param queue: Name of the queue to add job to, defaults to
:attr:`flask_rq2.app.RQ.default_queue`.
:type queue: str
:param timeout: The maximum runtime in seconds of the job before it's
considered 'lost', defaults to 180.
:type timeout: int
:param result_ttl: Time to persist the job results in Redis,
in seconds.
:type result_ttl: int
:param ttl: The maximum queued time of the job before it'll be
cancelled.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:param description: Description of the job.
:type description: str
"""
if callable(func_or_queue):
func = func_or_queue
queue_name = None
else:
func = None
queue_name = func_or_queue
def wrapper(wrapped):
self._jobs.append(wrapped)
helper = self._functions_cls(
rq=self,
wrapped=wrapped,
queue_name=queue_name,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
at_front=at_front,
meta=meta,
description=description,
)
wrapped.helper = helper
for function in helper.functions:
callback = getattr(helper, function, None)
setattr(wrapped, function, callback)
return wrapped
if func is None:
return wrapper
else:
return wrapper(func)
def get_scheduler(self, interval=None, queue=None):
"""
When installed returns a ``rq_scheduler.Scheduler`` instance to
schedule job execution, e.g.::
scheduler = rq.get_scheduler(interval=10)
:param interval: Time in seconds of the periodic check for scheduled
jobs.
:type interval: int
:param queue: Name of the queue to enqueue in, defaults to
:attr:`~flask_rq2.RQ.scheduler_queue`.
:type queue: str
"""
if interval is None:
interval = self.scheduler_interval
if not queue:
queue = self.scheduler_queue
scheduler_cls = import_attribute(self.scheduler_class)
scheduler = scheduler_cls(
queue_name=queue,
interval=interval,
connection=self.connection,
)
return scheduler
def get_queue(self, name=None):
"""
Returns an RQ queue instance with the given name, e.g.::
default_queue = rq.get_queue()
low_queue = rq.get_queue('low')
:param name: Name of the queue to return, defaults to
:attr:`~flask_rq2.RQ.default_queue`.
:type name: str
:return: An RQ queue instance.
:rtype: ``rq.queue.Queue``
"""
if not name:
name = self.default_queue
queue = self._queue_instances.get(name)
if queue is None:
queue_cls = import_attribute(self.queue_class)
queue = queue_cls(
name=name,
default_timeout=self.default_timeout,
is_async=self._is_async,
connection=self.connection,
job_class=self.job_class
)
self._queue_instances[name] = queue
return queue
|
aio-libs/aiohttp_admin
|
aiohttp_admin/admin.py
|
setup_admin_on_rest_handlers
|
python
|
def setup_admin_on_rest_handlers(admin, admin_handler):
add_route = admin.router.add_route
add_static = admin.router.add_static
static_folder = str(PROJ_ROOT / 'static')
a = admin_handler
add_route('GET', '', a.index_page, name='admin.index')
add_route('POST', '/token', a.token, name='admin.token')
add_static('/static', path=static_folder, name='admin.static')
add_route('DELETE', '/logout', a.logout, name='admin.logout')
|
Initialize routes.
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/admin.py#L148-L160
| null |
from aiohttp_jinja2 import render_template
from aiohttp_security import remember, forget
from yarl import URL
from .consts import TEMPLATE_APP_KEY, PROJ_ROOT
from .exceptions import JsonValidaitonError
from .security import authorize
from .utils import json_response, validate_payload, LoginForm
__all__ = [
'AdminHandler', 'setup_admin_handlers', 'setup_admin_on_rest_handlers',
'AdminOnRestHandler',
]
class AdminHandler:
def __init__(self, admin, *, resources, name=None, template=None, loop):
self._admin = admin
self._loop = loop
self._name = name or 'aiohttp_admin'
self._template = template or 'admin.html'
self._login_template = 'login.html'
for r in resources:
r.setup(self._admin, URL('/'))
self._resources = tuple(resources)
@property
def template(self):
return self._template
@property
def name(self):
return self._name
@property
def resources(self):
return self._resources
async def index_page(self, request):
t = self._template
context = {'name': self._name}
return render_template(t, request, context, app_key=TEMPLATE_APP_KEY)
async def login_page(self, request):
t = self._login_template
context = {}
return render_template(t, request, context, app_key=TEMPLATE_APP_KEY)
async def token(self, request):
raw_payload = await request.read()
data = validate_payload(raw_payload, LoginForm)
await authorize(request, data['username'], data['password'])
router = request.app.router
location = router["admin.index"].url_for().human_repr()
payload = {"location": location}
response = json_response(payload)
await remember(request, response, data['username'])
return response
async def logout(self, request):
if "Authorization" not in request.headers:
msg = "Auth header is not present, can not destroy token"
raise JsonValidaitonError(msg)
router = request.app.router
location = router["admin.login"].url_for().human_repr()
payload = {"location": location}
response = json_response(payload)
await forget(request, response)
return response
def setup_admin_handlers(admin, admin_handler, static_folder, admin_conf_path):
add_route = admin.router.add_route
add_static = admin.router.add_static
a = admin_handler
add_route('GET', '', a.index_page, name='admin.index')
add_route('GET', '/login', a.login_page, name='admin.login')
add_route('POST', '/token', a.token, name='admin.token')
add_route('DELETE', '/logout', a.logout, name='admin.logout')
add_static('/static', path=static_folder, name='admin.static')
add_static('/config', path=admin_conf_path, name='admin.config')
class AdminOnRestHandler:
template = 'admin_on_rest.jinja2'
def __init__(self, admin, *, resources, loop, schema):
self._admin = admin
self._loop = loop
self.schema = schema
for r in resources:
r.setup(self._admin, URL('/'))
self._resources = tuple(resources)
@property
def resources(self):
return self._resources
async def index_page(self, request):
"""
Return index page with initial state for admin
"""
context = {"initial_state": self.schema.to_json()}
return render_template(
self.template,
request,
context,
app_key=TEMPLATE_APP_KEY,
)
async def token(self, request):
"""
Validation of user data and generate auth token
"""
raw_payload = await request.read()
data = validate_payload(raw_payload, LoginForm)
await authorize(request, data['username'], data['password'])
router = request.app.router
location = router["admin.index"].url_for().human_repr()
payload = {"location": location}
response = json_response(payload)
await remember(request, response, data['username'])
return response
async def logout(self, request):
"""
Simple handler for logout
"""
if "Authorization" not in request.headers:
msg = "Auth header is not present, can not destroy token"
raise JsonValidaitonError(msg)
response = json_response()
await forget(request, response)
return response
|
aio-libs/aiohttp_admin
|
aiohttp_admin/admin.py
|
AdminOnRestHandler.index_page
|
python
|
async def index_page(self, request):
context = {"initial_state": self.schema.to_json()}
return render_template(
self.template,
request,
context,
app_key=TEMPLATE_APP_KEY,
)
|
Return index page with initial state for admin
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/admin.py#L105-L116
| null |
class AdminOnRestHandler:
template = 'admin_on_rest.jinja2'
def __init__(self, admin, *, resources, loop, schema):
self._admin = admin
self._loop = loop
self.schema = schema
for r in resources:
r.setup(self._admin, URL('/'))
self._resources = tuple(resources)
@property
def resources(self):
return self._resources
async def token(self, request):
"""
Validation of user data and generate auth token
"""
raw_payload = await request.read()
data = validate_payload(raw_payload, LoginForm)
await authorize(request, data['username'], data['password'])
router = request.app.router
location = router["admin.index"].url_for().human_repr()
payload = {"location": location}
response = json_response(payload)
await remember(request, response, data['username'])
return response
async def logout(self, request):
"""
Simple handler for logout
"""
if "Authorization" not in request.headers:
msg = "Auth header is not present, can not destroy token"
raise JsonValidaitonError(msg)
response = json_response()
await forget(request, response)
return response
|
aio-libs/aiohttp_admin
|
aiohttp_admin/admin.py
|
AdminOnRestHandler.logout
|
python
|
async def logout(self, request):
if "Authorization" not in request.headers:
msg = "Auth header is not present, can not destroy token"
raise JsonValidaitonError(msg)
response = json_response()
await forget(request, response)
return response
|
Simple handler for logout
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/admin.py#L134-L145
| null |
class AdminOnRestHandler:
template = 'admin_on_rest.jinja2'
def __init__(self, admin, *, resources, loop, schema):
self._admin = admin
self._loop = loop
self.schema = schema
for r in resources:
r.setup(self._admin, URL('/'))
self._resources = tuple(resources)
@property
def resources(self):
return self._resources
async def index_page(self, request):
"""
Return index page with initial state for admin
"""
context = {"initial_state": self.schema.to_json()}
return render_template(
self.template,
request,
context,
app_key=TEMPLATE_APP_KEY,
)
async def token(self, request):
"""
Validation of user data and generate auth token
"""
raw_payload = await request.read()
data = validate_payload(raw_payload, LoginForm)
await authorize(request, data['username'], data['password'])
router = request.app.router
location = router["admin.index"].url_for().human_repr()
payload = {"location": location}
response = json_response(payload)
await remember(request, response, data['username'])
return response
|
aio-libs/aiohttp_admin
|
aiohttp_admin/utils.py
|
json_datetime_serial
|
python
|
def json_datetime_serial(obj):
if isinstance(obj, (datetime, date)):
serial = obj.isoformat()
return serial
if ObjectId is not None and isinstance(obj, ObjectId):
# TODO: try to use bson.json_util instead
return str(obj)
raise TypeError("Type not serializable")
|
JSON serializer for objects not serializable by default json code
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/utils.py#L29-L39
| null |
import json
from collections import namedtuple
from datetime import datetime, date
from functools import partial
from types import MappingProxyType
import trafaret as t
from aiohttp import web
from .exceptions import JsonValidaitonError
from .consts import TEMPLATES_ROOT
try:
from bson import ObjectId
except ImportError: # pragma: no cover
ObjectId = None
__all__ = ['json_response', 'jsonify', 'validate_query', 'validate_payload',
'gather_template_folders']
PagingParams = namedtuple('PagingParams',
['limit', 'offset', 'sort_field', 'sort_dir'])
MULTI_FIELD_TEXT_QUERY = 'q'
jsonify = partial(json.dumps, default=json_datetime_serial)
json_response = partial(web.json_response, dumps=jsonify)
OptKey = partial(t.Key, optional=True)
SimpleType = t.IntRaw | t.Bool | t.String | t.FloatRaw
Filter = t.Dict({
OptKey('in'): t.List(SimpleType),
OptKey('gt'): SimpleType,
OptKey('ge'): SimpleType,
OptKey('lt'): SimpleType,
OptKey('le'): SimpleType,
OptKey('ne'): SimpleType,
OptKey('eq'): SimpleType,
OptKey('like'): SimpleType,
})
ASC = 'ASC'
DESC = 'DESC'
ListQuery = t.Dict({
OptKey('_page', default=1): t.Int[1:],
OptKey('_perPage', default=30): t.Int[1:],
OptKey('_sortField'): t.String,
OptKey('_sortDir', default=DESC): t.Enum(DESC, ASC),
OptKey('_filters'): t.Mapping(t.String, Filter | SimpleType)
})
LoginForm = t.Dict({
"username": t.String,
"password": t.String,
})
def validate_query_structure(query):
"""Validate query arguments in list request.
:param query: mapping with pagination and filtering information
"""
query_dict = dict(query)
filters = query_dict.pop('_filters', None)
if filters:
try:
f = json.loads(filters)
except ValueError:
msg = '_filters field can not be serialized'
raise JsonValidaitonError(msg)
else:
query_dict['_filters'] = f
try:
q = ListQuery(query_dict)
except t.DataError as exc:
msg = '_filters query invalid'
raise JsonValidaitonError(msg, **as_dict(exc))
return q
def validate_payload(raw_payload, schema):
payload = raw_payload.decode(encoding='UTF-8')
try:
parsed = json.loads(payload)
except ValueError:
raise JsonValidaitonError('Payload is not json serialisable')
try:
data = schema(parsed)
except t.DataError as exc:
raise JsonValidaitonError(**as_dict(exc))
return data
def gather_template_folders(template_folder):
# gather template folders: default and provided
if not isinstance(template_folder, list):
template_folder = [template_folder]
template_root = str(TEMPLATES_ROOT)
if template_folder is None:
template_folders = [template_root]
else:
template_folders = [template_root] + template_folder
return template_folders
def validate_query(query, possible_columns):
q = validate_query_structure(query)
sort_field = q.get('_sortField')
filters = q.get('_filters', [])
columns = [field_name for field_name in filters]
if sort_field is not None:
columns.append(sort_field)
not_valid = set(columns).difference(
possible_columns + [MULTI_FIELD_TEXT_QUERY])
if not_valid:
column_list = ', '.join(not_valid)
msg = 'Columns: {} do not present in resource'.format(column_list)
raise JsonValidaitonError(msg)
return MappingProxyType(q)
def calc_pagination(query_dict, default_sort_direction):
q = query_dict
page = q['_page']
sort_field = q.get('_sortField', default_sort_direction)
per_page = q['_perPage']
sort_dir = q['_sortDir']
offset = (page - 1) * per_page
limit = per_page
return PagingParams(limit, offset, sort_field, sort_dir)
def as_dict(exc, value=None):
result = exc.as_dict(value)
if isinstance(result, str):
return {"error": result}
return result
|
aio-libs/aiohttp_admin
|
aiohttp_admin/utils.py
|
validate_query_structure
|
python
|
def validate_query_structure(query):
query_dict = dict(query)
filters = query_dict.pop('_filters', None)
if filters:
try:
f = json.loads(filters)
except ValueError:
msg = '_filters field can not be serialized'
raise JsonValidaitonError(msg)
else:
query_dict['_filters'] = f
try:
q = ListQuery(query_dict)
except t.DataError as exc:
msg = '_filters query invalid'
raise JsonValidaitonError(msg, **as_dict(exc))
return q
|
Validate query arguments in list request.
:param query: mapping with pagination and filtering information
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/utils.py#L82-L103
|
[
"def as_dict(exc, value=None):\n result = exc.as_dict(value)\n if isinstance(result, str):\n return {\"error\": result}\n return result\n"
] |
import json
from collections import namedtuple
from datetime import datetime, date
from functools import partial
from types import MappingProxyType
import trafaret as t
from aiohttp import web
from .exceptions import JsonValidaitonError
from .consts import TEMPLATES_ROOT
try:
from bson import ObjectId
except ImportError: # pragma: no cover
ObjectId = None
__all__ = ['json_response', 'jsonify', 'validate_query', 'validate_payload',
'gather_template_folders']
PagingParams = namedtuple('PagingParams',
['limit', 'offset', 'sort_field', 'sort_dir'])
MULTI_FIELD_TEXT_QUERY = 'q'
def json_datetime_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
serial = obj.isoformat()
return serial
if ObjectId is not None and isinstance(obj, ObjectId):
# TODO: try to use bson.json_util instead
return str(obj)
raise TypeError("Type not serializable")
jsonify = partial(json.dumps, default=json_datetime_serial)
json_response = partial(web.json_response, dumps=jsonify)
OptKey = partial(t.Key, optional=True)
SimpleType = t.IntRaw | t.Bool | t.String | t.FloatRaw
Filter = t.Dict({
OptKey('in'): t.List(SimpleType),
OptKey('gt'): SimpleType,
OptKey('ge'): SimpleType,
OptKey('lt'): SimpleType,
OptKey('le'): SimpleType,
OptKey('ne'): SimpleType,
OptKey('eq'): SimpleType,
OptKey('like'): SimpleType,
})
ASC = 'ASC'
DESC = 'DESC'
ListQuery = t.Dict({
OptKey('_page', default=1): t.Int[1:],
OptKey('_perPage', default=30): t.Int[1:],
OptKey('_sortField'): t.String,
OptKey('_sortDir', default=DESC): t.Enum(DESC, ASC),
OptKey('_filters'): t.Mapping(t.String, Filter | SimpleType)
})
LoginForm = t.Dict({
"username": t.String,
"password": t.String,
})
def validate_payload(raw_payload, schema):
payload = raw_payload.decode(encoding='UTF-8')
try:
parsed = json.loads(payload)
except ValueError:
raise JsonValidaitonError('Payload is not json serialisable')
try:
data = schema(parsed)
except t.DataError as exc:
raise JsonValidaitonError(**as_dict(exc))
return data
def gather_template_folders(template_folder):
# gather template folders: default and provided
if not isinstance(template_folder, list):
template_folder = [template_folder]
template_root = str(TEMPLATES_ROOT)
if template_folder is None:
template_folders = [template_root]
else:
template_folders = [template_root] + template_folder
return template_folders
def validate_query(query, possible_columns):
q = validate_query_structure(query)
sort_field = q.get('_sortField')
filters = q.get('_filters', [])
columns = [field_name for field_name in filters]
if sort_field is not None:
columns.append(sort_field)
not_valid = set(columns).difference(
possible_columns + [MULTI_FIELD_TEXT_QUERY])
if not_valid:
column_list = ', '.join(not_valid)
msg = 'Columns: {} do not present in resource'.format(column_list)
raise JsonValidaitonError(msg)
return MappingProxyType(q)
def calc_pagination(query_dict, default_sort_direction):
q = query_dict
page = q['_page']
sort_field = q.get('_sortField', default_sort_direction)
per_page = q['_perPage']
sort_dir = q['_sortDir']
offset = (page - 1) * per_page
limit = per_page
return PagingParams(limit, offset, sort_field, sort_dir)
def as_dict(exc, value=None):
result = exc.as_dict(value)
if isinstance(result, str):
return {"error": result}
return result
|
aio-libs/aiohttp_admin
|
aiohttp_admin/contrib/admin.py
|
Schema.to_json
|
python
|
def to_json(self):
endpoints = []
for endpoint in self.endpoints:
list_fields = endpoint.fields
resource_type = endpoint.Meta.resource_type
table = endpoint.Meta.table
data = endpoint.to_dict()
data['fields'] = resource_type.get_type_of_fields(
list_fields,
table,
)
endpoints.append(data)
data = {
'title': self.title,
'endpoints': sorted(endpoints, key=lambda x: x['name']),
}
return json.dumps(data)
|
Prepare data for the initial state of the admin-on-rest
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/contrib/admin.py#L30-L52
| null |
class Schema:
"""
The main abstraction for registering tables and presenting data in
admin-on-rest format.
"""
def __init__(self, title='Admin'):
self.title = title
self.endpoints = []
def register(self, Endpoint):
"""
Register a wrapped `ModelAdmin` class as the endpoint for admin page.
@schema.register
class User(admin.ModelAdmin):
pass
"""
self.endpoints.append(Endpoint())
return Endpoint
@property
def resources(self):
"""
Return list of all registered resources.
"""
resources = []
for endpoint in self.endpoints:
resource_type = endpoint.Meta.resource_type
table = endpoint.Meta.table
url = endpoint.name
resources.append((resource_type, {'table': table, 'url': url}))
return resources
|
aio-libs/aiohttp_admin
|
aiohttp_admin/contrib/admin.py
|
Schema.resources
|
python
|
def resources(self):
resources = []
for endpoint in self.endpoints:
resource_type = endpoint.Meta.resource_type
table = endpoint.Meta.table
url = endpoint.name
resources.append((resource_type, {'table': table, 'url': url}))
return resources
|
Return list of all registered resources.
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/contrib/admin.py#L55-L68
| null |
class Schema:
"""
The main abstraction for registering tables and presenting data in
admin-on-rest format.
"""
def __init__(self, title='Admin'):
self.title = title
self.endpoints = []
def register(self, Endpoint):
"""
Register a wrapped `ModelAdmin` class as the endpoint for admin page.
@schema.register
class User(admin.ModelAdmin):
pass
"""
self.endpoints.append(Endpoint())
return Endpoint
def to_json(self):
"""
Prepare data for the initial state of the admin-on-rest
"""
endpoints = []
for endpoint in self.endpoints:
list_fields = endpoint.fields
resource_type = endpoint.Meta.resource_type
table = endpoint.Meta.table
data = endpoint.to_dict()
data['fields'] = resource_type.get_type_of_fields(
list_fields,
table,
)
endpoints.append(data)
data = {
'title': self.title,
'endpoints': sorted(endpoints, key=lambda x: x['name']),
}
return json.dumps(data)
@property
|
aio-libs/aiohttp_admin
|
aiohttp_admin/backends/sa.py
|
PGResource.get_type_of_fields
|
python
|
def get_type_of_fields(fields, table):
if not fields:
fields = table.primary_key
actual_fields = [
field for field in table.c.items() if field[0] in fields
]
data_type_fields = {
name: FIELD_TYPES.get(type(field_type.type), rc.TEXT_FIELD.value)
for name, field_type in actual_fields
}
return data_type_fields
|
Return data types of `fields` that are in `table`. If a given
parameter is empty return primary key.
:param fields: list - list of fields that need to be returned
:param table: sa.Table - the current table
:return: list - list of the tuples `(field_name, fields_type)`
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/backends/sa.py#L58-L80
| null |
class PGResource(AbstractResource):
def __init__(self, db, table, primary_key='id', url=None):
super().__init__(primary_key=primary_key, resource_name=url)
self._db = db
self._table = table
self._primary_key = primary_key
self._pk = table.c[primary_key]
# TODO: do we ability to pass custom validator for table?
self._create_validator = table_to_trafaret(table, primary_key,
skip_pk=True)
self._update_validator = table_to_trafaret(table, primary_key,
skip_pk=True)
@property
def pool(self):
return self._db
@property
def table(self):
return self._table
@staticmethod
@staticmethod
def get_type_for_inputs(table):
"""
Return information about table's fields in dictionary type.
:param table: sa.Table - the current table
:return: list - list of the dictionaries
"""
return [
dict(
type=INPUT_TYPES.get(
type(field_type.type), rc.TEXT_INPUT.value
),
name=name,
isPrimaryKey=(name in table.primary_key),
props=None,
) for name, field_type in table.c.items()
]
async def list(self, request):
await require(request, Permissions.view)
columns_names = list(self._table.c.keys())
q = validate_query(request.query, columns_names)
paging = calc_pagination(q, self._primary_key)
filters = q.get('_filters')
async with self.pool.acquire() as conn:
if filters:
query = create_filter(self.table, filters)
else:
query = self.table.select()
count = await conn.scalar(
sa.select([sa.func.count()])
.select_from(query.alias('foo')))
sort_dir = sa.asc if paging.sort_dir == ASC else sa.desc
cursor = await conn.execute(
query
.offset(paging.offset)
.limit(paging.limit)
.order_by(sort_dir(paging.sort_field)))
recs = await cursor.fetchall()
entities = list(map(dict, recs))
headers = {'X-Total-Count': str(count)}
return json_response(entities, headers=headers)
async def detail(self, request):
await require(request, Permissions.view)
entity_id = request.match_info['entity_id']
async with self.pool.acquire() as conn:
query = self.table.select().where(self._pk == entity_id)
resp = await conn.execute(query)
rec = await resp.first()
if not rec:
msg = 'Entity with id: {} not found'.format(entity_id)
raise ObjectNotFound(msg)
entity = dict(rec)
return json_response(entity)
async def create(self, request):
await require(request, Permissions.add)
raw_payload = await request.read()
data = validate_payload(raw_payload, self._create_validator)
async with self.pool.acquire() as conn:
query = self.table.insert().values(data).returning(*self.table.c)
rec = await conn.execute(query)
row = await rec.first()
await conn.execute('commit;')
entity = dict(row)
return json_response(entity)
async def update(self, request):
await require(request, Permissions.edit)
entity_id = request.match_info['entity_id']
raw_payload = await request.read()
data = validate_payload(raw_payload, self._update_validator)
# TODO: execute in transaction?
async with self.pool.acquire() as conn:
query = self.table.select().where(self._pk == entity_id)
row = await conn.execute(query)
rec = await row.first()
if not rec:
msg = 'Entity with id: {} not found'.format(entity_id)
raise ObjectNotFound(msg)
row = await conn.execute(
self.table.update()
.values(data)
.returning(*self.table.c)
.where(self._pk == entity_id))
rec = await row.first()
await conn.execute('commit;')
entity = dict(rec)
return json_response(entity)
async def delete(self, request):
await require(request, Permissions.delete)
entity_id = request.match_info['entity_id']
async with self.pool.acquire() as conn:
query = self.table.delete().where(self._pk == entity_id)
await conn.execute(query)
# TODO: Think about autocommit by default
await conn.execute('commit;')
return json_response({'status': 'deleted'})
|
aio-libs/aiohttp_admin
|
aiohttp_admin/backends/sa.py
|
PGResource.get_type_for_inputs
|
python
|
def get_type_for_inputs(table):
return [
dict(
type=INPUT_TYPES.get(
type(field_type.type), rc.TEXT_INPUT.value
),
name=name,
isPrimaryKey=(name in table.primary_key),
props=None,
) for name, field_type in table.c.items()
]
|
Return information about table's fields in dictionary type.
:param table: sa.Table - the current table
:return: list - list of the dictionaries
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/backends/sa.py#L83-L99
| null |
class PGResource(AbstractResource):
def __init__(self, db, table, primary_key='id', url=None):
super().__init__(primary_key=primary_key, resource_name=url)
self._db = db
self._table = table
self._primary_key = primary_key
self._pk = table.c[primary_key]
# TODO: do we ability to pass custom validator for table?
self._create_validator = table_to_trafaret(table, primary_key,
skip_pk=True)
self._update_validator = table_to_trafaret(table, primary_key,
skip_pk=True)
@property
def pool(self):
return self._db
@property
def table(self):
return self._table
@staticmethod
def get_type_of_fields(fields, table):
"""
Return data types of `fields` that are in `table`. If a given
parameter is empty return primary key.
:param fields: list - list of fields that need to be returned
:param table: sa.Table - the current table
:return: list - list of the tuples `(field_name, fields_type)`
"""
if not fields:
fields = table.primary_key
actual_fields = [
field for field in table.c.items() if field[0] in fields
]
data_type_fields = {
name: FIELD_TYPES.get(type(field_type.type), rc.TEXT_FIELD.value)
for name, field_type in actual_fields
}
return data_type_fields
@staticmethod
async def list(self, request):
await require(request, Permissions.view)
columns_names = list(self._table.c.keys())
q = validate_query(request.query, columns_names)
paging = calc_pagination(q, self._primary_key)
filters = q.get('_filters')
async with self.pool.acquire() as conn:
if filters:
query = create_filter(self.table, filters)
else:
query = self.table.select()
count = await conn.scalar(
sa.select([sa.func.count()])
.select_from(query.alias('foo')))
sort_dir = sa.asc if paging.sort_dir == ASC else sa.desc
cursor = await conn.execute(
query
.offset(paging.offset)
.limit(paging.limit)
.order_by(sort_dir(paging.sort_field)))
recs = await cursor.fetchall()
entities = list(map(dict, recs))
headers = {'X-Total-Count': str(count)}
return json_response(entities, headers=headers)
async def detail(self, request):
await require(request, Permissions.view)
entity_id = request.match_info['entity_id']
async with self.pool.acquire() as conn:
query = self.table.select().where(self._pk == entity_id)
resp = await conn.execute(query)
rec = await resp.first()
if not rec:
msg = 'Entity with id: {} not found'.format(entity_id)
raise ObjectNotFound(msg)
entity = dict(rec)
return json_response(entity)
async def create(self, request):
await require(request, Permissions.add)
raw_payload = await request.read()
data = validate_payload(raw_payload, self._create_validator)
async with self.pool.acquire() as conn:
query = self.table.insert().values(data).returning(*self.table.c)
rec = await conn.execute(query)
row = await rec.first()
await conn.execute('commit;')
entity = dict(row)
return json_response(entity)
async def update(self, request):
await require(request, Permissions.edit)
entity_id = request.match_info['entity_id']
raw_payload = await request.read()
data = validate_payload(raw_payload, self._update_validator)
# TODO: execute in transaction?
async with self.pool.acquire() as conn:
query = self.table.select().where(self._pk == entity_id)
row = await conn.execute(query)
rec = await row.first()
if not rec:
msg = 'Entity with id: {} not found'.format(entity_id)
raise ObjectNotFound(msg)
row = await conn.execute(
self.table.update()
.values(data)
.returning(*self.table.c)
.where(self._pk == entity_id))
rec = await row.first()
await conn.execute('commit;')
entity = dict(rec)
return json_response(entity)
async def delete(self, request):
await require(request, Permissions.delete)
entity_id = request.match_info['entity_id']
async with self.pool.acquire() as conn:
query = self.table.delete().where(self._pk == entity_id)
await conn.execute(query)
# TODO: Think about autocommit by default
await conn.execute('commit;')
return json_response({'status': 'deleted'})
|
aio-libs/aiohttp_admin
|
aiohttp_admin/__init__.py
|
_setup
|
python
|
def _setup(app, *, schema, title=None, app_key=APP_KEY, db=None):
    """Initialize the admin-on-rest admin sub-application.

    Creates the admin app, wires up jinja2 templates, instantiates every
    resource declared on *schema*, registers the REST handler and routes,
    and stores the sub-app on *app* under *app_key*.
    """
    admin = web.Application(loop=app.loop)
    app[app_key] = admin
    template_loader = jinja2.FileSystemLoader([TEMPLATES_ROOT, ])
    aiohttp_jinja2.setup(admin, loader=template_loader,
                         app_key=TEMPLATE_APP_KEY)
    if title:
        schema.title = title
    resources = []
    for init, info in schema.resources:
        resources.append(init(db, info['table'], url=info['url']))
    handler = AdminOnRestHandler(
        admin,
        resources=resources,
        loop=app.loop,
        schema=schema,
    )
    admin['admin_handler'] = handler
    setup_admin_on_rest_handlers(admin, handler)
    return admin
|
Initialize the admin-on-rest admin
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/__init__.py#L44-L70
|
[
"def setup_admin_on_rest_handlers(admin, admin_handler):\n \"\"\"\n Initialize routes.\n \"\"\"\n add_route = admin.router.add_route\n add_static = admin.router.add_static\n static_folder = str(PROJ_ROOT / 'static')\n a = admin_handler\n\n add_route('GET', '', a.index_page, name='admin.index')\n add_route('POST', '/token', a.token, name='admin.token')\n add_static('/static', path=static_folder, name='admin.static')\n add_route('DELETE', '/logout', a.logout, name='admin.logout')\n"
] |
import aiohttp_jinja2
import jinja2
from aiohttp import web
from .admin import (
AdminHandler,
setup_admin_handlers,
setup_admin_on_rest_handlers,
AdminOnRestHandler,
)
from .consts import PROJ_ROOT, TEMPLATE_APP_KEY, APP_KEY, TEMPLATES_ROOT
from .security import Permissions, require, authorize
from .utils import gather_template_folders
__all__ = ['AdminHandler', 'setup', 'get_admin', 'Permissions', 'require',
'authorize', '_setup', ]
__version__ = '0.0.2'
def setup(app, admin_conf_path, *, resources, static_folder=None,
          template_folder=None, template_name=None, name=None,
          app_key=APP_KEY):
    """Create and attach the classic admin sub-application to *app*.

    Returns the admin :class:`web.Application`; it is also stored on
    *app* under *app_key* for later retrieval via ``get_admin``.
    """
    admin = web.Application(loop=app.loop)
    app[app_key] = admin
    folders = gather_template_folders(template_folder)
    aiohttp_jinja2.setup(admin, loader=jinja2.FileSystemLoader(folders),
                         app_key=TEMPLATE_APP_KEY)
    template_name = template_name or 'admin.html'
    handler = AdminHandler(admin, resources=resources, name=name,
                           template=template_name, loop=app.loop)
    admin['admin_handler'] = handler
    admin['layout_path'] = admin_conf_path
    static_folder = static_folder or str(PROJ_ROOT / 'static')
    setup_admin_handlers(admin, handler, static_folder, admin_conf_path)
    return admin
def get_admin(app, *, app_key=APP_KEY):
    """Return the admin sub-application stored on *app*, or None if absent."""
    return app.get(app_key)
|
aio-libs/aiohttp_admin
|
aiohttp_admin/contrib/models.py
|
ModelAdmin.to_dict
|
python
|
def to_dict(self):
    """Return dict with the all base information about the instance."""
    pages = {
        "showPage": self.generate_data_for_show_page(),
        "editPage": self.generate_data_for_edit_page(),
        "createPage": self.generate_data_for_create_page(),
    }
    info = {
        "name": self.name,
        "canEdit": self.can_edit,
        "canCreate": self.can_create,
        "canDelete": self.can_delete,
        "perPage": self.per_page,
    }
    info.update(pages)
    return info
|
Return dict with the all base information about the instance.
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/contrib/models.py#L30-L45
|
[
"def generate_data_for_edit_page(self):\n \"\"\"\n Generate a custom representation of table's fields in dictionary type\n if exist edit form else use default representation.\n\n :return: dict\n \"\"\"\n\n if not self.can_edit:\n return {}\n\n if self.edit_form:\n return self.edit_form.to_dict()\n\n return self.generate_simple_data_page()\n",
"def generate_data_for_show_page(self):\n \"\"\"\n Generate a custom representation of table's fields in dictionary type\n if exist show form else use default representation.\n\n :return: dict\n \"\"\"\n if self.show_form:\n return self.show_form.to_dict()\n\n return self.generate_simple_data_page()\n",
"def generate_data_for_create_page(self):\n \"\"\"\n Generate a custom representation of table's fields in dictionary type\n if exist create form else use default representation.\n\n :return: dict\n \"\"\"\n if not self.can_create:\n return {}\n\n if self.create_form:\n return self.create_form.to_dict()\n\n return self.generate_simple_data_page()\n"
] |
class ModelAdmin:
"""
The class provides the possibility of declarative describe of information
about the table and describe all things related to viewing this table on
the administrator's page.
class Users(models.ModelAdmin):
class Meta:
resource_type = PGResource
table = users
"""
can_edit = True
can_create = True
can_delete = True
per_page = 10
fields = None
form = None
edit_form = None
create_form = None
show_form = None
def __init__(self):
self.name = self.__class__.__name__.lower()
self._table = self.Meta.table
self._resource_type = self.Meta.resource_type
def generate_simple_data_page(self):
"""
Generate a simple representation of table's fields in dictionary type.
:return: dict
"""
return self._resource_type.get_type_for_inputs(self._table)
def generate_data_for_edit_page(self):
"""
Generate a custom representation of table's fields in dictionary type
if exist edit form else use default representation.
:return: dict
"""
if not self.can_edit:
return {}
if self.edit_form:
return self.edit_form.to_dict()
return self.generate_simple_data_page()
def generate_data_for_show_page(self):
"""
Generate a custom representation of table's fields in dictionary type
if exist show form else use default representation.
:return: dict
"""
if self.show_form:
return self.show_form.to_dict()
return self.generate_simple_data_page()
def generate_data_for_create_page(self):
"""
Generate a custom representation of table's fields in dictionary type
if exist create form else use default representation.
:return: dict
"""
if not self.can_create:
return {}
if self.create_form:
return self.create_form.to_dict()
return self.generate_simple_data_page()
|
aio-libs/aiohttp_admin
|
aiohttp_admin/contrib/models.py
|
ModelAdmin.generate_data_for_edit_page
|
python
|
def generate_data_for_edit_page(self):
    """Describe the edit page: the custom edit form if one is configured,
    otherwise the default column-derived layout.

    :return: dict - empty when editing is disabled for this model
    """
    if self.can_edit:
        form = self.edit_form
        if form:
            return form.to_dict()
        return self.generate_simple_data_page()
    return {}
|
Generate a custom representation of table's fields in dictionary type
if exist edit form else use default representation.
:return: dict
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/contrib/models.py#L55-L69
|
[
"def generate_simple_data_page(self):\n \"\"\"\n Generate a simple representation of table's fields in dictionary type.\n\n :return: dict\n \"\"\"\n return self._resource_type.get_type_for_inputs(self._table)\n"
] |
class ModelAdmin:
"""
The class provides the possibility of declarative describe of information
about the table and describe all things related to viewing this table on
the administrator's page.
class Users(models.ModelAdmin):
class Meta:
resource_type = PGResource
table = users
"""
can_edit = True
can_create = True
can_delete = True
per_page = 10
fields = None
form = None
edit_form = None
create_form = None
show_form = None
def __init__(self):
self.name = self.__class__.__name__.lower()
self._table = self.Meta.table
self._resource_type = self.Meta.resource_type
def to_dict(self):
"""
Return dict with the all base information about the instance.
"""
data = {
"name": self.name,
"canEdit": self.can_edit,
"canCreate": self.can_create,
"canDelete": self.can_delete,
"perPage": self.per_page,
"showPage": self.generate_data_for_show_page(),
"editPage": self.generate_data_for_edit_page(),
"createPage": self.generate_data_for_create_page(),
}
return data
def generate_simple_data_page(self):
"""
Generate a simple representation of table's fields in dictionary type.
:return: dict
"""
return self._resource_type.get_type_for_inputs(self._table)
def generate_data_for_show_page(self):
"""
Generate a custom representation of table's fields in dictionary type
if exist show form else use default representation.
:return: dict
"""
if self.show_form:
return self.show_form.to_dict()
return self.generate_simple_data_page()
def generate_data_for_create_page(self):
"""
Generate a custom representation of table's fields in dictionary type
if exist create form else use default representation.
:return: dict
"""
if not self.can_create:
return {}
if self.create_form:
return self.create_form.to_dict()
return self.generate_simple_data_page()
|
aio-libs/aiohttp_admin
|
aiohttp_admin/contrib/models.py
|
ModelAdmin.generate_data_for_create_page
|
python
|
def generate_data_for_create_page(self):
    """Describe the create page: the custom create form if one is
    configured, otherwise the default column-derived layout.

    :return: dict - empty when creation is disabled for this model
    """
    if not self.can_create:
        return {}
    form = self.create_form
    return form.to_dict() if form else self.generate_simple_data_page()
|
Generate a custom representation of table's fields in dictionary type
if exist create form else use default representation.
:return: dict
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/contrib/models.py#L83-L96
|
[
"def generate_simple_data_page(self):\n \"\"\"\n Generate a simple representation of table's fields in dictionary type.\n\n :return: dict\n \"\"\"\n return self._resource_type.get_type_for_inputs(self._table)\n"
] |
class ModelAdmin:
"""
The class provides the possibility of declarative describe of information
about the table and describe all things related to viewing this table on
the administrator's page.
class Users(models.ModelAdmin):
class Meta:
resource_type = PGResource
table = users
"""
can_edit = True
can_create = True
can_delete = True
per_page = 10
fields = None
form = None
edit_form = None
create_form = None
show_form = None
def __init__(self):
self.name = self.__class__.__name__.lower()
self._table = self.Meta.table
self._resource_type = self.Meta.resource_type
def to_dict(self):
"""
Return dict with the all base information about the instance.
"""
data = {
"name": self.name,
"canEdit": self.can_edit,
"canCreate": self.can_create,
"canDelete": self.can_delete,
"perPage": self.per_page,
"showPage": self.generate_data_for_show_page(),
"editPage": self.generate_data_for_edit_page(),
"createPage": self.generate_data_for_create_page(),
}
return data
def generate_simple_data_page(self):
"""
Generate a simple representation of table's fields in dictionary type.
:return: dict
"""
return self._resource_type.get_type_for_inputs(self._table)
def generate_data_for_edit_page(self):
"""
Generate a custom representation of table's fields in dictionary type
if exist edit form else use default representation.
:return: dict
"""
if not self.can_edit:
return {}
if self.edit_form:
return self.edit_form.to_dict()
return self.generate_simple_data_page()
def generate_data_for_show_page(self):
"""
Generate a custom representation of table's fields in dictionary type
if exist show form else use default representation.
:return: dict
"""
if self.show_form:
return self.show_form.to_dict()
return self.generate_simple_data_page()
|
aio-libs/aiohttp_admin
|
demos/motortwit/motortwit/views.py
|
SiteHandler.register
|
python
|
async def register(self, request):
    """Registers the user.

    GET renders the empty registration form; POST validates the
    submission and, on success, creates the account and redirects to
    the login page.  Validation errors are returned to the template.
    """
    session = await get_session(request)
    if session.get('user_id'):
        # Already signed in -- nothing to register.
        return redirect(request, 'timeline')
    error = None
    form = None
    if request.method == 'POST':
        form = await request.post()
        existing = await db.get_user_id(self.mongo.user, form['username'])
        if not form['username']:
            error = 'You have to enter a username'
        elif not form['email'] or '@' not in form['email']:
            error = 'You have to enter a valid email address'
        elif not form['password']:
            error = 'You have to enter a password'
        elif form['password'] != form['password2']:
            error = 'The two passwords do not match'
        elif existing is not None:
            error = 'The username is already taken'
        else:
            await self.mongo.user.insert(
                {'username': form['username'],
                 'email': form['email'],
                 'pw_hash': generate_password_hash(form['password'])})
            return redirect(request, 'login')
    return {"error": error, "form": form}
|
Registers the user.
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/demos/motortwit/motortwit/views.py#L116-L145
| null |
class SiteHandler:
def __init__(self, mongo):
self._mongo = mongo
@property
def mongo(self):
return self._mongo
@aiohttp_jinja2.template('timeline.html')
async def timeline(self, request):
session = await get_session(request)
user_id = session.get('user_id')
if user_id is None:
router = request.app.router
location = router['public_timeline'].url_for().human_repr()
raise web.HTTPFound(location=location)
user = await self.mongo.user.find_one({'_id': ObjectId(user_id)})
query = {'who_id': ObjectId(user_id)}
filter = {'whom_id': 1}
followed = await self.mongo.follower.find_one(query, filter)
if followed is None:
followed = {'whom_id': []}
query = {'$or': [{'author_id': ObjectId(user_id)},
{'author_id': {'$in': followed['whom_id']}}]}
messages = await self.mongo.message\
.find(query)\
.sort('pub_date', -1)\
.to_list(30)
endpoint = request.match_info.route.name
return {"messages": messages,
"user": user,
"endpoint": endpoint}
@aiohttp_jinja2.template('timeline.html')
async def public_timeline(self, request):
messages = await self.mongo.message\
.find()\
.sort('pub_date', -1)\
.to_list(30)
return {"messages": messages,
"endpoint": request.match_info.route.name}
@aiohttp_jinja2.template('timeline.html')
async def user_timeline(self, request):
username = request.match_info['username']
profile_user = await self.mongo.user.find_one({'username': username})
if profile_user is None:
raise web.HTTPNotFound()
followed = False
session = await get_session(request)
user_id = session.get('user_id')
user = None
if user_id:
user = await self.mongo.user.find_one({'_id': ObjectId(user_id)})
followed = await self.mongo.follower.find_one(
{'who_id': ObjectId(session['user_id']),
'whom_id': {'$in': [ObjectId(profile_user['_id'])]}})
followed = followed is not None
messages = await self.mongo.message\
.find({'author_id': ObjectId(profile_user['_id'])})\
.sort('pub_date', -1)\
.to_list(30)
profile_user['_id'] = str(profile_user['_id'])
return {"messages": messages,
"followed": followed,
"profile_user": profile_user,
"user": user,
"endpoint": request.match_info.route.name}
@aiohttp_jinja2.template('login.html')
async def login(self, request):
session = await get_session(request)
user_id = session.get('user_id')
if user_id:
return redirect(request, 'timeline')
error = None
form = None
if request.method == 'POST':
form = await request.post()
user = await self.mongo.user.find_one(
{'username': form['username']})
if user is None:
error = 'Invalid username'
elif not check_password_hash(user['pw_hash'], form['password']):
error = 'Invalid password'
else:
session['user_id'] = str(user['_id'])
return redirect(request, 'timeline')
return {"error": error, "form": form}
async def logout(self, request):
session = await get_session(request)
session.pop('user_id', None)
return redirect(request, 'public_timeline')
@aiohttp_jinja2.template('register.html')
async def follow_user(self, request):
"""Adds the current user as follower of the given user."""
username = request.match_info['username']
session = await get_session(request)
user_id = session.get('user_id')
if not user_id:
raise web.HTTPNotAuthorized()
whom_id = await db.get_user_id(self.mongo.user, username)
if whom_id is None:
raise web.HTTPFound()
await self.mongo.follower.update(
{'who_id': ObjectId(user_id)},
{'$push': {'whom_id': whom_id}}, upsert=True)
return redirect(request, 'user_timeline', parts={"username": username})
async def unfollow_user(self, request):
"""Removes the current user as follower of the given user."""
username = request.match_info['username']
session = await get_session(request)
user_id = session.get('user_id')
if not user_id:
raise web.HTTPNotAuthorized()
whom_id = await db.get_user_id(self.mongo.user, username)
if whom_id is None:
raise web.HTTPFound()
await self.mongo.follower.update(
{'who_id': ObjectId(session['user_id'])},
{'$pull': {'whom_id': whom_id}})
return redirect(request, 'user_timeline', parts={"username": username})
async def add_message(self, request):
"""Registers a new message for the user."""
session = await get_session(request)
user_id = session.get('user_id')
if not user_id:
raise web.HTTPNotAuthorized()
form = await request.post()
if form.get('text'):
user = await self.mongo.user.find_one(
{'_id': ObjectId(session['user_id'])},
{'email': 1, 'username': 1})
await self.mongo.message.insert(
{'author_id': ObjectId(user_id),
'email': user['email'],
'username': user['username'],
'text': form['text'],
'pub_date': datetime.datetime.utcnow()})
return redirect(request, 'timeline')
|
aio-libs/aiohttp_admin
|
demos/motortwit/motortwit/views.py
|
SiteHandler.follow_user
|
python
|
async def follow_user(self, request):
    """Adds the current user as follower of the given user.

    Requires an authenticated session (401 otherwise).
    """
    username = request.match_info['username']
    session = await get_session(request)
    follower_id = session.get('user_id')
    if not follower_id:
        raise web.HTTPNotAuthorized()
    target_id = await db.get_user_id(self.mongo.user, username)
    if target_id is None:
        # NOTE(review): HTTPFound without a location looks suspicious --
        # presumably a 404 was intended; behavior kept as-is.
        raise web.HTTPFound()
    await self.mongo.follower.update(
        {'who_id': ObjectId(follower_id)},
        {'$push': {'whom_id': target_id}}, upsert=True)
    return redirect(request, 'user_timeline', parts={"username": username})
|
Adds the current user as follower of the given user.
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/demos/motortwit/motortwit/views.py#L147-L165
| null |
class SiteHandler:
def __init__(self, mongo):
self._mongo = mongo
@property
def mongo(self):
return self._mongo
@aiohttp_jinja2.template('timeline.html')
async def timeline(self, request):
session = await get_session(request)
user_id = session.get('user_id')
if user_id is None:
router = request.app.router
location = router['public_timeline'].url_for().human_repr()
raise web.HTTPFound(location=location)
user = await self.mongo.user.find_one({'_id': ObjectId(user_id)})
query = {'who_id': ObjectId(user_id)}
filter = {'whom_id': 1}
followed = await self.mongo.follower.find_one(query, filter)
if followed is None:
followed = {'whom_id': []}
query = {'$or': [{'author_id': ObjectId(user_id)},
{'author_id': {'$in': followed['whom_id']}}]}
messages = await self.mongo.message\
.find(query)\
.sort('pub_date', -1)\
.to_list(30)
endpoint = request.match_info.route.name
return {"messages": messages,
"user": user,
"endpoint": endpoint}
@aiohttp_jinja2.template('timeline.html')
async def public_timeline(self, request):
messages = await self.mongo.message\
.find()\
.sort('pub_date', -1)\
.to_list(30)
return {"messages": messages,
"endpoint": request.match_info.route.name}
@aiohttp_jinja2.template('timeline.html')
async def user_timeline(self, request):
username = request.match_info['username']
profile_user = await self.mongo.user.find_one({'username': username})
if profile_user is None:
raise web.HTTPNotFound()
followed = False
session = await get_session(request)
user_id = session.get('user_id')
user = None
if user_id:
user = await self.mongo.user.find_one({'_id': ObjectId(user_id)})
followed = await self.mongo.follower.find_one(
{'who_id': ObjectId(session['user_id']),
'whom_id': {'$in': [ObjectId(profile_user['_id'])]}})
followed = followed is not None
messages = await self.mongo.message\
.find({'author_id': ObjectId(profile_user['_id'])})\
.sort('pub_date', -1)\
.to_list(30)
profile_user['_id'] = str(profile_user['_id'])
return {"messages": messages,
"followed": followed,
"profile_user": profile_user,
"user": user,
"endpoint": request.match_info.route.name}
@aiohttp_jinja2.template('login.html')
async def login(self, request):
session = await get_session(request)
user_id = session.get('user_id')
if user_id:
return redirect(request, 'timeline')
error = None
form = None
if request.method == 'POST':
form = await request.post()
user = await self.mongo.user.find_one(
{'username': form['username']})
if user is None:
error = 'Invalid username'
elif not check_password_hash(user['pw_hash'], form['password']):
error = 'Invalid password'
else:
session['user_id'] = str(user['_id'])
return redirect(request, 'timeline')
return {"error": error, "form": form}
async def logout(self, request):
session = await get_session(request)
session.pop('user_id', None)
return redirect(request, 'public_timeline')
@aiohttp_jinja2.template('register.html')
async def register(self, request):
"""Registers the user."""
session = await get_session(request)
user_id = session.get('user_id')
if user_id:
return redirect(request, 'timeline')
error = None
form = None
if request.method == 'POST':
form = await request.post()
user_id = await db.get_user_id(self.mongo.user, form['username'])
if not form['username']:
error = 'You have to enter a username'
elif not form['email'] or '@' not in form['email']:
error = 'You have to enter a valid email address'
elif not form['password']:
error = 'You have to enter a password'
elif form['password'] != form['password2']:
error = 'The two passwords do not match'
elif user_id is not None:
error = 'The username is already taken'
else:
await self.mongo.user.insert(
{'username': form['username'],
'email': form['email'],
'pw_hash': generate_password_hash(form['password'])})
return redirect(request, 'login')
return {"error": error, "form": form}
async def unfollow_user(self, request):
"""Removes the current user as follower of the given user."""
username = request.match_info['username']
session = await get_session(request)
user_id = session.get('user_id')
if not user_id:
raise web.HTTPNotAuthorized()
whom_id = await db.get_user_id(self.mongo.user, username)
if whom_id is None:
raise web.HTTPFound()
await self.mongo.follower.update(
{'who_id': ObjectId(session['user_id'])},
{'$pull': {'whom_id': whom_id}})
return redirect(request, 'user_timeline', parts={"username": username})
async def add_message(self, request):
"""Registers a new message for the user."""
session = await get_session(request)
user_id = session.get('user_id')
if not user_id:
raise web.HTTPNotAuthorized()
form = await request.post()
if form.get('text'):
user = await self.mongo.user.find_one(
{'_id': ObjectId(session['user_id'])},
{'email': 1, 'username': 1})
await self.mongo.message.insert(
{'author_id': ObjectId(user_id),
'email': user['email'],
'username': user['username'],
'text': form['text'],
'pub_date': datetime.datetime.utcnow()})
return redirect(request, 'timeline')
|
aio-libs/aiohttp_admin
|
demos/motortwit/motortwit/views.py
|
SiteHandler.add_message
|
python
|
async def add_message(self, request):
    """Registers a new message for the user.

    Requires an authenticated session (401 otherwise).  Empty
    submissions are silently ignored; always redirects to the timeline.
    """
    session = await get_session(request)
    author_id = session.get('user_id')
    if not author_id:
        raise web.HTTPNotAuthorized()
    form = await request.post()
    if form.get('text'):
        author = await self.mongo.user.find_one(
            {'_id': ObjectId(session['user_id'])},
            {'email': 1, 'username': 1})
        message = {'author_id': ObjectId(author_id),
                   'email': author['email'],
                   'username': author['username'],
                   'text': form['text'],
                   'pub_date': datetime.datetime.utcnow()}
        await self.mongo.message.insert(message)
    return redirect(request, 'timeline')
|
Registers a new message for the user.
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/demos/motortwit/motortwit/views.py#L184-L203
| null |
class SiteHandler:
def __init__(self, mongo):
self._mongo = mongo
@property
def mongo(self):
return self._mongo
@aiohttp_jinja2.template('timeline.html')
async def timeline(self, request):
session = await get_session(request)
user_id = session.get('user_id')
if user_id is None:
router = request.app.router
location = router['public_timeline'].url_for().human_repr()
raise web.HTTPFound(location=location)
user = await self.mongo.user.find_one({'_id': ObjectId(user_id)})
query = {'who_id': ObjectId(user_id)}
filter = {'whom_id': 1}
followed = await self.mongo.follower.find_one(query, filter)
if followed is None:
followed = {'whom_id': []}
query = {'$or': [{'author_id': ObjectId(user_id)},
{'author_id': {'$in': followed['whom_id']}}]}
messages = await self.mongo.message\
.find(query)\
.sort('pub_date', -1)\
.to_list(30)
endpoint = request.match_info.route.name
return {"messages": messages,
"user": user,
"endpoint": endpoint}
@aiohttp_jinja2.template('timeline.html')
async def public_timeline(self, request):
messages = await self.mongo.message\
.find()\
.sort('pub_date', -1)\
.to_list(30)
return {"messages": messages,
"endpoint": request.match_info.route.name}
@aiohttp_jinja2.template('timeline.html')
async def user_timeline(self, request):
username = request.match_info['username']
profile_user = await self.mongo.user.find_one({'username': username})
if profile_user is None:
raise web.HTTPNotFound()
followed = False
session = await get_session(request)
user_id = session.get('user_id')
user = None
if user_id:
user = await self.mongo.user.find_one({'_id': ObjectId(user_id)})
followed = await self.mongo.follower.find_one(
{'who_id': ObjectId(session['user_id']),
'whom_id': {'$in': [ObjectId(profile_user['_id'])]}})
followed = followed is not None
messages = await self.mongo.message\
.find({'author_id': ObjectId(profile_user['_id'])})\
.sort('pub_date', -1)\
.to_list(30)
profile_user['_id'] = str(profile_user['_id'])
return {"messages": messages,
"followed": followed,
"profile_user": profile_user,
"user": user,
"endpoint": request.match_info.route.name}
@aiohttp_jinja2.template('login.html')
async def login(self, request):
session = await get_session(request)
user_id = session.get('user_id')
if user_id:
return redirect(request, 'timeline')
error = None
form = None
if request.method == 'POST':
form = await request.post()
user = await self.mongo.user.find_one(
{'username': form['username']})
if user is None:
error = 'Invalid username'
elif not check_password_hash(user['pw_hash'], form['password']):
error = 'Invalid password'
else:
session['user_id'] = str(user['_id'])
return redirect(request, 'timeline')
return {"error": error, "form": form}
async def logout(self, request):
session = await get_session(request)
session.pop('user_id', None)
return redirect(request, 'public_timeline')
@aiohttp_jinja2.template('register.html')
async def register(self, request):
"""Registers the user."""
session = await get_session(request)
user_id = session.get('user_id')
if user_id:
return redirect(request, 'timeline')
error = None
form = None
if request.method == 'POST':
form = await request.post()
user_id = await db.get_user_id(self.mongo.user, form['username'])
if not form['username']:
error = 'You have to enter a username'
elif not form['email'] or '@' not in form['email']:
error = 'You have to enter a valid email address'
elif not form['password']:
error = 'You have to enter a password'
elif form['password'] != form['password2']:
error = 'The two passwords do not match'
elif user_id is not None:
error = 'The username is already taken'
else:
await self.mongo.user.insert(
{'username': form['username'],
'email': form['email'],
'pw_hash': generate_password_hash(form['password'])})
return redirect(request, 'login')
return {"error": error, "form": form}
async def follow_user(self, request):
    """Adds the current user as follower of the given user.

    Raises 401 when nobody is signed in and 404 when the target
    username does not exist; otherwise records the follow and
    redirects to the target's timeline.
    """
    username = request.match_info['username']
    session = await get_session(request)
    user_id = session.get('user_id')
    if not user_id:
        raise web.HTTPNotAuthorized()
    whom_id = await db.get_user_id(self.mongo.user, username)
    if whom_id is None:
        # BUG FIX: the previous `raise web.HTTPFound()` had no location,
        # which makes aiohttp raise ValueError at runtime (a 500).
        # An unknown username should simply be a 404.
        raise web.HTTPNotFound()
    await self.mongo.follower.update(
        {'who_id': ObjectId(user_id)},
        {'$push': {'whom_id': whom_id}}, upsert=True)
    return redirect(request, 'user_timeline', parts={"username": username})
async def unfollow_user(self, request):
    """Removes the current user as follower of the given user.

    Raises 401 when nobody is signed in and 404 when the target
    username does not exist; otherwise removes the follow and
    redirects to the target's timeline.
    """
    username = request.match_info['username']
    session = await get_session(request)
    user_id = session.get('user_id')
    if not user_id:
        raise web.HTTPNotAuthorized()
    whom_id = await db.get_user_id(self.mongo.user, username)
    if whom_id is None:
        # BUG FIX: `raise web.HTTPFound()` with no location raises
        # ValueError at runtime in aiohttp; an unknown username should
        # yield a plain 404 instead.
        raise web.HTTPNotFound()
    await self.mongo.follower.update(
        {'who_id': ObjectId(session['user_id'])},
        {'$pull': {'whom_id': whom_id}})
    return redirect(request, 'user_timeline', parts={"username": username})
|
aio-libs/aiohttp_admin
|
demos/motortwit/motortwit/utils.py
|
robo_avatar_url
|
python
|
def robo_avatar_url(user_data, size=80):
    """Return a robohash.org avatar URL for *user_data*.

    The avatar identifier is the MD5 hex digest of the stringified,
    stripped, lower-cased user data; *size* selects a square image of
    ``size x size`` pixels (default 80).
    """
    # Renamed from `hash`, which shadowed the builtin of the same name.
    digest = md5(str(user_data).strip().lower().encode('utf-8')).hexdigest()
    return "https://robohash.org/{hash}.png?size={size}x{size}".format(
        hash=digest, size=size)
|
Return the robohash avatar image URL for the given user data.
|
train
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/demos/motortwit/motortwit/utils.py#L27-L32
| null |
import pytz
import yaml
from hashlib import md5
from dateutil.parser import parse
from aiohttp import web
import motor.motor_asyncio as aiomotor
def load_config(fname):
    """Load the YAML configuration file *fname* and return its contents.

    Uses ``yaml.safe_load`` so an untrusted config file cannot
    instantiate arbitrary Python objects; ``yaml.load`` without an
    explicit Loader is unsafe and deprecated in PyYAML >= 5.1.
    """
    with open(fname, 'rt') as f:
        data = yaml.safe_load(f)
    # TODO: add config validation
    return data
async def init_mongo(conf, loop):
    """Create an asyncio Motor client and return the configured database.

    *conf* supplies host, port, max_pool_size and database name; *loop*
    is the event loop the client is bound to.
    """
    uri = "mongodb://{}:{}".format(conf['host'], conf['port'])
    client = aiomotor.AsyncIOMotorClient(
        uri,
        maxPoolSize=conf['max_pool_size'],
        io_loop=loop)
    return client[conf['database']]
def format_datetime(timestamp):
    """Format a datetime (or parseable string) as ``YYYY-MM-DD @ HH:MM``.

    The value is stamped with UTC via ``pytz.utc`` before formatting.
    """
    value = parse(timestamp) if isinstance(timestamp, str) else timestamp
    return value.replace(tzinfo=pytz.utc).strftime('%Y-%m-%d @ %H:%M')
def redirect(request, name, **kw):
    """Build an HTTP 302 response pointing at the named route."""
    target = request.app.router[name].url(**kw)
    return web.HTTPFound(location=target)
|
dakrauth/django-swingtime
|
swingtime/forms.py
|
timeslot_options
|
python
|
def timeslot_options(
    interval=swingtime_settings.TIMESLOT_INTERVAL,
    start_time=swingtime_settings.TIMESLOT_START_TIME,
    end_delta=swingtime_settings.TIMESLOT_END_TIME_DURATION,
    fmt=swingtime_settings.TIMESLOT_TIME_FORMAT
):
    '''
    Create a list of time slot options for use in swingtime forms.

    The list is comprised of 2-tuples containing a 24-hour time value and a
    12-hour temporal representation of that offset.
    '''
    first_slot = datetime.combine(date.today(), start_time)
    last_slot = first_slot + end_delta
    options = []
    current = first_slot
    while current <= last_slot:
        options.append((str(current.time()), current.strftime(fmt)))
        current += interval
    return options
|
Create a list of time slot options for use in swingtime forms.
The list is comprised of 2-tuples containing a 24-hour time value and a
12-hour temporal representation of that offset.
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/forms.py#L101-L123
| null |
'''
Convenience forms for adding and updating ``Event`` and ``Occurrence``s.
'''
from datetime import datetime, date, time, timedelta
from django import forms
from django.forms.utils import to_current_timezone
from django.utils.translation import ugettext_lazy as _
from django.forms.widgets import SelectDateWidget
from dateutil import rrule
from .conf import swingtime_settings
from .models import *
from . import utils
WEEKDAY_SHORT = (
(7, _('Sun')),
(1, _('Mon')),
(2, _('Tue')),
(3, _('Wed')),
(4, _('Thu')),
(5, _('Fri')),
(6, _('Sat'))
)
WEEKDAY_LONG = (
(7, _('Sunday')),
(1, _('Monday')),
(2, _('Tuesday')),
(3, _('Wednesday')),
(4, _('Thursday')),
(5, _('Friday')),
(6, _('Saturday'))
)
MONTH_LONG = (
(1, _('January')),
(2, _('February')),
(3, _('March')),
(4, _('April')),
(5, _('May')),
(6, _('June')),
(7, _('July')),
(8, _('August')),
(9, _('September')),
(10, _('October')),
(11, _('November')),
(12, _('December')),
)
MONTH_SHORT = (
(1, _('Jan')),
(2, _('Feb')),
(3, _('Mar')),
(4, _('Apr')),
(5, _('May')),
(6, _('Jun')),
(7, _('Jul')),
(8, _('Aug')),
(9, _('Sep')),
(10, _('Oct')),
(11, _('Nov')),
(12, _('Dec')),
)
ORDINAL = (
(1, _('first')),
(2, _('second')),
(3, _('third')),
(4, _('fourth')),
(-1, _('last'))
)
FREQUENCY_CHOICES = (
(rrule.DAILY, _('Day(s)')),
(rrule.WEEKLY, _('Week(s)')),
(rrule.MONTHLY, _('Month(s)')),
(rrule.YEARLY, _('Year(s)')),
)
REPEAT_CHOICES = (
('count', _('By count')),
('until', _('Until date')),
)
ISO_WEEKDAYS_MAP = (
None,
rrule.MO,
rrule.TU,
rrule.WE,
rrule.TH,
rrule.FR,
rrule.SA,
rrule.SU
)
MINUTES_INTERVAL = swingtime_settings.TIMESLOT_INTERVAL.seconds // 60
SECONDS_INTERVAL = utils.time_delta_total_seconds(swingtime_settings.DEFAULT_OCCURRENCE_DURATION)
def timeslot_offset_options(
    interval=swingtime_settings.TIMESLOT_INTERVAL,
    start_time=swingtime_settings.TIMESLOT_START_TIME,
    end_delta=swingtime_settings.TIMESLOT_END_TIME_DURATION,
    fmt=swingtime_settings.TIMESLOT_TIME_FORMAT
):
    '''
    Create a list of time slot options for use in swingtime forms.

    The list is comprised of 2-tuples containing the number of seconds since
    the start of the day and a 12-hour temporal representation of that offset.
    '''
    midnight = datetime.combine(date.today(), time(0))
    current = datetime.combine(midnight.date(), start_time)
    last_slot = current + end_delta
    step_seconds = utils.time_delta_total_seconds(interval)
    offset = utils.time_delta_total_seconds(current - midnight)
    options = []
    while current <= last_slot:
        options.append((offset, current.strftime(fmt)))
        current += interval
        offset += step_seconds
    return options
default_timeslot_options = timeslot_options()
default_timeslot_offset_options = timeslot_offset_options()
class MultipleIntegerField(forms.MultipleChoiceField):
    '''
    A multiple-choice form field whose cleaned values are ints.
    '''

    def __init__(self, choices, size=None, label=None, widget=None):
        # Default to a SelectMultiple sized to the number of choices.
        if not widget:
            widget = forms.SelectMultiple(
                attrs={'size': size or len(choices)})
        super().__init__(
            required=False,
            choices=choices,
            label=label,
            widget=widget,
        )

    def clean(self, value):
        cleaned = super().clean(value)
        return [int(item) for item in cleaned]
class SplitDateTimeWidget(forms.MultiWidget):
    '''
    A MultiWidget pairing a SelectDateWidget for the date with a Select
    of precomputed time slots for the time.
    '''

    def __init__(self, attrs=None):
        super().__init__((
            SelectDateWidget(attrs=attrs),
            forms.Select(choices=default_timeslot_options, attrs=attrs),
        ))

    def decompress(self, value):
        # Split a datetime into [date, time]; microseconds are dropped so
        # the time matches one of the slot choices.
        if not value:
            return [None, None]
        localized = to_current_timezone(value)
        return [localized.date(), localized.time().replace(microsecond=0)]
class MultipleOccurrenceForm(forms.Form):
# Form that creates one or many Occurrence rows for an Event by turning
# the submitted recurrence options into dateutil.rrule parameters.
day = forms.DateField(
label=_('Date'),
initial=date.today,
widget=SelectDateWidget()
)
# start/end are stored as seconds-since-midnight offsets into the day.
start_time_delta = forms.IntegerField(
label=_('Start time'),
widget=forms.Select(choices=default_timeslot_offset_options)
)
end_time_delta = forms.IntegerField(
label=_('End time'),
widget=forms.Select(choices=default_timeslot_offset_options)
)
# recurrence options
repeats = forms.ChoiceField(
choices=REPEAT_CHOICES,
initial='count',
label=_('Occurrences'),
widget=forms.RadioSelect()
)
count = forms.IntegerField(
label=_('Total Occurrences'),
initial=1,
required=False,
widget=forms.TextInput(attrs=dict(size=2, max_length=2))
)
until = forms.DateField(
required=False,
initial=date.today,
widget=SelectDateWidget()
)
freq = forms.IntegerField(
label=_('Frequency'),
initial=rrule.WEEKLY,
widget=forms.RadioSelect(choices=FREQUENCY_CHOICES),
)
interval = forms.IntegerField(
required=False,
initial='1',
widget=forms.TextInput(attrs=dict(size=3, max_length=3))
)
# weekly options
week_days = MultipleIntegerField(
WEEKDAY_SHORT,
label=_('Weekly options'),
widget=forms.CheckboxSelectMultiple
)
# monthly options
month_option = forms.ChoiceField(
choices=(('on', _('On the')), ('each', _('Each:'))),
initial='each',
widget=forms.RadioSelect(),
label=_('Monthly options')
)
month_ordinal = forms.IntegerField(
widget=forms.Select(choices=ORDINAL),
required=False
)
month_ordinal_day = forms.IntegerField(
widget=forms.Select(choices=WEEKDAY_LONG),
required=False
)
each_month_day = MultipleIntegerField(
[(i, i) for i in range(1, 32)],
widget=forms.CheckboxSelectMultiple
)
# yearly options
year_months = MultipleIntegerField(
MONTH_SHORT,
label=_('Yearly options'),
widget=forms.CheckboxSelectMultiple
)
is_year_month_ordinal = forms.BooleanField(required=False)
year_month_ordinal = forms.IntegerField(
widget=forms.Select(choices=ORDINAL),
required=False
)
year_month_ordinal_day = forms.IntegerField(
widget=forms.Select(choices=WEEKDAY_LONG),
required=False
)
# Seed self.initial from an optional 'dtstart' datetime: the start time is
# snapped down to the nearest slot boundary and every recurrence field gets
# a default derived from that moment.
def __init__(self, *args, **kws):
super().__init__(*args, **kws)
dtstart = self.initial.get('dtstart', None)
if dtstart:
# Snap minutes down to the slot grid and drop sub-minute precision.
dtstart = dtstart.replace(
minute=((dtstart.minute // MINUTES_INTERVAL) * MINUTES_INTERVAL),
second=0,
microsecond=0
)
weekday = dtstart.isoweekday()
# Week-of-month ordinal: '1'..'4' for days 1-27, '-1' (last) from day 28 on.
ordinal = dtstart.day // 7
ordinal = '%d' % (-1 if ordinal > 3 else ordinal + 1,)
# Seconds since midnight, keeping dtstart's tzinfo on both operands.
midnight = datetime.combine(dtstart.date(), time(0, tzinfo=dtstart.tzinfo))
offset = (dtstart - midnight).seconds
self.initial.setdefault('day', dtstart)
self.initial.setdefault('week_days', '%d' % weekday)
self.initial.setdefault('month_ordinal', ordinal)
self.initial.setdefault('month_ordinal_day', '%d' % weekday)
self.initial.setdefault('each_month_day', ['%d' % dtstart.day])
self.initial.setdefault('year_months', ['%d' % dtstart.month])
self.initial.setdefault('year_month_ordinal', ordinal)
self.initial.setdefault('year_month_ordinal_day', '%d' % weekday)
self.initial.setdefault('start_time_delta', '%d' % offset)
self.initial.setdefault('end_time_delta', '%d' % (offset + SECONDS_INTERVAL,))
# Convert the day + second-offsets into concrete start/end datetimes.
def clean(self):
if 'day' in self.cleaned_data:
day = datetime.combine(self.cleaned_data['day'], time(0))
self.cleaned_data['start_time'] = day + timedelta(
seconds=self.cleaned_data['start_time_delta']
)
self.cleaned_data['end_time'] = day + timedelta(
seconds=self.cleaned_data['end_time_delta']
)
return self.cleaned_data
# Create the occurrence(s) on *event*; a single-count form adds exactly
# one occurrence (empty rrule params), otherwise a full rrule is built.
def save(self, event):
if self.cleaned_data['repeats'] == 'count' and self.cleaned_data['count'] == 1:
params = {}
else:
params = self._build_rrule_params()
event.add_occurrences(
self.cleaned_data['start_time'],
self.cleaned_data['end_time'],
**params
)
return event
# Translate the cleaned recurrence fields into dateutil.rrule kwargs.
def _build_rrule_params(self):
iso = ISO_WEEKDAYS_MAP
data = self.cleaned_data
params = dict(
freq=data['freq'],
interval=data['interval'] or 1
)
if data['repeats'] == 'until':
params['until'] = data['until']
else:
params['count'] = data.get('count', 1)
if params['freq'] == rrule.WEEKLY:
params['byweekday'] = [iso[n] for n in data['week_days']]
elif params['freq'] == rrule.MONTHLY:
if 'on' == data['month_option']:
ordinal = data['month_ordinal']
day = iso[data['month_ordinal_day']]
params.update(byweekday=day, bysetpos=ordinal)
else:
params['bymonthday'] = data['each_month_day']
elif params['freq'] == rrule.YEARLY:
params['bymonth'] = data['year_months']
if data['is_year_month_ordinal']:
ordinal = data['year_month_ordinal']
day = iso[data['year_month_ordinal_day']]
params['byweekday'] = day(ordinal)
elif params['freq'] != rrule.DAILY:
# NOTE(review): params['freq'] is an int, so this str + int concat
# would itself raise TypeError if this branch were ever reached --
# confirm and switch to str formatting.
raise NotImplementedError(_('Unknown interval rule ' + params['freq']))
return params
class EventForm(forms.ModelForm):
    '''
    ModelForm for creating and editing Event instances.
    '''

    class Meta:
        model = Event
        fields = "__all__"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The form accepts an empty description even if the model field
        # is required.
        self.fields['description'].required = False
class SingleOccurrenceForm(forms.ModelForm):
    '''
    ModelForm for creating or editing one Occurrence at a time, using the
    split date/time-slot widget for both endpoints.
    '''

    start_time = forms.SplitDateTimeField(widget=SplitDateTimeWidget)
    end_time = forms.SplitDateTimeField(widget=SplitDateTimeWidget)

    class Meta:
        model = Occurrence
        fields = "__all__"
|
dakrauth/django-swingtime
|
swingtime/forms.py
|
timeslot_offset_options
|
python
|
def timeslot_offset_options(
    interval=swingtime_settings.TIMESLOT_INTERVAL,
    start_time=swingtime_settings.TIMESLOT_START_TIME,
    end_delta=swingtime_settings.TIMESLOT_END_TIME_DURATION,
    fmt=swingtime_settings.TIMESLOT_TIME_FORMAT
):
    '''
    Create a list of time slot options for use in swingtime forms.

    The list is comprised of 2-tuples containing the number of seconds since
    the start of the day and a 12-hour temporal representation of that offset.
    '''
    midnight = datetime.combine(date.today(), time(0))
    current = datetime.combine(midnight.date(), start_time)
    last_slot = current + end_delta
    step_seconds = utils.time_delta_total_seconds(interval)
    offset = utils.time_delta_total_seconds(current - midnight)
    options = []
    while current <= last_slot:
        options.append((offset, current.strftime(fmt)))
        current += interval
        offset += step_seconds
    return options
|
Create a list of time slot options for use in swingtime forms.
The list is comprised of 2-tuples containing the number of seconds since the
start of the day and a 12-hour temporal representation of that offset.
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/forms.py#L126-L151
|
[
"def time_delta_total_seconds(time_delta):\n '''\n Calculate the total number of seconds represented by a\n ``datetime.timedelta`` object\n\n '''\n return time_delta.days * 3600 + time_delta.seconds\n"
] |
'''
Convenience forms for adding and updating ``Event`` and ``Occurrence``s.
'''
from datetime import datetime, date, time, timedelta
from django import forms
from django.forms.utils import to_current_timezone
from django.utils.translation import ugettext_lazy as _
from django.forms.widgets import SelectDateWidget
from dateutil import rrule
from .conf import swingtime_settings
from .models import *
from . import utils
WEEKDAY_SHORT = (
(7, _('Sun')),
(1, _('Mon')),
(2, _('Tue')),
(3, _('Wed')),
(4, _('Thu')),
(5, _('Fri')),
(6, _('Sat'))
)
WEEKDAY_LONG = (
(7, _('Sunday')),
(1, _('Monday')),
(2, _('Tuesday')),
(3, _('Wednesday')),
(4, _('Thursday')),
(5, _('Friday')),
(6, _('Saturday'))
)
MONTH_LONG = (
(1, _('January')),
(2, _('February')),
(3, _('March')),
(4, _('April')),
(5, _('May')),
(6, _('June')),
(7, _('July')),
(8, _('August')),
(9, _('September')),
(10, _('October')),
(11, _('November')),
(12, _('December')),
)
MONTH_SHORT = (
(1, _('Jan')),
(2, _('Feb')),
(3, _('Mar')),
(4, _('Apr')),
(5, _('May')),
(6, _('Jun')),
(7, _('Jul')),
(8, _('Aug')),
(9, _('Sep')),
(10, _('Oct')),
(11, _('Nov')),
(12, _('Dec')),
)
ORDINAL = (
(1, _('first')),
(2, _('second')),
(3, _('third')),
(4, _('fourth')),
(-1, _('last'))
)
FREQUENCY_CHOICES = (
(rrule.DAILY, _('Day(s)')),
(rrule.WEEKLY, _('Week(s)')),
(rrule.MONTHLY, _('Month(s)')),
(rrule.YEARLY, _('Year(s)')),
)
REPEAT_CHOICES = (
('count', _('By count')),
('until', _('Until date')),
)
ISO_WEEKDAYS_MAP = (
None,
rrule.MO,
rrule.TU,
rrule.WE,
rrule.TH,
rrule.FR,
rrule.SA,
rrule.SU
)
MINUTES_INTERVAL = swingtime_settings.TIMESLOT_INTERVAL.seconds // 60
SECONDS_INTERVAL = utils.time_delta_total_seconds(swingtime_settings.DEFAULT_OCCURRENCE_DURATION)
def timeslot_options(
    interval=swingtime_settings.TIMESLOT_INTERVAL,
    start_time=swingtime_settings.TIMESLOT_START_TIME,
    end_delta=swingtime_settings.TIMESLOT_END_TIME_DURATION,
    fmt=swingtime_settings.TIMESLOT_TIME_FORMAT
):
    '''
    Create a list of time slot options for use in swingtime forms.

    The list is comprised of 2-tuples containing a 24-hour time value and a
    12-hour temporal representation of that offset.
    '''
    first_slot = datetime.combine(date.today(), start_time)
    last_slot = first_slot + end_delta
    options = []
    current = first_slot
    while current <= last_slot:
        options.append((str(current.time()), current.strftime(fmt)))
        current += interval
    return options
default_timeslot_options = timeslot_options()
default_timeslot_offset_options = timeslot_offset_options()
class MultipleIntegerField(forms.MultipleChoiceField):
    '''
    A multiple-choice form field whose cleaned values are ints.
    '''

    def __init__(self, choices, size=None, label=None, widget=None):
        # Default to a SelectMultiple sized to the number of choices.
        if not widget:
            widget = forms.SelectMultiple(
                attrs={'size': size or len(choices)})
        super().__init__(
            required=False,
            choices=choices,
            label=label,
            widget=widget,
        )

    def clean(self, value):
        cleaned = super().clean(value)
        return [int(item) for item in cleaned]
class SplitDateTimeWidget(forms.MultiWidget):
    '''
    A MultiWidget pairing a SelectDateWidget for the date with a Select
    of precomputed time slots for the time.
    '''

    def __init__(self, attrs=None):
        super().__init__((
            SelectDateWidget(attrs=attrs),
            forms.Select(choices=default_timeslot_options, attrs=attrs),
        ))

    def decompress(self, value):
        # Split a datetime into [date, time]; microseconds are dropped so
        # the time matches one of the slot choices.
        if not value:
            return [None, None]
        localized = to_current_timezone(value)
        return [localized.date(), localized.time().replace(microsecond=0)]
class MultipleOccurrenceForm(forms.Form):
day = forms.DateField(
label=_('Date'),
initial=date.today,
widget=SelectDateWidget()
)
start_time_delta = forms.IntegerField(
label=_('Start time'),
widget=forms.Select(choices=default_timeslot_offset_options)
)
end_time_delta = forms.IntegerField(
label=_('End time'),
widget=forms.Select(choices=default_timeslot_offset_options)
)
# recurrence options
repeats = forms.ChoiceField(
choices=REPEAT_CHOICES,
initial='count',
label=_('Occurrences'),
widget=forms.RadioSelect()
)
count = forms.IntegerField(
label=_('Total Occurrences'),
initial=1,
required=False,
widget=forms.TextInput(attrs=dict(size=2, max_length=2))
)
until = forms.DateField(
required=False,
initial=date.today,
widget=SelectDateWidget()
)
freq = forms.IntegerField(
label=_('Frequency'),
initial=rrule.WEEKLY,
widget=forms.RadioSelect(choices=FREQUENCY_CHOICES),
)
interval = forms.IntegerField(
required=False,
initial='1',
widget=forms.TextInput(attrs=dict(size=3, max_length=3))
)
# weekly options
week_days = MultipleIntegerField(
WEEKDAY_SHORT,
label=_('Weekly options'),
widget=forms.CheckboxSelectMultiple
)
# monthly options
month_option = forms.ChoiceField(
choices=(('on', _('On the')), ('each', _('Each:'))),
initial='each',
widget=forms.RadioSelect(),
label=_('Monthly options')
)
month_ordinal = forms.IntegerField(
widget=forms.Select(choices=ORDINAL),
required=False
)
month_ordinal_day = forms.IntegerField(
widget=forms.Select(choices=WEEKDAY_LONG),
required=False
)
each_month_day = MultipleIntegerField(
[(i, i) for i in range(1, 32)],
widget=forms.CheckboxSelectMultiple
)
# yearly options
year_months = MultipleIntegerField(
MONTH_SHORT,
label=_('Yearly options'),
widget=forms.CheckboxSelectMultiple
)
is_year_month_ordinal = forms.BooleanField(required=False)
year_month_ordinal = forms.IntegerField(
widget=forms.Select(choices=ORDINAL),
required=False
)
year_month_ordinal_day = forms.IntegerField(
widget=forms.Select(choices=WEEKDAY_LONG),
required=False
)
def __init__(self, *args, **kws):
super().__init__(*args, **kws)
dtstart = self.initial.get('dtstart', None)
if dtstart:
dtstart = dtstart.replace(
minute=((dtstart.minute // MINUTES_INTERVAL) * MINUTES_INTERVAL),
second=0,
microsecond=0
)
weekday = dtstart.isoweekday()
ordinal = dtstart.day // 7
ordinal = '%d' % (-1 if ordinal > 3 else ordinal + 1,)
midnight = datetime.combine(dtstart.date(), time(0, tzinfo=dtstart.tzinfo))
offset = (dtstart - midnight).seconds
self.initial.setdefault('day', dtstart)
self.initial.setdefault('week_days', '%d' % weekday)
self.initial.setdefault('month_ordinal', ordinal)
self.initial.setdefault('month_ordinal_day', '%d' % weekday)
self.initial.setdefault('each_month_day', ['%d' % dtstart.day])
self.initial.setdefault('year_months', ['%d' % dtstart.month])
self.initial.setdefault('year_month_ordinal', ordinal)
self.initial.setdefault('year_month_ordinal_day', '%d' % weekday)
self.initial.setdefault('start_time_delta', '%d' % offset)
self.initial.setdefault('end_time_delta', '%d' % (offset + SECONDS_INTERVAL,))
def clean(self):
if 'day' in self.cleaned_data:
day = datetime.combine(self.cleaned_data['day'], time(0))
self.cleaned_data['start_time'] = day + timedelta(
seconds=self.cleaned_data['start_time_delta']
)
self.cleaned_data['end_time'] = day + timedelta(
seconds=self.cleaned_data['end_time_delta']
)
return self.cleaned_data
def save(self, event):
if self.cleaned_data['repeats'] == 'count' and self.cleaned_data['count'] == 1:
params = {}
else:
params = self._build_rrule_params()
event.add_occurrences(
self.cleaned_data['start_time'],
self.cleaned_data['end_time'],
**params
)
return event
def _build_rrule_params(self):
iso = ISO_WEEKDAYS_MAP
data = self.cleaned_data
params = dict(
freq=data['freq'],
interval=data['interval'] or 1
)
if data['repeats'] == 'until':
params['until'] = data['until']
else:
params['count'] = data.get('count', 1)
if params['freq'] == rrule.WEEKLY:
params['byweekday'] = [iso[n] for n in data['week_days']]
elif params['freq'] == rrule.MONTHLY:
if 'on' == data['month_option']:
ordinal = data['month_ordinal']
day = iso[data['month_ordinal_day']]
params.update(byweekday=day, bysetpos=ordinal)
else:
params['bymonthday'] = data['each_month_day']
elif params['freq'] == rrule.YEARLY:
params['bymonth'] = data['year_months']
if data['is_year_month_ordinal']:
ordinal = data['year_month_ordinal']
day = iso[data['year_month_ordinal_day']]
params['byweekday'] = day(ordinal)
elif params['freq'] != rrule.DAILY:
raise NotImplementedError(_('Unknown interval rule ' + params['freq']))
return params
class EventForm(forms.ModelForm):
    '''
    ModelForm for creating and editing Event instances.
    '''

    class Meta:
        model = Event
        fields = "__all__"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The form accepts an empty description even if the model field
        # is required.
        self.fields['description'].required = False
class SingleOccurrenceForm(forms.ModelForm):
    '''
    ModelForm for creating or editing one Occurrence at a time, using the
    split date/time-slot widget for both endpoints.
    '''

    start_time = forms.SplitDateTimeField(widget=SplitDateTimeWidget)
    end_time = forms.SplitDateTimeField(widget=SplitDateTimeWidget)

    class Meta:
        model = Occurrence
        fields = "__all__"
|
dakrauth/django-swingtime
|
swingtime/utils.py
|
month_boundaries
|
python
|
def month_boundaries(dt=None):
    '''
    Return a 2-tuple containing the datetime instances for the first and last
    dates of the current month or using ``dt`` as a reference.
    '''
    dt = dt or date.today()
    # monthrange returns (first weekday, day count); only the day count is
    # needed, so the previously unused `wkday` local is gone.
    ndays = calendar.monthrange(dt.year, dt.month)[1]
    start = datetime(dt.year, dt.month, 1)
    return (start, start + timedelta(ndays - 1))
|
Return a 2-tuple containing the datetime instances for the first and last
dates of the current month or using ``dt`` as a reference.
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/utils.py#L27-L36
| null |
'''
Common features and functions for swingtime
'''
import calendar
from collections import defaultdict
from datetime import datetime, date, time, timedelta
import itertools
from django.db.models.query import QuerySet
from django.utils.safestring import mark_safe
from django.utils.encoding import python_2_unicode_compatible
from dateutil import rrule
from .conf import swingtime_settings
from .models import EventType, Occurrence
def time_delta_total_seconds(time_delta):
    '''
    Calculate the total number of seconds represented by a
    ``datetime.timedelta`` object (microseconds are ignored, as before).

    BUG FIX: the previous implementation multiplied ``days`` by 3600
    (seconds per *hour*); a day has 86400 seconds, so any delta of one
    day or more was under-counted.
    '''
    return time_delta.days * 86400 + time_delta.seconds
def default_css_class_cycler():
    """Endless alternation of the two generic row classes; used as the
    fallback when an event type has no dedicated class cycle."""
    return itertools.cycle(('evt-even', 'evt-odd'))
def css_class_cycler():
    '''
    Return a dictionary keyed by ``EventType`` abbreviations, whose values
    are an iterable or cycle of CSS class names.
    '''
    make = 'evt-{0}-{1}'.format
    pairs = (
        (et.abbr, itertools.cycle((make(et.abbr, 'even'), make(et.abbr, 'odd'))))
        for et in EventType.objects.all()
    )
    # Unknown abbreviations fall back to the generic even/odd cycle.
    return defaultdict(default_css_class_cycler, pairs)
class BaseOccurrenceProxy(object):
    '''
    A simple wrapper class for handling the presentational aspects of an
    ``Occurrence`` instance.
    '''

    def __init__(self, occurrence, col):
        self._occurrence = occurrence
        self.column = col
        self.event_class = ''

    def __getattr__(self, name):
        # Anything not found on the proxy itself is delegated to the
        # wrapped occurrence.
        return getattr(self._occurrence, name)

    def __str__(self):
        return self.title
class DefaultOccurrenceProxy(BaseOccurrenceProxy):
    # Marker rendered for every slot after the first one an occurrence
    # spans.
    CONTINUATION_STRING = '^^'

    def __init__(self, *args, **kws):
        super().__init__(*args, **kws)
        anchor = '<a href="%s">%s</a>' % (
            self.get_absolute_url(),
            self.title
        )
        # First str() yields the link; every later one yields the
        # continuation marker, so the title renders only once.
        self._str = itertools.chain(
            (anchor,),
            itertools.repeat(self.CONTINUATION_STRING)
        )

    def __str__(self):
        return mark_safe(next(self._str))
def create_timeslot_table(
dt=None,
items=None,
start_time=swingtime_settings.TIMESLOT_START_TIME,
end_time_delta=swingtime_settings.TIMESLOT_END_TIME_DURATION,
time_delta=swingtime_settings.TIMESLOT_INTERVAL,
min_columns=swingtime_settings.TIMESLOT_MIN_COLUMNS,
css_class_cycles=css_class_cycler,
proxy_class=DefaultOccurrenceProxy
):
'''
Create a grid-like object representing a sequence of times (rows) and
columns where cells are either empty or reference a wrapper object for
event occasions that overlap a specific time slot.
Currently, there is an assumption that if an occurrence has a ``start_time``
that falls with the temporal scope of the grid, then that ``start_time`` will
also match an interval in the sequence of the computed row entries.
* ``dt`` - a ``datetime.datetime`` instance or ``None`` to default to now
* ``items`` - a queryset or sequence of ``Occurrence`` instances. If
``None``, default to the daily occurrences for ``dt``
* ``start_time`` - a ``datetime.time`` instance
* ``end_time_delta`` - a ``datetime.timedelta`` instance
* ``time_delta`` - a ``datetime.timedelta`` instance
* ``min_column`` - the minimum number of columns to show in the table
* ``css_class_cycles`` - if not ``None``, a callable returning a dictionary
keyed by desired ``EventType`` abbreviations with values that iterate over
progressive CSS class names for the particular abbreviation.
* ``proxy_class`` - a wrapper class for accessing an ``Occurrence`` object.
This class should also expose ``event_type`` and ``event_type`` attrs, and
handle the custom output via its __unicode__ method.
'''
dt = dt or datetime.now()
# Align the grid's start to dt's tzinfo when start_time is naive.
start_time = start_time.replace(tzinfo=dt.tzinfo) if not start_time.tzinfo else start_time
dtstart = datetime.combine(dt.date(), start_time)
dtend = dtstart + end_time_delta
if isinstance(items, QuerySet):
# NOTE(review): relies on the private QuerySet._clone API -- confirm
# this still holds for the Django version in use.
items = items._clone()
elif not items:
items = Occurrence.objects.daily_occurrences(dt).select_related('event')
# build a mapping of timeslot "buckets"
timeslots = {}
n = dtstart
while n <= dtend:
timeslots[n] = {}
n += time_delta
# fill the timeslot buckets with occurrence proxies
# NOTE(review): sorted(items) presumes Occurrence defines an ordering
# (by start time) -- confirm against the model's Meta/ordering.
for item in sorted(items):
if item.end_time <= dtstart:
# this item began before the start of our schedle constraints
continue
if item.start_time > dtstart:
rowkey = current = item.start_time
else:
rowkey = current = dtstart
timeslot = timeslots.get(rowkey, None)
if timeslot is None:
# TODO fix atypical interval boundry spans
# This is rather draconian, we should probably try to find a better
# way to indicate that this item actually occurred between 2 intervals
# and to account for the fact that this item may be spanning cells
# but on weird intervals
continue
colkey = 0
while 1:
# keep searching for an open column to place this occurrence
if colkey not in timeslot:
proxy = proxy_class(item, colkey)
timeslot[colkey] = proxy
# Mark every later slot the occurrence spans with the same
# proxy so the grid can render continuation cells.
while current < item.end_time:
rowkey = current
row = timeslots.get(rowkey, None)
if row is None:
break
# we might want to put a sanity check in here to ensure that
# we aren't trampling some other entry, but by virtue of
# sorting all occurrence that shouldn't happen
row[colkey] = proxy
current += time_delta
break
colkey += 1
# determine the number of timeslot columns we should show
column_lens = [len(x) for x in timeslots.values()]
column_count = max((min_columns, max(column_lens) if column_lens else 0))
column_range = range(column_count)
empty_columns = ['' for x in column_range]
if css_class_cycles:
column_classes = dict([(i, css_class_cycles()) for i in column_range])
else:
column_classes = None
# create the chronological grid layout
table = []
for rowkey in sorted(timeslots.keys()):
cols = empty_columns[:]
for colkey in timeslots[rowkey]:
proxy = timeslots[rowkey][colkey]
cols[colkey] = proxy
# Assign a CSS class the first time a proxy is rendered; spans
# reuse the same class on later rows.
if not proxy.event_class and column_classes:
proxy.event_class = next(column_classes[colkey][proxy.event_type.abbr])
table.append((rowkey, cols))
return table
|
dakrauth/django-swingtime
|
swingtime/utils.py
|
css_class_cycler
|
python
|
def css_class_cycler():
    '''
    Return a dictionary keyed by ``EventType`` abbreviations, whose values
    are an iterable or cycle of CSS class names.
    '''
    make = 'evt-{0}-{1}'.format
    pairs = (
        (et.abbr, itertools.cycle((make(et.abbr, 'even'), make(et.abbr, 'odd'))))
        for et in EventType.objects.all()
    )
    # Unknown abbreviations fall back to the generic even/odd cycle.
    return defaultdict(default_css_class_cycler, pairs)
|
Return a dictionary keyed by ``EventType`` abbreviations, whose values are an
iterable or cycle of CSS class names.
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/utils.py#L43-L53
| null |
'''
Common features and functions for swingtime
'''
import calendar
from collections import defaultdict
from datetime import datetime, date, time, timedelta
import itertools
from django.db.models.query import QuerySet
from django.utils.safestring import mark_safe
from django.utils.encoding import python_2_unicode_compatible
from dateutil import rrule
from .conf import swingtime_settings
from .models import EventType, Occurrence
def time_delta_total_seconds(time_delta):
    '''
    Calculate the total number of seconds represented by a
    ``datetime.timedelta`` object (microseconds are ignored, as before).

    BUG FIX: the previous implementation multiplied ``days`` by 3600
    (seconds per *hour*); a day has 86400 seconds, so any delta of one
    day or more was under-counted.
    '''
    return time_delta.days * 86400 + time_delta.seconds
def month_boundaries(dt=None):
    '''
    Return a 2-tuple containing the datetime instances for the first and last
    dates of the current month or using ``dt`` as a reference.
    '''
    dt = dt or date.today()
    # monthrange returns (first weekday, day count); only the day count is
    # needed, so the previously unused `wkday` local is gone.
    ndays = calendar.monthrange(dt.year, dt.month)[1]
    start = datetime(dt.year, dt.month, 1)
    return (start, start + timedelta(ndays - 1))
def default_css_class_cycler():
    """Endless alternation of the two generic row classes; used as the
    fallback when an event type has no dedicated class cycle."""
    return itertools.cycle(('evt-even', 'evt-odd'))
class BaseOccurrenceProxy(object):
    '''
    A simple wrapper class for handling the presentational aspects of an
    ``Occurrence`` instance.
    '''

    def __init__(self, occurrence, col):
        self._occurrence = occurrence
        self.column = col
        self.event_class = ''

    def __getattr__(self, name):
        # Anything not found on the proxy itself is delegated to the
        # wrapped occurrence.
        return getattr(self._occurrence, name)

    def __str__(self):
        return self.title
class DefaultOccurrenceProxy(BaseOccurrenceProxy):
    # Marker rendered for every slot after the first one an occurrence
    # spans.
    CONTINUATION_STRING = '^^'

    def __init__(self, *args, **kws):
        super().__init__(*args, **kws)
        anchor = '<a href="%s">%s</a>' % (
            self.get_absolute_url(),
            self.title
        )
        # First str() yields the link; every later one yields the
        # continuation marker, so the title renders only once.
        self._str = itertools.chain(
            (anchor,),
            itertools.repeat(self.CONTINUATION_STRING)
        )

    def __str__(self):
        return mark_safe(next(self._str))
def create_timeslot_table(
dt=None,
items=None,
start_time=swingtime_settings.TIMESLOT_START_TIME,
end_time_delta=swingtime_settings.TIMESLOT_END_TIME_DURATION,
time_delta=swingtime_settings.TIMESLOT_INTERVAL,
min_columns=swingtime_settings.TIMESLOT_MIN_COLUMNS,
css_class_cycles=css_class_cycler,
proxy_class=DefaultOccurrenceProxy
):
'''
Create a grid-like object representing a sequence of times (rows) and
columns where cells are either empty or reference a wrapper object for
event occasions that overlap a specific time slot.
Currently, there is an assumption that if an occurrence has a ``start_time``
that falls with the temporal scope of the grid, then that ``start_time`` will
also match an interval in the sequence of the computed row entries.
* ``dt`` - a ``datetime.datetime`` instance or ``None`` to default to now
* ``items`` - a queryset or sequence of ``Occurrence`` instances. If
``None``, default to the daily occurrences for ``dt``
* ``start_time`` - a ``datetime.time`` instance
* ``end_time_delta`` - a ``datetime.timedelta`` instance
* ``time_delta`` - a ``datetime.timedelta`` instance
* ``min_column`` - the minimum number of columns to show in the table
* ``css_class_cycles`` - if not ``None``, a callable returning a dictionary
keyed by desired ``EventType`` abbreviations with values that iterate over
progressive CSS class names for the particular abbreviation.
* ``proxy_class`` - a wrapper class for accessing an ``Occurrence`` object.
This class should also expose ``event_type`` and ``event_type`` attrs, and
handle the custom output via its __unicode__ method.
'''
dt = dt or datetime.now()
start_time = start_time.replace(tzinfo=dt.tzinfo) if not start_time.tzinfo else start_time
dtstart = datetime.combine(dt.date(), start_time)
dtend = dtstart + end_time_delta
if isinstance(items, QuerySet):
items = items._clone()
elif not items:
items = Occurrence.objects.daily_occurrences(dt).select_related('event')
# build a mapping of timeslot "buckets"
timeslots = {}
n = dtstart
while n <= dtend:
timeslots[n] = {}
n += time_delta
# fill the timeslot buckets with occurrence proxies
for item in sorted(items):
if item.end_time <= dtstart:
# this item began before the start of our schedle constraints
continue
if item.start_time > dtstart:
rowkey = current = item.start_time
else:
rowkey = current = dtstart
timeslot = timeslots.get(rowkey, None)
if timeslot is None:
# TODO fix atypical interval boundry spans
# This is rather draconian, we should probably try to find a better
# way to indicate that this item actually occurred between 2 intervals
# and to account for the fact that this item may be spanning cells
# but on weird intervals
continue
colkey = 0
while 1:
# keep searching for an open column to place this occurrence
if colkey not in timeslot:
proxy = proxy_class(item, colkey)
timeslot[colkey] = proxy
while current < item.end_time:
rowkey = current
row = timeslots.get(rowkey, None)
if row is None:
break
# we might want to put a sanity check in here to ensure that
# we aren't trampling some other entry, but by virtue of
# sorting all occurrence that shouldn't happen
row[colkey] = proxy
current += time_delta
break
colkey += 1
# determine the number of timeslot columns we should show
column_lens = [len(x) for x in timeslots.values()]
column_count = max((min_columns, max(column_lens) if column_lens else 0))
column_range = range(column_count)
empty_columns = ['' for x in column_range]
if css_class_cycles:
column_classes = dict([(i, css_class_cycles()) for i in column_range])
else:
column_classes = None
# create the chronological grid layout
table = []
for rowkey in sorted(timeslots.keys()):
cols = empty_columns[:]
for colkey in timeslots[rowkey]:
proxy = timeslots[rowkey][colkey]
cols[colkey] = proxy
if not proxy.event_class and column_classes:
proxy.event_class = next(column_classes[colkey][proxy.event_type.abbr])
table.append((rowkey, cols))
return table
|
dakrauth/django-swingtime
|
swingtime/utils.py
|
create_timeslot_table
|
python
|
def create_timeslot_table(
dt=None,
items=None,
start_time=swingtime_settings.TIMESLOT_START_TIME,
end_time_delta=swingtime_settings.TIMESLOT_END_TIME_DURATION,
time_delta=swingtime_settings.TIMESLOT_INTERVAL,
min_columns=swingtime_settings.TIMESLOT_MIN_COLUMNS,
css_class_cycles=css_class_cycler,
proxy_class=DefaultOccurrenceProxy
):
'''
Create a grid-like object representing a sequence of times (rows) and
columns where cells are either empty or reference a wrapper object for
event occasions that overlap a specific time slot.
Currently, there is an assumption that if an occurrence has a ``start_time``
that falls with the temporal scope of the grid, then that ``start_time`` will
also match an interval in the sequence of the computed row entries.
* ``dt`` - a ``datetime.datetime`` instance or ``None`` to default to now
* ``items`` - a queryset or sequence of ``Occurrence`` instances. If
``None``, default to the daily occurrences for ``dt``
* ``start_time`` - a ``datetime.time`` instance
* ``end_time_delta`` - a ``datetime.timedelta`` instance
* ``time_delta`` - a ``datetime.timedelta`` instance
* ``min_column`` - the minimum number of columns to show in the table
* ``css_class_cycles`` - if not ``None``, a callable returning a dictionary
keyed by desired ``EventType`` abbreviations with values that iterate over
progressive CSS class names for the particular abbreviation.
* ``proxy_class`` - a wrapper class for accessing an ``Occurrence`` object.
This class should also expose ``event_type`` and ``event_type`` attrs, and
handle the custom output via its __unicode__ method.
'''
dt = dt or datetime.now()
start_time = start_time.replace(tzinfo=dt.tzinfo) if not start_time.tzinfo else start_time
dtstart = datetime.combine(dt.date(), start_time)
dtend = dtstart + end_time_delta
if isinstance(items, QuerySet):
items = items._clone()
elif not items:
items = Occurrence.objects.daily_occurrences(dt).select_related('event')
# build a mapping of timeslot "buckets"
timeslots = {}
n = dtstart
while n <= dtend:
timeslots[n] = {}
n += time_delta
# fill the timeslot buckets with occurrence proxies
for item in sorted(items):
if item.end_time <= dtstart:
# this item began before the start of our schedle constraints
continue
if item.start_time > dtstart:
rowkey = current = item.start_time
else:
rowkey = current = dtstart
timeslot = timeslots.get(rowkey, None)
if timeslot is None:
# TODO fix atypical interval boundry spans
# This is rather draconian, we should probably try to find a better
# way to indicate that this item actually occurred between 2 intervals
# and to account for the fact that this item may be spanning cells
# but on weird intervals
continue
colkey = 0
while 1:
# keep searching for an open column to place this occurrence
if colkey not in timeslot:
proxy = proxy_class(item, colkey)
timeslot[colkey] = proxy
while current < item.end_time:
rowkey = current
row = timeslots.get(rowkey, None)
if row is None:
break
# we might want to put a sanity check in here to ensure that
# we aren't trampling some other entry, but by virtue of
# sorting all occurrence that shouldn't happen
row[colkey] = proxy
current += time_delta
break
colkey += 1
# determine the number of timeslot columns we should show
column_lens = [len(x) for x in timeslots.values()]
column_count = max((min_columns, max(column_lens) if column_lens else 0))
column_range = range(column_count)
empty_columns = ['' for x in column_range]
if css_class_cycles:
column_classes = dict([(i, css_class_cycles()) for i in column_range])
else:
column_classes = None
# create the chronological grid layout
table = []
for rowkey in sorted(timeslots.keys()):
cols = empty_columns[:]
for colkey in timeslots[rowkey]:
proxy = timeslots[rowkey][colkey]
cols[colkey] = proxy
if not proxy.event_class and column_classes:
proxy.event_class = next(column_classes[colkey][proxy.event_type.abbr])
table.append((rowkey, cols))
return table
|
Create a grid-like object representing a sequence of times (rows) and
columns where cells are either empty or reference a wrapper object for
event occasions that overlap a specific time slot.
Currently, there is an assumption that if an occurrence has a ``start_time``
that falls with the temporal scope of the grid, then that ``start_time`` will
also match an interval in the sequence of the computed row entries.
* ``dt`` - a ``datetime.datetime`` instance or ``None`` to default to now
* ``items`` - a queryset or sequence of ``Occurrence`` instances. If
``None``, default to the daily occurrences for ``dt``
* ``start_time`` - a ``datetime.time`` instance
* ``end_time_delta`` - a ``datetime.timedelta`` instance
* ``time_delta`` - a ``datetime.timedelta`` instance
* ``min_column`` - the minimum number of columns to show in the table
* ``css_class_cycles`` - if not ``None``, a callable returning a dictionary
keyed by desired ``EventType`` abbreviations with values that iterate over
progressive CSS class names for the particular abbreviation.
* ``proxy_class`` - a wrapper class for accessing an ``Occurrence`` object.
This class should also expose ``event_type`` and ``event_type`` attrs, and
handle the custom output via its __unicode__ method.
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/utils.py#L95-L211
| null |
'''
Common features and functions for swingtime
'''
import calendar
from collections import defaultdict
from datetime import datetime, date, time, timedelta
import itertools
from django.db.models.query import QuerySet
from django.utils.safestring import mark_safe
from django.utils.encoding import python_2_unicode_compatible
from dateutil import rrule
from .conf import swingtime_settings
from .models import EventType, Occurrence
def time_delta_total_seconds(time_delta):
'''
Calculate the total number of seconds represented by a
``datetime.timedelta`` object
'''
return time_delta.days * 3600 + time_delta.seconds
def month_boundaries(dt=None):
'''
Return a 2-tuple containing the datetime instances for the first and last
dates of the current month or using ``dt`` as a reference.
'''
dt = dt or date.today()
wkday, ndays = calendar.monthrange(dt.year, dt.month)
start = datetime(dt.year, dt.month, 1)
return (start, start + timedelta(ndays - 1))
def default_css_class_cycler():
return itertools.cycle(('evt-even', 'evt-odd'))
def css_class_cycler():
'''
Return a dictionary keyed by ``EventType`` abbreviations, whose values are an
iterable or cycle of CSS class names.
'''
FMT = 'evt-{0}-{1}'.format
return defaultdict(default_css_class_cycler, (
(e.abbr, itertools.cycle((FMT(e.abbr, 'even'), FMT(e.abbr, 'odd'))))
for e in EventType.objects.all()
))
class BaseOccurrenceProxy(object):
'''
A simple wrapper class for handling the presentational aspects of an
``Occurrence`` instance.
'''
def __init__(self, occurrence, col):
self.column = col
self._occurrence = occurrence
self.event_class = ''
def __getattr__(self, name):
return getattr(self._occurrence, name)
def __str__(self):
return self.title
class DefaultOccurrenceProxy(BaseOccurrenceProxy):
CONTINUATION_STRING = '^^'
def __init__(self, *args, **kws):
super().__init__(*args, **kws)
link = '<a href="%s">%s</a>' % (
self.get_absolute_url(),
self.title
)
self._str = itertools.chain(
(link,),
itertools.repeat(self.CONTINUATION_STRING)
)
def __str__(self):
return mark_safe(next(self._str))
|
dakrauth/django-swingtime
|
swingtime/models.py
|
create_event
|
python
|
def create_event(
title,
event_type,
description='',
start_time=None,
end_time=None,
note=None,
**rrule_params
):
'''
Convenience function to create an ``Event``, optionally create an
``EventType``, and associated ``Occurrence``s. ``Occurrence`` creation
rules match those for ``Event.add_occurrences``.
Returns the newly created ``Event`` instance.
Parameters
``event_type``
can be either an ``EventType`` object or 2-tuple of ``(abbreviation,label)``,
from which an ``EventType`` is either created or retrieved.
``start_time``
will default to the current hour if ``None``
``end_time``
will default to ``start_time`` plus swingtime_settings.DEFAULT_OCCURRENCE_DURATION
hour if ``None``
``freq``, ``count``, ``rrule_params``
follow the ``dateutils`` API (see http://labix.org/python-dateutil)
'''
if isinstance(event_type, tuple):
event_type, created = EventType.objects.get_or_create(
abbr=event_type[0],
label=event_type[1]
)
event = Event.objects.create(
title=title,
description=description,
event_type=event_type
)
if note is not None:
event.notes.create(note=note)
start_time = start_time or datetime.now().replace(
minute=0,
second=0,
microsecond=0
)
end_time = end_time or (start_time + swingtime_settings.DEFAULT_OCCURRENCE_DURATION)
event.add_occurrences(start_time, end_time, **rrule_params)
return event
|
Convenience function to create an ``Event``, optionally create an
``EventType``, and associated ``Occurrence``s. ``Occurrence`` creation
rules match those for ``Event.add_occurrences``.
Returns the newly created ``Event`` instance.
Parameters
``event_type``
can be either an ``EventType`` object or 2-tuple of ``(abbreviation,label)``,
from which an ``EventType`` is either created or retrieved.
``start_time``
will default to the current hour if ``None``
``end_time``
will default to ``start_time`` plus swingtime_settings.DEFAULT_OCCURRENCE_DURATION
hour if ``None``
``freq``, ``count``, ``rrule_params``
follow the ``dateutils`` API (see http://labix.org/python-dateutil)
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/models.py#L208-L265
| null |
from datetime import datetime, date, timedelta
from dateutil import rrule
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.urls import reverse
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from .conf import swingtime_settings
__all__ = (
'Note',
'EventType',
'Event',
'Occurrence',
'create_event'
)
class Note(models.Model):
'''
A generic model for adding simple, arbitrary notes to other models such as
``Event`` or ``Occurrence``.
'''
note = models.TextField(_('note'))
created = models.DateTimeField(_('created'), auto_now_add=True)
content_type = models.ForeignKey(
ContentType,
verbose_name=_('content type'),
on_delete=models.CASCADE
)
object_id = models.PositiveIntegerField(_('object id'))
content_object = GenericForeignKey('content_type', 'object_id')
class Meta:
verbose_name = _('note')
verbose_name_plural = _('notes')
def __str__(self):
return self.note
class EventType(models.Model):
'''
Simple ``Event`` classifcation.
'''
abbr = models.CharField(_('abbreviation'), max_length=4, unique=True)
label = models.CharField(_('label'), max_length=50)
class Meta:
verbose_name = _('event type')
verbose_name_plural = _('event types')
def __str__(self):
return self.label
class Event(models.Model):
'''
Container model for general metadata and associated ``Occurrence`` entries.
'''
title = models.CharField(_('title'), max_length=32)
description = models.CharField(_('description'), max_length=100)
event_type = models.ForeignKey(
EventType,
verbose_name=_('event type'),
on_delete=models.CASCADE
)
notes = GenericRelation(Note, verbose_name=_('notes'))
class Meta:
verbose_name = _('event')
verbose_name_plural = _('events')
ordering = ('title', )
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('swingtime-event', args=[str(self.id)])
def add_occurrences(self, start_time, end_time, **rrule_params):
'''
Add one or more occurences to the event using a comparable API to
``dateutil.rrule``.
If ``rrule_params`` does not contain a ``freq``, one will be defaulted
to ``rrule.DAILY``.
Because ``rrule.rrule`` returns an iterator that can essentially be
unbounded, we need to slightly alter the expected behavior here in order
to enforce a finite number of occurrence creation.
If both ``count`` and ``until`` entries are missing from ``rrule_params``,
only a single ``Occurrence`` instance will be created using the exact
``start_time`` and ``end_time`` values.
'''
count = rrule_params.get('count')
until = rrule_params.get('until')
if not (count or until):
self.occurrence_set.create(start_time=start_time, end_time=end_time)
else:
rrule_params.setdefault('freq', rrule.DAILY)
delta = end_time - start_time
occurrences = []
for ev in rrule.rrule(dtstart=start_time, **rrule_params):
occurrences.append(Occurrence(start_time=ev, end_time=ev + delta, event=self))
self.occurrence_set.bulk_create(occurrences)
def upcoming_occurrences(self):
'''
Return all occurrences that are set to start on or after the current
time.
'''
return self.occurrence_set.filter(start_time__gte=datetime.now())
def next_occurrence(self):
'''
Return the single occurrence set to start on or after the current time
if available, otherwise ``None``.
'''
upcoming = self.upcoming_occurrences()
return upcoming[0] if upcoming else None
def daily_occurrences(self, dt=None):
'''
Convenience method wrapping ``Occurrence.objects.daily_occurrences``.
'''
return Occurrence.objects.daily_occurrences(dt=dt, event=self)
class OccurrenceManager(models.Manager):
def daily_occurrences(self, dt=None, event=None):
'''
Returns a queryset of for instances that have any overlap with a
particular day.
* ``dt`` may be either a datetime.datetime, datetime.date object, or
``None``. If ``None``, default to the current day.
* ``event`` can be an ``Event`` instance for further filtering.
'''
dt = dt or datetime.now()
start = datetime(dt.year, dt.month, dt.day)
end = start.replace(hour=23, minute=59, second=59)
qs = self.filter(
models.Q(
start_time__gte=start,
start_time__lte=end,
) |
models.Q(
end_time__gte=start,
end_time__lte=end,
) |
models.Q(
start_time__lt=start,
end_time__gt=end
)
)
return qs.filter(event=event) if event else qs
class Occurrence(models.Model):
'''
Represents the start end time for a specific occurrence of a master ``Event``
object.
'''
start_time = models.DateTimeField(_('start time'))
end_time = models.DateTimeField(_('end time'))
event = models.ForeignKey(
Event,
verbose_name=_('event'),
editable=False,
on_delete=models.CASCADE
)
notes = GenericRelation(Note, verbose_name=_('notes'))
objects = OccurrenceManager()
class Meta:
verbose_name = _('occurrence')
verbose_name_plural = _('occurrences')
ordering = ('start_time', 'end_time')
base_manager_name = 'objects'
def __str__(self):
return u'{}: {}'.format(self.title, self.start_time.isoformat())
def get_absolute_url(self):
return reverse('swingtime-occurrence', args=[str(self.event.id), str(self.id)])
def __lt__(self, other):
return self.start_time < other.start_time
@property
def title(self):
return self.event.title
@property
def event_type(self):
return self.event.event_type
|
dakrauth/django-swingtime
|
swingtime/models.py
|
Event.add_occurrences
|
python
|
def add_occurrences(self, start_time, end_time, **rrule_params):
'''
Add one or more occurences to the event using a comparable API to
``dateutil.rrule``.
If ``rrule_params`` does not contain a ``freq``, one will be defaulted
to ``rrule.DAILY``.
Because ``rrule.rrule`` returns an iterator that can essentially be
unbounded, we need to slightly alter the expected behavior here in order
to enforce a finite number of occurrence creation.
If both ``count`` and ``until`` entries are missing from ``rrule_params``,
only a single ``Occurrence`` instance will be created using the exact
``start_time`` and ``end_time`` values.
'''
count = rrule_params.get('count')
until = rrule_params.get('until')
if not (count or until):
self.occurrence_set.create(start_time=start_time, end_time=end_time)
else:
rrule_params.setdefault('freq', rrule.DAILY)
delta = end_time - start_time
occurrences = []
for ev in rrule.rrule(dtstart=start_time, **rrule_params):
occurrences.append(Occurrence(start_time=ev, end_time=ev + delta, event=self))
self.occurrence_set.bulk_create(occurrences)
|
Add one or more occurences to the event using a comparable API to
``dateutil.rrule``.
If ``rrule_params`` does not contain a ``freq``, one will be defaulted
to ``rrule.DAILY``.
Because ``rrule.rrule`` returns an iterator that can essentially be
unbounded, we need to slightly alter the expected behavior here in order
to enforce a finite number of occurrence creation.
If both ``count`` and ``until`` entries are missing from ``rrule_params``,
only a single ``Occurrence`` instance will be created using the exact
``start_time`` and ``end_time`` values.
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/models.py#L84-L110
| null |
class Event(models.Model):
'''
Container model for general metadata and associated ``Occurrence`` entries.
'''
title = models.CharField(_('title'), max_length=32)
description = models.CharField(_('description'), max_length=100)
event_type = models.ForeignKey(
EventType,
verbose_name=_('event type'),
on_delete=models.CASCADE
)
notes = GenericRelation(Note, verbose_name=_('notes'))
class Meta:
verbose_name = _('event')
verbose_name_plural = _('events')
ordering = ('title', )
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('swingtime-event', args=[str(self.id)])
def upcoming_occurrences(self):
'''
Return all occurrences that are set to start on or after the current
time.
'''
return self.occurrence_set.filter(start_time__gte=datetime.now())
def next_occurrence(self):
'''
Return the single occurrence set to start on or after the current time
if available, otherwise ``None``.
'''
upcoming = self.upcoming_occurrences()
return upcoming[0] if upcoming else None
def daily_occurrences(self, dt=None):
'''
Convenience method wrapping ``Occurrence.objects.daily_occurrences``.
'''
return Occurrence.objects.daily_occurrences(dt=dt, event=self)
|
dakrauth/django-swingtime
|
swingtime/models.py
|
Event.daily_occurrences
|
python
|
def daily_occurrences(self, dt=None):
'''
Convenience method wrapping ``Occurrence.objects.daily_occurrences``.
'''
return Occurrence.objects.daily_occurrences(dt=dt, event=self)
|
Convenience method wrapping ``Occurrence.objects.daily_occurrences``.
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/models.py#L127-L131
| null |
class Event(models.Model):
'''
Container model for general metadata and associated ``Occurrence`` entries.
'''
title = models.CharField(_('title'), max_length=32)
description = models.CharField(_('description'), max_length=100)
event_type = models.ForeignKey(
EventType,
verbose_name=_('event type'),
on_delete=models.CASCADE
)
notes = GenericRelation(Note, verbose_name=_('notes'))
class Meta:
verbose_name = _('event')
verbose_name_plural = _('events')
ordering = ('title', )
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('swingtime-event', args=[str(self.id)])
def add_occurrences(self, start_time, end_time, **rrule_params):
'''
Add one or more occurences to the event using a comparable API to
``dateutil.rrule``.
If ``rrule_params`` does not contain a ``freq``, one will be defaulted
to ``rrule.DAILY``.
Because ``rrule.rrule`` returns an iterator that can essentially be
unbounded, we need to slightly alter the expected behavior here in order
to enforce a finite number of occurrence creation.
If both ``count`` and ``until`` entries are missing from ``rrule_params``,
only a single ``Occurrence`` instance will be created using the exact
``start_time`` and ``end_time`` values.
'''
count = rrule_params.get('count')
until = rrule_params.get('until')
if not (count or until):
self.occurrence_set.create(start_time=start_time, end_time=end_time)
else:
rrule_params.setdefault('freq', rrule.DAILY)
delta = end_time - start_time
occurrences = []
for ev in rrule.rrule(dtstart=start_time, **rrule_params):
occurrences.append(Occurrence(start_time=ev, end_time=ev + delta, event=self))
self.occurrence_set.bulk_create(occurrences)
def upcoming_occurrences(self):
'''
Return all occurrences that are set to start on or after the current
time.
'''
return self.occurrence_set.filter(start_time__gte=datetime.now())
def next_occurrence(self):
'''
Return the single occurrence set to start on or after the current time
if available, otherwise ``None``.
'''
upcoming = self.upcoming_occurrences()
return upcoming[0] if upcoming else None
|
dakrauth/django-swingtime
|
swingtime/models.py
|
OccurrenceManager.daily_occurrences
|
python
|
def daily_occurrences(self, dt=None, event=None):
'''
Returns a queryset of for instances that have any overlap with a
particular day.
* ``dt`` may be either a datetime.datetime, datetime.date object, or
``None``. If ``None``, default to the current day.
* ``event`` can be an ``Event`` instance for further filtering.
'''
dt = dt or datetime.now()
start = datetime(dt.year, dt.month, dt.day)
end = start.replace(hour=23, minute=59, second=59)
qs = self.filter(
models.Q(
start_time__gte=start,
start_time__lte=end,
) |
models.Q(
end_time__gte=start,
end_time__lte=end,
) |
models.Q(
start_time__lt=start,
end_time__gt=end
)
)
return qs.filter(event=event) if event else qs
|
Returns a queryset of for instances that have any overlap with a
particular day.
* ``dt`` may be either a datetime.datetime, datetime.date object, or
``None``. If ``None``, default to the current day.
* ``event`` can be an ``Event`` instance for further filtering.
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/models.py#L136-L164
| null |
class OccurrenceManager(models.Manager):
|
dakrauth/django-swingtime
|
swingtime/views.py
|
event_listing
|
python
|
def event_listing(
request,
template='swingtime/event_list.html',
events=None,
**extra_context
):
'''
View all ``events``.
If ``events`` is a queryset, clone it. If ``None`` default to all ``Event``s.
Context parameters:
``events``
an iterable of ``Event`` objects
... plus all values passed in via **extra_context
'''
events = events or Event.objects.all()
extra_context['events'] = events
return render(request, template, extra_context)
|
View all ``events``.
If ``events`` is a queryset, clone it. If ``None`` default to all ``Event``s.
Context parameters:
``events``
an iterable of ``Event`` objects
... plus all values passed in via **extra_context
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L20-L40
| null |
import calendar
import itertools
import logging
from datetime import datetime, timedelta, time
from dateutil import parser
from django import http
from django.db import models
from django.template.context import RequestContext
from django.shortcuts import get_object_or_404, render
from .models import Event, Occurrence
from . import utils, forms
from .conf import swingtime_settings
if swingtime_settings.CALENDAR_FIRST_WEEKDAY is not None:
calendar.setfirstweekday(swingtime_settings.CALENDAR_FIRST_WEEKDAY)
def event_view(
request,
pk,
template='swingtime/event_detail.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
View an ``Event`` instance and optionally update either the event or its
occurrences.
Context parameters:
``event``
the event keyed by ``pk``
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
event = get_object_or_404(Event, pk=pk)
event_form = recurrence_form = None
if request.method == 'POST':
if '_update' in request.POST:
event_form = event_form_class(request.POST, instance=event)
if event_form.is_valid():
event_form.save(event)
return http.HttpResponseRedirect(request.path)
elif '_add' in request.POST:
recurrence_form = recurrence_form_class(request.POST)
if recurrence_form.is_valid():
recurrence_form.save(event)
return http.HttpResponseRedirect(request.path)
else:
return http.HttpResponseBadRequest('Bad Request')
data = {
'event': event,
'event_form': event_form or event_form_class(instance=event),
'recurrence_form': recurrence_form or recurrence_form_class(
initial={'dtstart': datetime.now()}
)
}
return render(request, template, data)
def occurrence_view(
request,
event_pk,
pk,
template='swingtime/occurrence_detail.html',
form_class=forms.SingleOccurrenceForm
):
'''
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
'''
occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
if request.method == 'POST':
form = form_class(request.POST, instance=occurrence)
if form.is_valid():
form.save()
return http.HttpResponseRedirect(request.path)
else:
form = form_class(instance=occurrence)
return render(request, template, {'occurrence': occurrence, 'form': form})
def add_event(
request,
template='swingtime/add_event.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.
Context parameters:
``dtstart``
a datetime.datetime object representing the GET request value if present,
otherwise None
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
dtstart = None
if request.method == 'POST':
event_form = event_form_class(request.POST)
recurrence_form = recurrence_form_class(request.POST)
if event_form.is_valid() and recurrence_form.is_valid():
event = event_form.save()
recurrence_form.save(event)
return http.HttpResponseRedirect(event.get_absolute_url())
else:
if 'dtstart' in request.GET:
try:
dtstart = parser.parse(request.GET['dtstart'])
except(TypeError, ValueError) as exc:
# TODO: A badly formatted date is passed to add_event
logging.warning(exc)
dtstart = dtstart or datetime.now()
event_form = event_form_class()
recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
return render(
request,
template,
{'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
)
def _datetime_view(
request,
template,
dt,
timeslot_factory=None,
items=None,
params=None
):
'''
Build a time slot grid representation for the given datetime ``dt``. See
utils.create_timeslot_table documentation for items and params.
Context parameters:
``day``
the specified datetime value (dt)
``next_day``
day + 1 day
``prev_day``
day - 1 day
``timeslots``
time slot grid of (time, cells) rows
'''
timeslot_factory = timeslot_factory or utils.create_timeslot_table
params = params or {}
return render(request, template, {
'day': dt,
'next_day': dt + timedelta(days=+1),
'prev_day': dt + timedelta(days=-1),
'timeslots': timeslot_factory(dt, items, **params)
})
def day_view(request, year, month, day, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
dt = datetime(int(year), int(month), int(day))
return _datetime_view(request, template, dt, **params)
def today_view(request, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
return _datetime_view(request, template, datetime.now(), **params)
def year_view(request, year, template='swingtime/yearly_view.html', queryset=None):
    '''
    Show all occurrences that touch the given ``year``.
    Context parameters:
    ``year``
        an integer value for the year in question
    ``next_year``
        year + 1
    ``last_year``
        year - 1
    ``by_month``
        a list of (month, occurrences) tuples where month is a
        datetime.datetime object for the first day of a month and occurrences
        is a (potentially empty) list of values for that month. Only months
        which have at least 1 occurrence are represented in the list.
    '''
    year = int(year)
    if queryset is not None:
        queryset = queryset._clone()
    else:
        queryset = Occurrence.objects.select_related()
    occurrences = queryset.filter(
        models.Q(start_time__year=year) | models.Q(end_time__year=year)
    )

    def month_of(occ):
        # Occurrences spanning the year boundary are grouped by whichever
        # endpoint actually falls within ``year``.
        month = occ.start_time.month if occ.start_time.year == year else occ.end_time.month
        return datetime(year, month, 1)

    by_month = [
        (first_day, list(group))
        for first_day, group in itertools.groupby(occurrences, month_of)
    ]
    return render(request, template, {
        'year': year,
        'by_month': by_month,
        'next_year': year + 1,
        'last_year': year - 1
    })
def month_view(
    request,
    year,
    month,
    template='swingtime/monthly_view.html',
    queryset=None
):
    '''
    Render a traditional calendar grid view with temporal navigation variables.
    Context parameters:
    ``today``
        the current datetime.datetime value
    ``calendar``
        a list of rows containing (day, items) cells, where day is the day of
        the month integer and items is a (potentially empty) list of occurrence
        for the day
    ``this_month``
        a datetime.datetime representing the first day of the month
    ``next_month``
        this_month + 1 month
    ``last_month``
        this_month - 1 month
    '''
    year, month = int(year), int(month)
    cal = calendar.monthcalendar(year, month)
    dtstart = datetime(year, month, 1)
    # max of the final week row is the number of days in this month
    last_day = max(cal[-1])
    # TODO Whether to include those occurrences that started in the previous
    # month but end in this month?
    queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
    occurrences = queryset.filter(start_time__year=year, start_time__month=month)

    def start_day(o):
        # group key: the day-of-month on which an occurrence starts
        return o.start_time.day

    by_day = {dt: list(o) for dt, o in itertools.groupby(occurrences, start_day)}
    data = {
        'today': datetime.now(),
        'calendar': [[(d, by_day.get(d, [])) for d in row] for row in cal],
        'this_month': dtstart,
        # adding the month's day count to day 1 lands on the 1st of next month
        'next_month': dtstart + timedelta(days=+last_day),
        'last_month': dtstart + timedelta(days=-1),
    }
    return render(request, template, data)
|
dakrauth/django-swingtime
|
swingtime/views.py
|
event_view
|
python
|
def event_view(
    request,
    pk,
    template='swingtime/event_detail.html',
    event_form_class=forms.EventForm,
    recurrence_form_class=forms.MultipleOccurrenceForm
):
    '''
    Show an ``Event`` instance; on POST either update the event
    (``_update``) or add occurrences to it (``_add``).
    Context parameters:
    ``event``
        the event keyed by ``pk``
    ``event_form``
        a form object for updating the event
    ``recurrence_form``
        a form object for adding occurrences
    '''
    event = get_object_or_404(Event, pk=pk)
    event_form = None
    recurrence_form = None
    if request.method == 'POST':
        if '_update' in request.POST:
            event_form = event_form_class(request.POST, instance=event)
            if event_form.is_valid():
                event_form.save(event)
                return http.HttpResponseRedirect(request.path)
        elif '_add' in request.POST:
            recurrence_form = recurrence_form_class(request.POST)
            if recurrence_form.is_valid():
                recurrence_form.save(event)
                return http.HttpResponseRedirect(request.path)
        else:
            # Neither expected submit button was present.
            return http.HttpResponseBadRequest('Bad Request')
    # Fall through to re-render, keeping any bound (invalid) form so its
    # errors are displayed; otherwise build fresh unbound forms.
    if event_form is None:
        event_form = event_form_class(instance=event)
    if recurrence_form is None:
        recurrence_form = recurrence_form_class(initial={'dtstart': datetime.now()})
    return render(request, template, {
        'event': event,
        'event_form': event_form,
        'recurrence_form': recurrence_form,
    })
|
View an ``Event`` instance and optionally update either the event or its
occurrences.
Context parameters:
``event``
the event keyed by ``pk``
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L43-L88
| null |
import calendar
import itertools
import logging
from datetime import datetime, timedelta, time
from dateutil import parser
from django import http
from django.db import models
from django.template.context import RequestContext
from django.shortcuts import get_object_or_404, render
from .models import Event, Occurrence
from . import utils, forms
from .conf import swingtime_settings
if swingtime_settings.CALENDAR_FIRST_WEEKDAY is not None:
calendar.setfirstweekday(swingtime_settings.CALENDAR_FIRST_WEEKDAY)
def event_listing(
    request,
    template='swingtime/event_list.html',
    events=None,
    **extra_context
):
    '''
    View all ``events``.
    If ``events`` is ``None``, default to all ``Event`` objects.
    Context parameters:
    ``events``
        an iterable of ``Event`` objects
    ... plus all values passed in via **extra_context
    '''
    # Compare against None explicitly: the documented contract is that only a
    # missing (None) value falls back to all events. The previous
    # ``events or Event.objects.all()`` silently replaced an explicitly-passed
    # empty queryset/list with *all* events and forced an early truthiness
    # evaluation of a lazy queryset.
    if events is None:
        events = Event.objects.all()
    extra_context['events'] = events
    return render(request, template, extra_context)
def occurrence_view(
    request,
    event_pk,
    pk,
    template='swingtime/occurrence_detail.html',
    form_class=forms.SingleOccurrenceForm
):
    '''
    Show a single occurrence and handle any updates submitted via POST.
    Context parameters:
    ``occurrence``
        the occurrence object keyed by ``pk``
    ``form``
        a form object for updating the occurrence
    '''
    occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
    if request.method != 'POST':
        form = form_class(instance=occurrence)
    else:
        form = form_class(request.POST, instance=occurrence)
        if form.is_valid():
            form.save()
            return http.HttpResponseRedirect(request.path)
    context = {'occurrence': occurrence, 'form': form}
    return render(request, template, context)
def add_event(
    request,
    template='swingtime/add_event.html',
    event_form_class=forms.EventForm,
    recurrence_form_class=forms.MultipleOccurrenceForm
):
    '''
    Create a new ``Event`` instance together with 1 or more associated
    ``Occurrence`` entries.
    Context parameters:
    ``dtstart``
        a datetime.datetime object representing the GET request value if present,
        otherwise None
    ``event_form``
        a form object for updating the event
    ``recurrence_form``
        a form object for adding occurrences
    '''
    dtstart = None
    if request.method == 'POST':
        event_form = event_form_class(request.POST)
        recurrence_form = recurrence_form_class(request.POST)
        if event_form.is_valid() and recurrence_form.is_valid():
            event = event_form.save()
            recurrence_form.save(event)
            return http.HttpResponseRedirect(event.get_absolute_url())
    else:
        raw = request.GET.get('dtstart')
        if raw is not None:
            try:
                dtstart = parser.parse(raw)
            except (TypeError, ValueError) as exc:
                # TODO: A badly formatted date is passed to add_event
                logging.warning(exc)
        dtstart = dtstart or datetime.now()
        event_form = event_form_class()
        recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
    return render(
        request,
        template,
        {'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
    )
def _datetime_view(
request,
template,
dt,
timeslot_factory=None,
items=None,
params=None
):
'''
Build a time slot grid representation for the given datetime ``dt``. See
utils.create_timeslot_table documentation for items and params.
Context parameters:
``day``
the specified datetime value (dt)
``next_day``
day + 1 day
``prev_day``
day - 1 day
``timeslots``
time slot grid of (time, cells) rows
'''
timeslot_factory = timeslot_factory or utils.create_timeslot_table
params = params or {}
return render(request, template, {
'day': dt,
'next_day': dt + timedelta(days=+1),
'prev_day': dt + timedelta(days=-1),
'timeslots': timeslot_factory(dt, items, **params)
})
def day_view(request, year, month, day, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
dt = datetime(int(year), int(month), int(day))
return _datetime_view(request, template, dt, **params)
def today_view(request, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
return _datetime_view(request, template, datetime.now(), **params)
def year_view(request, year, template='swingtime/yearly_view.html', queryset=None):
'''
Context parameters:
``year``
    an integer value for the year in question
``next_year``
year + 1
``last_year``
year - 1
``by_month``
a sorted list of (month, occurrences) tuples where month is a
datetime.datetime object for the first day of a month and occurrences
is a (potentially empty) list of values for that month. Only months
which have at least 1 occurrence is represented in the list
'''
year = int(year)
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(
models.Q(start_time__year=year) |
models.Q(end_time__year=year)
)
def group_key(o):
return datetime(
year,
o.start_time.month if o.start_time.year == year else o.end_time.month,
1
)
return render(request, template, {
'year': year,
'by_month': [(dt, list(o)) for dt, o in itertools.groupby(occurrences, group_key)],
'next_year': year + 1,
'last_year': year - 1
})
def month_view(
request,
year,
month,
template='swingtime/monthly_view.html',
queryset=None
):
'''
    Render a traditional calendar grid view with temporal navigation variables.
Context parameters:
``today``
the current datetime.datetime value
``calendar``
a list of rows containing (day, items) cells, where day is the day of
the month integer and items is a (potentially empty) list of occurrence
for the day
``this_month``
a datetime.datetime representing the first day of the month
``next_month``
this_month + 1 month
``last_month``
this_month - 1 month
'''
year, month = int(year), int(month)
cal = calendar.monthcalendar(year, month)
dtstart = datetime(year, month, 1)
last_day = max(cal[-1])
dtend = datetime(year, month, last_day)
# TODO Whether to include those occurrences that started in the previous
# month but end in this month?
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(start_time__year=year, start_time__month=month)
def start_day(o):
return o.start_time.day
by_day = dict([(dt, list(o)) for dt, o in itertools.groupby(occurrences, start_day)])
data = {
'today': datetime.now(),
'calendar': [[(d, by_day.get(d, [])) for d in row] for row in cal],
'this_month': dtstart,
'next_month': dtstart + timedelta(days=+last_day),
'last_month': dtstart + timedelta(days=-1),
}
return render(request, template, data)
|
dakrauth/django-swingtime
|
swingtime/views.py
|
occurrence_view
|
python
|
def occurrence_view(
request,
event_pk,
pk,
template='swingtime/occurrence_detail.html',
form_class=forms.SingleOccurrenceForm
):
'''
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
'''
occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
if request.method == 'POST':
form = form_class(request.POST, instance=occurrence)
if form.is_valid():
form.save()
return http.HttpResponseRedirect(request.path)
else:
form = form_class(instance=occurrence)
return render(request, template, {'occurrence': occurrence, 'form': form})
|
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L91-L118
| null |
import calendar
import itertools
import logging
from datetime import datetime, timedelta, time
from dateutil import parser
from django import http
from django.db import models
from django.template.context import RequestContext
from django.shortcuts import get_object_or_404, render
from .models import Event, Occurrence
from . import utils, forms
from .conf import swingtime_settings
if swingtime_settings.CALENDAR_FIRST_WEEKDAY is not None:
calendar.setfirstweekday(swingtime_settings.CALENDAR_FIRST_WEEKDAY)
def event_listing(
request,
template='swingtime/event_list.html',
events=None,
**extra_context
):
'''
View all ``events``.
If ``events`` is a queryset, clone it. If ``None`` default to all ``Event``s.
Context parameters:
``events``
an iterable of ``Event`` objects
... plus all values passed in via **extra_context
'''
events = events or Event.objects.all()
extra_context['events'] = events
return render(request, template, extra_context)
def event_view(
request,
pk,
template='swingtime/event_detail.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
View an ``Event`` instance and optionally update either the event or its
occurrences.
Context parameters:
``event``
the event keyed by ``pk``
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
event = get_object_or_404(Event, pk=pk)
event_form = recurrence_form = None
if request.method == 'POST':
if '_update' in request.POST:
event_form = event_form_class(request.POST, instance=event)
if event_form.is_valid():
event_form.save(event)
return http.HttpResponseRedirect(request.path)
elif '_add' in request.POST:
recurrence_form = recurrence_form_class(request.POST)
if recurrence_form.is_valid():
recurrence_form.save(event)
return http.HttpResponseRedirect(request.path)
else:
return http.HttpResponseBadRequest('Bad Request')
data = {
'event': event,
'event_form': event_form or event_form_class(instance=event),
'recurrence_form': recurrence_form or recurrence_form_class(
initial={'dtstart': datetime.now()}
)
}
return render(request, template, data)
def add_event(
request,
template='swingtime/add_event.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.
Context parameters:
``dtstart``
a datetime.datetime object representing the GET request value if present,
otherwise None
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
dtstart = None
if request.method == 'POST':
event_form = event_form_class(request.POST)
recurrence_form = recurrence_form_class(request.POST)
if event_form.is_valid() and recurrence_form.is_valid():
event = event_form.save()
recurrence_form.save(event)
return http.HttpResponseRedirect(event.get_absolute_url())
else:
if 'dtstart' in request.GET:
try:
dtstart = parser.parse(request.GET['dtstart'])
except(TypeError, ValueError) as exc:
# TODO: A badly formatted date is passed to add_event
logging.warning(exc)
dtstart = dtstart or datetime.now()
event_form = event_form_class()
recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
return render(
request,
template,
{'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
)
def _datetime_view(
request,
template,
dt,
timeslot_factory=None,
items=None,
params=None
):
'''
Build a time slot grid representation for the given datetime ``dt``. See
utils.create_timeslot_table documentation for items and params.
Context parameters:
``day``
the specified datetime value (dt)
``next_day``
day + 1 day
``prev_day``
day - 1 day
``timeslots``
time slot grid of (time, cells) rows
'''
timeslot_factory = timeslot_factory or utils.create_timeslot_table
params = params or {}
return render(request, template, {
'day': dt,
'next_day': dt + timedelta(days=+1),
'prev_day': dt + timedelta(days=-1),
'timeslots': timeslot_factory(dt, items, **params)
})
def day_view(request, year, month, day, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
dt = datetime(int(year), int(month), int(day))
return _datetime_view(request, template, dt, **params)
def today_view(request, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
return _datetime_view(request, template, datetime.now(), **params)
def year_view(request, year, template='swingtime/yearly_view.html', queryset=None):
'''
Context parameters:
``year``
an integer value for the year in questin
``next_year``
year + 1
``last_year``
year - 1
``by_month``
a sorted list of (month, occurrences) tuples where month is a
datetime.datetime object for the first day of a month and occurrences
is a (potentially empty) list of values for that month. Only months
which have at least 1 occurrence is represented in the list
'''
year = int(year)
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(
models.Q(start_time__year=year) |
models.Q(end_time__year=year)
)
def group_key(o):
return datetime(
year,
o.start_time.month if o.start_time.year == year else o.end_time.month,
1
)
return render(request, template, {
'year': year,
'by_month': [(dt, list(o)) for dt, o in itertools.groupby(occurrences, group_key)],
'next_year': year + 1,
'last_year': year - 1
})
def month_view(
request,
year,
month,
template='swingtime/monthly_view.html',
queryset=None
):
'''
Render a tradional calendar grid view with temporal navigation variables.
Context parameters:
``today``
the current datetime.datetime value
``calendar``
a list of rows containing (day, items) cells, where day is the day of
the month integer and items is a (potentially empty) list of occurrence
for the day
``this_month``
a datetime.datetime representing the first day of the month
``next_month``
this_month + 1 month
``last_month``
this_month - 1 month
'''
year, month = int(year), int(month)
cal = calendar.monthcalendar(year, month)
dtstart = datetime(year, month, 1)
last_day = max(cal[-1])
dtend = datetime(year, month, last_day)
# TODO Whether to include those occurrences that started in the previous
# month but end in this month?
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(start_time__year=year, start_time__month=month)
def start_day(o):
return o.start_time.day
by_day = dict([(dt, list(o)) for dt, o in itertools.groupby(occurrences, start_day)])
data = {
'today': datetime.now(),
'calendar': [[(d, by_day.get(d, [])) for d in row] for row in cal],
'this_month': dtstart,
'next_month': dtstart + timedelta(days=+last_day),
'last_month': dtstart + timedelta(days=-1),
}
return render(request, template, data)
|
dakrauth/django-swingtime
|
swingtime/views.py
|
add_event
|
python
|
def add_event(
request,
template='swingtime/add_event.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.
Context parameters:
``dtstart``
a datetime.datetime object representing the GET request value if present,
otherwise None
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
dtstart = None
if request.method == 'POST':
event_form = event_form_class(request.POST)
recurrence_form = recurrence_form_class(request.POST)
if event_form.is_valid() and recurrence_form.is_valid():
event = event_form.save()
recurrence_form.save(event)
return http.HttpResponseRedirect(event.get_absolute_url())
else:
if 'dtstart' in request.GET:
try:
dtstart = parser.parse(request.GET['dtstart'])
except(TypeError, ValueError) as exc:
# TODO: A badly formatted date is passed to add_event
logging.warning(exc)
dtstart = dtstart or datetime.now()
event_form = event_form_class()
recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
return render(
request,
template,
{'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
)
|
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.
Context parameters:
``dtstart``
a datetime.datetime object representing the GET request value if present,
otherwise None
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L121-L168
| null |
import calendar
import itertools
import logging
from datetime import datetime, timedelta, time
from dateutil import parser
from django import http
from django.db import models
from django.template.context import RequestContext
from django.shortcuts import get_object_or_404, render
from .models import Event, Occurrence
from . import utils, forms
from .conf import swingtime_settings
if swingtime_settings.CALENDAR_FIRST_WEEKDAY is not None:
calendar.setfirstweekday(swingtime_settings.CALENDAR_FIRST_WEEKDAY)
def event_listing(
request,
template='swingtime/event_list.html',
events=None,
**extra_context
):
'''
View all ``events``.
If ``events`` is a queryset, clone it. If ``None`` default to all ``Event``s.
Context parameters:
``events``
an iterable of ``Event`` objects
... plus all values passed in via **extra_context
'''
events = events or Event.objects.all()
extra_context['events'] = events
return render(request, template, extra_context)
def event_view(
request,
pk,
template='swingtime/event_detail.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
View an ``Event`` instance and optionally update either the event or its
occurrences.
Context parameters:
``event``
the event keyed by ``pk``
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
event = get_object_or_404(Event, pk=pk)
event_form = recurrence_form = None
if request.method == 'POST':
if '_update' in request.POST:
event_form = event_form_class(request.POST, instance=event)
if event_form.is_valid():
event_form.save(event)
return http.HttpResponseRedirect(request.path)
elif '_add' in request.POST:
recurrence_form = recurrence_form_class(request.POST)
if recurrence_form.is_valid():
recurrence_form.save(event)
return http.HttpResponseRedirect(request.path)
else:
return http.HttpResponseBadRequest('Bad Request')
data = {
'event': event,
'event_form': event_form or event_form_class(instance=event),
'recurrence_form': recurrence_form or recurrence_form_class(
initial={'dtstart': datetime.now()}
)
}
return render(request, template, data)
def occurrence_view(
request,
event_pk,
pk,
template='swingtime/occurrence_detail.html',
form_class=forms.SingleOccurrenceForm
):
'''
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
'''
occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
if request.method == 'POST':
form = form_class(request.POST, instance=occurrence)
if form.is_valid():
form.save()
return http.HttpResponseRedirect(request.path)
else:
form = form_class(instance=occurrence)
return render(request, template, {'occurrence': occurrence, 'form': form})
def _datetime_view(
request,
template,
dt,
timeslot_factory=None,
items=None,
params=None
):
'''
Build a time slot grid representation for the given datetime ``dt``. See
utils.create_timeslot_table documentation for items and params.
Context parameters:
``day``
the specified datetime value (dt)
``next_day``
day + 1 day
``prev_day``
day - 1 day
``timeslots``
time slot grid of (time, cells) rows
'''
timeslot_factory = timeslot_factory or utils.create_timeslot_table
params = params or {}
return render(request, template, {
'day': dt,
'next_day': dt + timedelta(days=+1),
'prev_day': dt + timedelta(days=-1),
'timeslots': timeslot_factory(dt, items, **params)
})
def day_view(request, year, month, day, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
dt = datetime(int(year), int(month), int(day))
return _datetime_view(request, template, dt, **params)
def today_view(request, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
return _datetime_view(request, template, datetime.now(), **params)
def year_view(request, year, template='swingtime/yearly_view.html', queryset=None):
'''
Context parameters:
``year``
an integer value for the year in questin
``next_year``
year + 1
``last_year``
year - 1
``by_month``
a sorted list of (month, occurrences) tuples where month is a
datetime.datetime object for the first day of a month and occurrences
is a (potentially empty) list of values for that month. Only months
which have at least 1 occurrence is represented in the list
'''
year = int(year)
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(
models.Q(start_time__year=year) |
models.Q(end_time__year=year)
)
def group_key(o):
return datetime(
year,
o.start_time.month if o.start_time.year == year else o.end_time.month,
1
)
return render(request, template, {
'year': year,
'by_month': [(dt, list(o)) for dt, o in itertools.groupby(occurrences, group_key)],
'next_year': year + 1,
'last_year': year - 1
})
def month_view(
request,
year,
month,
template='swingtime/monthly_view.html',
queryset=None
):
'''
Render a tradional calendar grid view with temporal navigation variables.
Context parameters:
``today``
the current datetime.datetime value
``calendar``
a list of rows containing (day, items) cells, where day is the day of
the month integer and items is a (potentially empty) list of occurrence
for the day
``this_month``
a datetime.datetime representing the first day of the month
``next_month``
this_month + 1 month
``last_month``
this_month - 1 month
'''
year, month = int(year), int(month)
cal = calendar.monthcalendar(year, month)
dtstart = datetime(year, month, 1)
last_day = max(cal[-1])
dtend = datetime(year, month, last_day)
# TODO Whether to include those occurrences that started in the previous
# month but end in this month?
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(start_time__year=year, start_time__month=month)
def start_day(o):
return o.start_time.day
by_day = dict([(dt, list(o)) for dt, o in itertools.groupby(occurrences, start_day)])
data = {
'today': datetime.now(),
'calendar': [[(d, by_day.get(d, [])) for d in row] for row in cal],
'this_month': dtstart,
'next_month': dtstart + timedelta(days=+last_day),
'last_month': dtstart + timedelta(days=-1),
}
return render(request, template, data)
|
dakrauth/django-swingtime
|
swingtime/views.py
|
_datetime_view
|
python
|
def _datetime_view(
request,
template,
dt,
timeslot_factory=None,
items=None,
params=None
):
'''
Build a time slot grid representation for the given datetime ``dt``. See
utils.create_timeslot_table documentation for items and params.
Context parameters:
``day``
the specified datetime value (dt)
``next_day``
day + 1 day
``prev_day``
day - 1 day
``timeslots``
time slot grid of (time, cells) rows
'''
timeslot_factory = timeslot_factory or utils.create_timeslot_table
params = params or {}
return render(request, template, {
'day': dt,
'next_day': dt + timedelta(days=+1),
'prev_day': dt + timedelta(days=-1),
'timeslots': timeslot_factory(dt, items, **params)
})
|
Build a time slot grid representation for the given datetime ``dt``. See
utils.create_timeslot_table documentation for items and params.
Context parameters:
``day``
the specified datetime value (dt)
``next_day``
day + 1 day
``prev_day``
day - 1 day
``timeslots``
time slot grid of (time, cells) rows
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L171-L206
| null |
import calendar
import itertools
import logging
from datetime import datetime, timedelta, time
from dateutil import parser
from django import http
from django.db import models
from django.template.context import RequestContext
from django.shortcuts import get_object_or_404, render
from .models import Event, Occurrence
from . import utils, forms
from .conf import swingtime_settings
if swingtime_settings.CALENDAR_FIRST_WEEKDAY is not None:
calendar.setfirstweekday(swingtime_settings.CALENDAR_FIRST_WEEKDAY)
def event_listing(
request,
template='swingtime/event_list.html',
events=None,
**extra_context
):
'''
View all ``events``.
If ``events`` is a queryset, clone it. If ``None`` default to all ``Event``s.
Context parameters:
``events``
an iterable of ``Event`` objects
... plus all values passed in via **extra_context
'''
events = events or Event.objects.all()
extra_context['events'] = events
return render(request, template, extra_context)
def event_view(
request,
pk,
template='swingtime/event_detail.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
View an ``Event`` instance and optionally update either the event or its
occurrences.
Context parameters:
``event``
the event keyed by ``pk``
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
event = get_object_or_404(Event, pk=pk)
event_form = recurrence_form = None
if request.method == 'POST':
if '_update' in request.POST:
event_form = event_form_class(request.POST, instance=event)
if event_form.is_valid():
event_form.save(event)
return http.HttpResponseRedirect(request.path)
elif '_add' in request.POST:
recurrence_form = recurrence_form_class(request.POST)
if recurrence_form.is_valid():
recurrence_form.save(event)
return http.HttpResponseRedirect(request.path)
else:
return http.HttpResponseBadRequest('Bad Request')
data = {
'event': event,
'event_form': event_form or event_form_class(instance=event),
'recurrence_form': recurrence_form or recurrence_form_class(
initial={'dtstart': datetime.now()}
)
}
return render(request, template, data)
def occurrence_view(
request,
event_pk,
pk,
template='swingtime/occurrence_detail.html',
form_class=forms.SingleOccurrenceForm
):
'''
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
'''
occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
if request.method == 'POST':
form = form_class(request.POST, instance=occurrence)
if form.is_valid():
form.save()
return http.HttpResponseRedirect(request.path)
else:
form = form_class(instance=occurrence)
return render(request, template, {'occurrence': occurrence, 'form': form})
def add_event(
request,
template='swingtime/add_event.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.
Context parameters:
``dtstart``
a datetime.datetime object representing the GET request value if present,
otherwise None
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
dtstart = None
if request.method == 'POST':
event_form = event_form_class(request.POST)
recurrence_form = recurrence_form_class(request.POST)
if event_form.is_valid() and recurrence_form.is_valid():
event = event_form.save()
recurrence_form.save(event)
return http.HttpResponseRedirect(event.get_absolute_url())
else:
if 'dtstart' in request.GET:
try:
dtstart = parser.parse(request.GET['dtstart'])
except(TypeError, ValueError) as exc:
# TODO: A badly formatted date is passed to add_event
logging.warning(exc)
dtstart = dtstart or datetime.now()
event_form = event_form_class()
recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
return render(
request,
template,
{'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
)
def day_view(request, year, month, day, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
dt = datetime(int(year), int(month), int(day))
return _datetime_view(request, template, dt, **params)
def today_view(request, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
return _datetime_view(request, template, datetime.now(), **params)
def year_view(request, year, template='swingtime/yearly_view.html', queryset=None):
'''
Context parameters:
``year``
an integer value for the year in questin
``next_year``
year + 1
``last_year``
year - 1
``by_month``
a sorted list of (month, occurrences) tuples where month is a
datetime.datetime object for the first day of a month and occurrences
is a (potentially empty) list of values for that month. Only months
which have at least 1 occurrence is represented in the list
'''
year = int(year)
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(
models.Q(start_time__year=year) |
models.Q(end_time__year=year)
)
def group_key(o):
return datetime(
year,
o.start_time.month if o.start_time.year == year else o.end_time.month,
1
)
return render(request, template, {
'year': year,
'by_month': [(dt, list(o)) for dt, o in itertools.groupby(occurrences, group_key)],
'next_year': year + 1,
'last_year': year - 1
})
def month_view(
request,
year,
month,
template='swingtime/monthly_view.html',
queryset=None
):
'''
Render a tradional calendar grid view with temporal navigation variables.
Context parameters:
``today``
the current datetime.datetime value
``calendar``
a list of rows containing (day, items) cells, where day is the day of
the month integer and items is a (potentially empty) list of occurrence
for the day
``this_month``
a datetime.datetime representing the first day of the month
``next_month``
this_month + 1 month
``last_month``
this_month - 1 month
'''
year, month = int(year), int(month)
cal = calendar.monthcalendar(year, month)
dtstart = datetime(year, month, 1)
last_day = max(cal[-1])
dtend = datetime(year, month, last_day)
# TODO Whether to include those occurrences that started in the previous
# month but end in this month?
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(start_time__year=year, start_time__month=month)
def start_day(o):
return o.start_time.day
by_day = dict([(dt, list(o)) for dt, o in itertools.groupby(occurrences, start_day)])
data = {
'today': datetime.now(),
'calendar': [[(d, by_day.get(d, [])) for d in row] for row in cal],
'this_month': dtstart,
'next_month': dtstart + timedelta(days=+last_day),
'last_month': dtstart + timedelta(days=-1),
}
return render(request, template, data)
|
dakrauth/django-swingtime
|
swingtime/views.py
|
day_view
|
python
|
def day_view(request, year, month, day, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
dt = datetime(int(year), int(month), int(day))
return _datetime_view(request, template, dt, **params)
|
See documentation for function``_datetime_view``.
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L209-L215
|
[
"def _datetime_view(\n request,\n template,\n dt,\n timeslot_factory=None,\n items=None,\n params=None\n):\n '''\n Build a time slot grid representation for the given datetime ``dt``. See\n utils.create_timeslot_table documentation for items and params.\n\n Context parameters:\n\n ``day``\n the specified datetime value (dt)\n\n ``next_day``\n day + 1 day\n\n ``prev_day``\n day - 1 day\n\n ``timeslots``\n time slot grid of (time, cells) rows\n\n '''\n timeslot_factory = timeslot_factory or utils.create_timeslot_table\n params = params or {}\n\n return render(request, template, {\n 'day': dt,\n 'next_day': dt + timedelta(days=+1),\n 'prev_day': dt + timedelta(days=-1),\n 'timeslots': timeslot_factory(dt, items, **params)\n })\n"
] |
import calendar
import itertools
import logging
from datetime import datetime, timedelta, time
from dateutil import parser
from django import http
from django.db import models
from django.template.context import RequestContext
from django.shortcuts import get_object_or_404, render
from .models import Event, Occurrence
from . import utils, forms
from .conf import swingtime_settings
if swingtime_settings.CALENDAR_FIRST_WEEKDAY is not None:
calendar.setfirstweekday(swingtime_settings.CALENDAR_FIRST_WEEKDAY)
def event_listing(
request,
template='swingtime/event_list.html',
events=None,
**extra_context
):
'''
View all ``events``.
If ``events`` is a queryset, clone it. If ``None`` default to all ``Event``s.
Context parameters:
``events``
an iterable of ``Event`` objects
... plus all values passed in via **extra_context
'''
events = events or Event.objects.all()
extra_context['events'] = events
return render(request, template, extra_context)
def event_view(
request,
pk,
template='swingtime/event_detail.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
View an ``Event`` instance and optionally update either the event or its
occurrences.
Context parameters:
``event``
the event keyed by ``pk``
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
event = get_object_or_404(Event, pk=pk)
event_form = recurrence_form = None
if request.method == 'POST':
if '_update' in request.POST:
event_form = event_form_class(request.POST, instance=event)
if event_form.is_valid():
event_form.save(event)
return http.HttpResponseRedirect(request.path)
elif '_add' in request.POST:
recurrence_form = recurrence_form_class(request.POST)
if recurrence_form.is_valid():
recurrence_form.save(event)
return http.HttpResponseRedirect(request.path)
else:
return http.HttpResponseBadRequest('Bad Request')
data = {
'event': event,
'event_form': event_form or event_form_class(instance=event),
'recurrence_form': recurrence_form or recurrence_form_class(
initial={'dtstart': datetime.now()}
)
}
return render(request, template, data)
def occurrence_view(
request,
event_pk,
pk,
template='swingtime/occurrence_detail.html',
form_class=forms.SingleOccurrenceForm
):
'''
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
'''
occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
if request.method == 'POST':
form = form_class(request.POST, instance=occurrence)
if form.is_valid():
form.save()
return http.HttpResponseRedirect(request.path)
else:
form = form_class(instance=occurrence)
return render(request, template, {'occurrence': occurrence, 'form': form})
def add_event(
request,
template='swingtime/add_event.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.
Context parameters:
``dtstart``
a datetime.datetime object representing the GET request value if present,
otherwise None
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
dtstart = None
if request.method == 'POST':
event_form = event_form_class(request.POST)
recurrence_form = recurrence_form_class(request.POST)
if event_form.is_valid() and recurrence_form.is_valid():
event = event_form.save()
recurrence_form.save(event)
return http.HttpResponseRedirect(event.get_absolute_url())
else:
if 'dtstart' in request.GET:
try:
dtstart = parser.parse(request.GET['dtstart'])
except(TypeError, ValueError) as exc:
# TODO: A badly formatted date is passed to add_event
logging.warning(exc)
dtstart = dtstart or datetime.now()
event_form = event_form_class()
recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
return render(
request,
template,
{'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
)
def _datetime_view(
request,
template,
dt,
timeslot_factory=None,
items=None,
params=None
):
'''
Build a time slot grid representation for the given datetime ``dt``. See
utils.create_timeslot_table documentation for items and params.
Context parameters:
``day``
the specified datetime value (dt)
``next_day``
day + 1 day
``prev_day``
day - 1 day
``timeslots``
time slot grid of (time, cells) rows
'''
timeslot_factory = timeslot_factory or utils.create_timeslot_table
params = params or {}
return render(request, template, {
'day': dt,
'next_day': dt + timedelta(days=+1),
'prev_day': dt + timedelta(days=-1),
'timeslots': timeslot_factory(dt, items, **params)
})
def today_view(request, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
return _datetime_view(request, template, datetime.now(), **params)
def year_view(request, year, template='swingtime/yearly_view.html', queryset=None):
'''
Context parameters:
``year``
an integer value for the year in questin
``next_year``
year + 1
``last_year``
year - 1
``by_month``
a sorted list of (month, occurrences) tuples where month is a
datetime.datetime object for the first day of a month and occurrences
is a (potentially empty) list of values for that month. Only months
which have at least 1 occurrence is represented in the list
'''
year = int(year)
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(
models.Q(start_time__year=year) |
models.Q(end_time__year=year)
)
def group_key(o):
return datetime(
year,
o.start_time.month if o.start_time.year == year else o.end_time.month,
1
)
return render(request, template, {
'year': year,
'by_month': [(dt, list(o)) for dt, o in itertools.groupby(occurrences, group_key)],
'next_year': year + 1,
'last_year': year - 1
})
def month_view(
request,
year,
month,
template='swingtime/monthly_view.html',
queryset=None
):
'''
Render a tradional calendar grid view with temporal navigation variables.
Context parameters:
``today``
the current datetime.datetime value
``calendar``
a list of rows containing (day, items) cells, where day is the day of
the month integer and items is a (potentially empty) list of occurrence
for the day
``this_month``
a datetime.datetime representing the first day of the month
``next_month``
this_month + 1 month
``last_month``
this_month - 1 month
'''
year, month = int(year), int(month)
cal = calendar.monthcalendar(year, month)
dtstart = datetime(year, month, 1)
last_day = max(cal[-1])
dtend = datetime(year, month, last_day)
# TODO Whether to include those occurrences that started in the previous
# month but end in this month?
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(start_time__year=year, start_time__month=month)
def start_day(o):
return o.start_time.day
by_day = dict([(dt, list(o)) for dt, o in itertools.groupby(occurrences, start_day)])
data = {
'today': datetime.now(),
'calendar': [[(d, by_day.get(d, [])) for d in row] for row in cal],
'this_month': dtstart,
'next_month': dtstart + timedelta(days=+last_day),
'last_month': dtstart + timedelta(days=-1),
}
return render(request, template, data)
|
dakrauth/django-swingtime
|
swingtime/views.py
|
today_view
|
python
|
def today_view(request, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
return _datetime_view(request, template, datetime.now(), **params)
|
See documentation for function``_datetime_view``.
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L218-L223
|
[
"def _datetime_view(\n request,\n template,\n dt,\n timeslot_factory=None,\n items=None,\n params=None\n):\n '''\n Build a time slot grid representation for the given datetime ``dt``. See\n utils.create_timeslot_table documentation for items and params.\n\n Context parameters:\n\n ``day``\n the specified datetime value (dt)\n\n ``next_day``\n day + 1 day\n\n ``prev_day``\n day - 1 day\n\n ``timeslots``\n time slot grid of (time, cells) rows\n\n '''\n timeslot_factory = timeslot_factory or utils.create_timeslot_table\n params = params or {}\n\n return render(request, template, {\n 'day': dt,\n 'next_day': dt + timedelta(days=+1),\n 'prev_day': dt + timedelta(days=-1),\n 'timeslots': timeslot_factory(dt, items, **params)\n })\n"
] |
import calendar
import itertools
import logging
from datetime import datetime, timedelta, time
from dateutil import parser
from django import http
from django.db import models
from django.template.context import RequestContext
from django.shortcuts import get_object_or_404, render
from .models import Event, Occurrence
from . import utils, forms
from .conf import swingtime_settings
if swingtime_settings.CALENDAR_FIRST_WEEKDAY is not None:
calendar.setfirstweekday(swingtime_settings.CALENDAR_FIRST_WEEKDAY)
def event_listing(
request,
template='swingtime/event_list.html',
events=None,
**extra_context
):
'''
View all ``events``.
If ``events`` is a queryset, clone it. If ``None`` default to all ``Event``s.
Context parameters:
``events``
an iterable of ``Event`` objects
... plus all values passed in via **extra_context
'''
events = events or Event.objects.all()
extra_context['events'] = events
return render(request, template, extra_context)
def event_view(
request,
pk,
template='swingtime/event_detail.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
View an ``Event`` instance and optionally update either the event or its
occurrences.
Context parameters:
``event``
the event keyed by ``pk``
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
event = get_object_or_404(Event, pk=pk)
event_form = recurrence_form = None
if request.method == 'POST':
if '_update' in request.POST:
event_form = event_form_class(request.POST, instance=event)
if event_form.is_valid():
event_form.save(event)
return http.HttpResponseRedirect(request.path)
elif '_add' in request.POST:
recurrence_form = recurrence_form_class(request.POST)
if recurrence_form.is_valid():
recurrence_form.save(event)
return http.HttpResponseRedirect(request.path)
else:
return http.HttpResponseBadRequest('Bad Request')
data = {
'event': event,
'event_form': event_form or event_form_class(instance=event),
'recurrence_form': recurrence_form or recurrence_form_class(
initial={'dtstart': datetime.now()}
)
}
return render(request, template, data)
def occurrence_view(
request,
event_pk,
pk,
template='swingtime/occurrence_detail.html',
form_class=forms.SingleOccurrenceForm
):
'''
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
'''
occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
if request.method == 'POST':
form = form_class(request.POST, instance=occurrence)
if form.is_valid():
form.save()
return http.HttpResponseRedirect(request.path)
else:
form = form_class(instance=occurrence)
return render(request, template, {'occurrence': occurrence, 'form': form})
def add_event(
request,
template='swingtime/add_event.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.
Context parameters:
``dtstart``
a datetime.datetime object representing the GET request value if present,
otherwise None
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
dtstart = None
if request.method == 'POST':
event_form = event_form_class(request.POST)
recurrence_form = recurrence_form_class(request.POST)
if event_form.is_valid() and recurrence_form.is_valid():
event = event_form.save()
recurrence_form.save(event)
return http.HttpResponseRedirect(event.get_absolute_url())
else:
if 'dtstart' in request.GET:
try:
dtstart = parser.parse(request.GET['dtstart'])
except(TypeError, ValueError) as exc:
# TODO: A badly formatted date is passed to add_event
logging.warning(exc)
dtstart = dtstart or datetime.now()
event_form = event_form_class()
recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
return render(
request,
template,
{'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
)
def _datetime_view(
request,
template,
dt,
timeslot_factory=None,
items=None,
params=None
):
'''
Build a time slot grid representation for the given datetime ``dt``. See
utils.create_timeslot_table documentation for items and params.
Context parameters:
``day``
the specified datetime value (dt)
``next_day``
day + 1 day
``prev_day``
day - 1 day
``timeslots``
time slot grid of (time, cells) rows
'''
timeslot_factory = timeslot_factory or utils.create_timeslot_table
params = params or {}
return render(request, template, {
'day': dt,
'next_day': dt + timedelta(days=+1),
'prev_day': dt + timedelta(days=-1),
'timeslots': timeslot_factory(dt, items, **params)
})
def day_view(request, year, month, day, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
dt = datetime(int(year), int(month), int(day))
return _datetime_view(request, template, dt, **params)
def year_view(request, year, template='swingtime/yearly_view.html', queryset=None):
'''
Context parameters:
``year``
an integer value for the year in questin
``next_year``
year + 1
``last_year``
year - 1
``by_month``
a sorted list of (month, occurrences) tuples where month is a
datetime.datetime object for the first day of a month and occurrences
is a (potentially empty) list of values for that month. Only months
which have at least 1 occurrence is represented in the list
'''
year = int(year)
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(
models.Q(start_time__year=year) |
models.Q(end_time__year=year)
)
def group_key(o):
return datetime(
year,
o.start_time.month if o.start_time.year == year else o.end_time.month,
1
)
return render(request, template, {
'year': year,
'by_month': [(dt, list(o)) for dt, o in itertools.groupby(occurrences, group_key)],
'next_year': year + 1,
'last_year': year - 1
})
def month_view(
request,
year,
month,
template='swingtime/monthly_view.html',
queryset=None
):
'''
Render a tradional calendar grid view with temporal navigation variables.
Context parameters:
``today``
the current datetime.datetime value
``calendar``
a list of rows containing (day, items) cells, where day is the day of
the month integer and items is a (potentially empty) list of occurrence
for the day
``this_month``
a datetime.datetime representing the first day of the month
``next_month``
this_month + 1 month
``last_month``
this_month - 1 month
'''
year, month = int(year), int(month)
cal = calendar.monthcalendar(year, month)
dtstart = datetime(year, month, 1)
last_day = max(cal[-1])
dtend = datetime(year, month, last_day)
# TODO Whether to include those occurrences that started in the previous
# month but end in this month?
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(start_time__year=year, start_time__month=month)
def start_day(o):
return o.start_time.day
by_day = dict([(dt, list(o)) for dt, o in itertools.groupby(occurrences, start_day)])
data = {
'today': datetime.now(),
'calendar': [[(d, by_day.get(d, [])) for d in row] for row in cal],
'this_month': dtstart,
'next_month': dtstart + timedelta(days=+last_day),
'last_month': dtstart + timedelta(days=-1),
}
return render(request, template, data)
|
dakrauth/django-swingtime
|
swingtime/views.py
|
month_view
|
python
|
def month_view(
request,
year,
month,
template='swingtime/monthly_view.html',
queryset=None
):
'''
Render a tradional calendar grid view with temporal navigation variables.
Context parameters:
``today``
the current datetime.datetime value
``calendar``
a list of rows containing (day, items) cells, where day is the day of
the month integer and items is a (potentially empty) list of occurrence
for the day
``this_month``
a datetime.datetime representing the first day of the month
``next_month``
this_month + 1 month
``last_month``
this_month - 1 month
'''
year, month = int(year), int(month)
cal = calendar.monthcalendar(year, month)
dtstart = datetime(year, month, 1)
last_day = max(cal[-1])
dtend = datetime(year, month, last_day)
# TODO Whether to include those occurrences that started in the previous
# month but end in this month?
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(start_time__year=year, start_time__month=month)
def start_day(o):
return o.start_time.day
by_day = dict([(dt, list(o)) for dt, o in itertools.groupby(occurrences, start_day)])
data = {
'today': datetime.now(),
'calendar': [[(d, by_day.get(d, [])) for d in row] for row in cal],
'this_month': dtstart,
'next_month': dtstart + timedelta(days=+last_day),
'last_month': dtstart + timedelta(days=-1),
}
return render(request, template, data)
|
Render a tradional calendar grid view with temporal navigation variables.
Context parameters:
``today``
the current datetime.datetime value
``calendar``
a list of rows containing (day, items) cells, where day is the day of
the month integer and items is a (potentially empty) list of occurrence
for the day
``this_month``
a datetime.datetime representing the first day of the month
``next_month``
this_month + 1 month
``last_month``
this_month - 1 month
|
train
|
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L270-L323
| null |
import calendar
import itertools
import logging
from datetime import datetime, timedelta, time
from dateutil import parser
from django import http
from django.db import models
from django.template.context import RequestContext
from django.shortcuts import get_object_or_404, render
from .models import Event, Occurrence
from . import utils, forms
from .conf import swingtime_settings
if swingtime_settings.CALENDAR_FIRST_WEEKDAY is not None:
calendar.setfirstweekday(swingtime_settings.CALENDAR_FIRST_WEEKDAY)
def event_listing(
request,
template='swingtime/event_list.html',
events=None,
**extra_context
):
'''
View all ``events``.
If ``events`` is a queryset, clone it. If ``None`` default to all ``Event``s.
Context parameters:
``events``
an iterable of ``Event`` objects
... plus all values passed in via **extra_context
'''
events = events or Event.objects.all()
extra_context['events'] = events
return render(request, template, extra_context)
def event_view(
request,
pk,
template='swingtime/event_detail.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
View an ``Event`` instance and optionally update either the event or its
occurrences.
Context parameters:
``event``
the event keyed by ``pk``
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
event = get_object_or_404(Event, pk=pk)
event_form = recurrence_form = None
if request.method == 'POST':
if '_update' in request.POST:
event_form = event_form_class(request.POST, instance=event)
if event_form.is_valid():
event_form.save(event)
return http.HttpResponseRedirect(request.path)
elif '_add' in request.POST:
recurrence_form = recurrence_form_class(request.POST)
if recurrence_form.is_valid():
recurrence_form.save(event)
return http.HttpResponseRedirect(request.path)
else:
return http.HttpResponseBadRequest('Bad Request')
data = {
'event': event,
'event_form': event_form or event_form_class(instance=event),
'recurrence_form': recurrence_form or recurrence_form_class(
initial={'dtstart': datetime.now()}
)
}
return render(request, template, data)
def occurrence_view(
request,
event_pk,
pk,
template='swingtime/occurrence_detail.html',
form_class=forms.SingleOccurrenceForm
):
'''
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
'''
occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
if request.method == 'POST':
form = form_class(request.POST, instance=occurrence)
if form.is_valid():
form.save()
return http.HttpResponseRedirect(request.path)
else:
form = form_class(instance=occurrence)
return render(request, template, {'occurrence': occurrence, 'form': form})
def add_event(
request,
template='swingtime/add_event.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.
Context parameters:
``dtstart``
a datetime.datetime object representing the GET request value if present,
otherwise None
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
dtstart = None
if request.method == 'POST':
event_form = event_form_class(request.POST)
recurrence_form = recurrence_form_class(request.POST)
if event_form.is_valid() and recurrence_form.is_valid():
event = event_form.save()
recurrence_form.save(event)
return http.HttpResponseRedirect(event.get_absolute_url())
else:
if 'dtstart' in request.GET:
try:
dtstart = parser.parse(request.GET['dtstart'])
except(TypeError, ValueError) as exc:
# TODO: A badly formatted date is passed to add_event
logging.warning(exc)
dtstart = dtstart or datetime.now()
event_form = event_form_class()
recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
return render(
request,
template,
{'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
)
def _datetime_view(
request,
template,
dt,
timeslot_factory=None,
items=None,
params=None
):
'''
Build a time slot grid representation for the given datetime ``dt``. See
utils.create_timeslot_table documentation for items and params.
Context parameters:
``day``
the specified datetime value (dt)
``next_day``
day + 1 day
``prev_day``
day - 1 day
``timeslots``
time slot grid of (time, cells) rows
'''
timeslot_factory = timeslot_factory or utils.create_timeslot_table
params = params or {}
return render(request, template, {
'day': dt,
'next_day': dt + timedelta(days=+1),
'prev_day': dt + timedelta(days=-1),
'timeslots': timeslot_factory(dt, items, **params)
})
def day_view(request, year, month, day, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
dt = datetime(int(year), int(month), int(day))
return _datetime_view(request, template, dt, **params)
def today_view(request, template='swingtime/daily_view.html', **params):
'''
See documentation for function``_datetime_view``.
'''
return _datetime_view(request, template, datetime.now(), **params)
def year_view(request, year, template='swingtime/yearly_view.html', queryset=None):
'''
Context parameters:
``year``
an integer value for the year in questin
``next_year``
year + 1
``last_year``
year - 1
``by_month``
a sorted list of (month, occurrences) tuples where month is a
datetime.datetime object for the first day of a month and occurrences
is a (potentially empty) list of values for that month. Only months
which have at least 1 occurrence is represented in the list
'''
year = int(year)
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(
models.Q(start_time__year=year) |
models.Q(end_time__year=year)
)
def group_key(o):
return datetime(
year,
o.start_time.month if o.start_time.year == year else o.end_time.month,
1
)
return render(request, template, {
'year': year,
'by_month': [(dt, list(o)) for dt, o in itertools.groupby(occurrences, group_key)],
'next_year': year + 1,
'last_year': year - 1
})
|
systemd/python-systemd
|
systemd/journal.py
|
get_catalog
|
python
|
def get_catalog(mid):
if isinstance(mid, _uuid.UUID):
mid = mid.hex
return _get_catalog(mid)
|
Return catalog entry for the specified ID.
`mid` should be either a UUID or a 32 digit hex number.
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L393-L400
| null |
# -*- Mode: python; coding:utf-8; indent-tabs-mode: nil -*- */
#
#
# Copyright 2012 David Strauss <david@davidstrauss.net>
# Copyright 2012 Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>
# Copyright 2012 Marti Raudsepp <marti@juffo.org>
#
# python-systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# python-systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with python-systemd; If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import sys as _sys
import datetime as _datetime
import uuid as _uuid
import traceback as _traceback
import os as _os
import logging as _logging
from syslog import (LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR,
LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG)
if _sys.version_info >= (3,3):
from collections import ChainMap as _ChainMap
from ._journal import __version__, sendv, stream_fd
from ._reader import (_Reader, NOP, APPEND, INVALIDATE,
LOCAL_ONLY, RUNTIME_ONLY,
SYSTEM, SYSTEM_ONLY, CURRENT_USER,
OS_ROOT,
_get_catalog)
from . import id128 as _id128
if _sys.version_info >= (3,):
from ._reader import Monotonic
else:
Monotonic = tuple
def _convert_monotonic(m):
return Monotonic((_datetime.timedelta(microseconds=m[0]),
_uuid.UUID(bytes=m[1])))
def _convert_source_monotonic(s):
return _datetime.timedelta(microseconds=int(s))
def _convert_realtime(t):
return _datetime.datetime.fromtimestamp(t / 1000000)
def _convert_timestamp(s):
return _datetime.datetime.fromtimestamp(int(s) / 1000000)
def _convert_trivial(x):
return x
if _sys.version_info >= (3,):
def _convert_uuid(s):
return _uuid.UUID(s.decode())
else:
_convert_uuid = _uuid.UUID
DEFAULT_CONVERTERS = {
'MESSAGE_ID': _convert_uuid,
'_MACHINE_ID': _convert_uuid,
'_BOOT_ID': _convert_uuid,
'PRIORITY': int,
'LEADER': int,
'SESSION_ID': int,
'USERSPACE_USEC': int,
'INITRD_USEC': int,
'KERNEL_USEC': int,
'_UID': int,
'_GID': int,
'_PID': int,
'SYSLOG_FACILITY': int,
'SYSLOG_PID': int,
'_AUDIT_SESSION': int,
'_AUDIT_LOGINUID': int,
'_SYSTEMD_SESSION': int,
'_SYSTEMD_OWNER_UID': int,
'CODE_LINE': int,
'ERRNO': int,
'EXIT_STATUS': int,
'_SOURCE_REALTIME_TIMESTAMP': _convert_timestamp,
'__REALTIME_TIMESTAMP': _convert_realtime,
'_SOURCE_MONOTONIC_TIMESTAMP': _convert_source_monotonic,
'__MONOTONIC_TIMESTAMP': _convert_monotonic,
'__CURSOR': _convert_trivial,
'COREDUMP': bytes,
'COREDUMP_PID': int,
'COREDUMP_UID': int,
'COREDUMP_GID': int,
'COREDUMP_SESSION': int,
'COREDUMP_SIGNAL': int,
'COREDUMP_TIMESTAMP': _convert_timestamp,
}
_IDENT_CHARACTER = set('ABCDEFGHIJKLMNOPQRTSUVWXYZ_0123456789')
def _valid_field_name(s):
return not (set(s) - _IDENT_CHARACTER)
class Reader(_Reader):
    """Access systemd journal entries.

    Entries are subject to filtering and limits, see `add_match`, `this_boot`,
    `this_machine` functions and the `data_threshold` attribute.

    Note that in order to access the system journal, a non-root user must have
    the necessary privileges, see journalctl(1) for details. Unprivileged users
    can access only their own journal.

    Example usage to print out all informational or higher level messages for
    systemd-udevd for this boot:

    >>> from systemd import journal
    >>> j = journal.Reader()
    >>> j.this_boot()
    >>> j.log_level(journal.LOG_INFO)
    >>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
    >>> for entry in j:   # doctest: +SKIP
    ...     print(entry['MESSAGE'])
    starting version ...

    See systemd.journal-fields(7) for more info on typical fields found in the
    journal.
    """

    def __init__(self, flags=None, path=None, files=None, converters=None):
        """Create a new Reader.

        Argument `flags` defines the open flags of the journal, which can be one
        of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
        on local machine only; RUNTIME_ONLY opens only volatile journal files;
        and SYSTEM_ONLY opens only journal files of system services and the kernel.

        Argument `path` is the directory of journal files, either a file system
        path or a file descriptor. Note that `flags`, `path`, and `files` are
        exclusive.

        Argument `converters` is a dictionary which updates the
        DEFAULT_CONVERTERS to convert journal field values. Field names are used
        as keys into this dictionary. The values must be single argument
        functions, which take a `bytes` object and return a converted
        value. When there's no entry for a field name, then the default UTF-8
        decoding will be attempted. If the conversion fails with a ValueError,
        unconverted bytes object will be returned. (Note that ValueError is a
        superclass of UnicodeDecodeError).

        Reader implements the context manager protocol: the journal will be
        closed when exiting the block.
        """
        if flags is None:
            if path is None and files is None:
                # This mimics journalctl behaviour of default to local journal only
                flags = LOCAL_ONLY
            else:
                flags = 0
        super(Reader, self).__init__(flags, path, files)
        if _sys.version_info >= (3, 3):
            # ChainMap layering: user-supplied converters (first map) shadow
            # the DEFAULT_CONVERTERS (second map) on lookup.
            self.converters = _ChainMap()
            if converters is not None:
                self.converters.maps.append(converters)
            self.converters.maps.append(DEFAULT_CONVERTERS)
        else:
            # Pre-3.3 fallback: plain dict merge, user converters win.
            self.converters = DEFAULT_CONVERTERS.copy()
            if converters is not None:
                self.converters.update(converters)

    def _convert_field(self, key, value):
        """Convert value using self.converters[key].

        If `key` is not present in self.converters, a standard unicode decoding
        will be attempted. If the conversion (either key-specific or the
        default one) fails with a ValueError, the original bytes object will be
        returned.
        """
        convert = self.converters.get(key, bytes.decode)
        try:
            return convert(value)
        except ValueError:
            # Leave in default bytes
            return value

    def _convert_entry(self, entry):
        """Convert entire journal entry utilising _convert_field."""
        result = {}
        for key, value in entry.items():
            # A list value means the field occurred multiple times in the
            # entry; convert each occurrence individually.
            if isinstance(value, list):
                result[key] = [self._convert_field(key, val) for val in value]
            else:
                result[key] = self._convert_field(key, value)
        return result

    def __iter__(self):
        """Return self.

        Part of the iterator protocol.
        """
        return self

    def __next__(self):
        """Return the next entry in the journal.

        Returns self.get_next() or raises StopIteration.

        Part of the iterator protocol.
        """
        ans = self.get_next()
        if ans:
            return ans
        else:
            raise StopIteration()

    if _sys.version_info < (3,):
        # Python 2 iterator protocol spells the method `next`.
        next = __next__

    def add_match(self, *args, **kwargs):
        """Add one or more matches to the filter journal log entries.

        All matches of different field are combined with logical AND, and
        matches of the same field are automatically combined with logical OR.
        Matches can be passed as strings of form "FIELD=value", or keyword
        arguments FIELD="value".
        """
        args = list(args)
        args.extend(_make_line(key, val) for key, val in kwargs.items())
        for arg in args:
            super(Reader, self).add_match(arg)

    def get_next(self, skip=1):
        r"""Return the next log entry as a dictionary.

        Entries will be processed with converters specified during Reader
        creation.

        Optional `skip` value will return the `skip`-th log entry.
        Currently a standard dictionary of fields is returned, but in the
        future this might be changed to a different mapping type, so the
        calling code should not make assumptions about a specific type.
        """
        # A negative skip moves backwards (see get_previous). An empty dict
        # is returned when the end of the journal is reached.
        if super(Reader, self)._next(skip):
            entry = super(Reader, self)._get_all()
            if entry:
                entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
                entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
                entry['__CURSOR'] = self._get_cursor()
                return self._convert_entry(entry)
        return dict()

    def get_previous(self, skip=1):
        r"""Return the previous log entry.

        Equivalent to get_next(-skip).

        Optional `skip` value will return the -`skip`-th log entry.
        Entries will be processed with converters specified during Reader
        creation.

        Currently a standard dictionary of fields is returned, but in the
        future this might be changed to a different mapping type, so the
        calling code should not make assumptions about a specific type.
        """
        return self.get_next(-skip)

    def query_unique(self, field):
        """Return a list of unique values appearing in the journal for the given
        `field`.

        Note this does not respect any journal matches.

        Entries will be processed with converters specified during
        Reader creation.
        """
        return set(self._convert_field(field, value)
                   for value in super(Reader, self).query_unique(field))

    def wait(self, timeout=None):
        """Wait for a change in the journal.

        `timeout` is the maximum time in seconds to wait, or None which
        means to wait forever.

        Returns one of NOP (no change), APPEND (new entries have been added to
        the end of the journal), or INVALIDATE (journal files have been added or
        removed).
        """
        # The C API takes microseconds; -1 means block indefinitely.
        us = -1 if timeout is None else int(timeout * 1000000)
        return super(Reader, self).wait(us)

    def seek_realtime(self, realtime):
        """Seek to a matching journal entry nearest to `timestamp` time.

        Argument `realtime` must be either an integer UNIX timestamp (in
        microseconds since the beginning of the UNIX epoch), or an float UNIX
        timestamp (in seconds since the beginning of the UNIX epoch), or a
        datetime.datetime instance. The integer form is deprecated.

        >>> import time
        >>> from systemd import journal
        >>> yesterday = time.time() - 24 * 60**2
        >>> j = journal.Reader()
        >>> j.seek_realtime(yesterday)
        """
        if isinstance(realtime, _datetime.datetime):
            # NOTE(review): "%s" is a platform/glibc strftime extension, not
            # standard C89 — TODO confirm portability on non-glibc platforms.
            realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
        elif not isinstance(realtime, int):
            realtime = int(realtime * 1000000)
        return super(Reader, self).seek_realtime(realtime)

    def seek_monotonic(self, monotonic, bootid=None):
        """Seek to a matching journal entry nearest to `monotonic` time.

        Argument `monotonic` is a timestamp from boot in either seconds or a
        datetime.timedelta instance. Argument `bootid` is a string or UUID
        representing which boot the monotonic time is reference to. Defaults to
        current bootid.
        """
        if isinstance(monotonic, _datetime.timedelta):
            monotonic = monotonic.total_seconds()
        # Convert seconds to the microsecond granularity used by the journal.
        monotonic = int(monotonic * 1000000)
        if isinstance(bootid, _uuid.UUID):
            bootid = bootid.hex
        return super(Reader, self).seek_monotonic(monotonic, bootid)

    def log_level(self, level):
        """Set maximum log `level` by setting matches for PRIORITY.
        """
        # Lower numeric priority is more severe, so "maximum level" means
        # matching every priority from 0 up to `level` inclusive (OR-ed).
        if 0 <= level <= 7:
            for i in range(level+1):
                self.add_match(PRIORITY="%d" % i)
        else:
            raise ValueError("Log level must be 0 <= level <= 7")

    def messageid_match(self, messageid):
        """Add match for log entries with specified `messageid`.

        `messageid` can be string of hexadecimal digits or a UUID
        instance. Standard message IDs can be found in systemd.id128.

        Equivalent to add_match(MESSAGE_ID=`messageid`).
        """
        if isinstance(messageid, _uuid.UUID):
            messageid = messageid.hex
        self.add_match(MESSAGE_ID=messageid)

    def this_boot(self, bootid=None):
        """Add match for _BOOT_ID for current boot or the specified boot ID.

        If specified, bootid should be either a UUID or a 32 digit hex number.

        Equivalent to add_match(_BOOT_ID='bootid').
        """
        if bootid is None:
            bootid = _id128.get_boot().hex
        else:
            bootid = getattr(bootid, 'hex', bootid)
        self.add_match(_BOOT_ID=bootid)

    def this_machine(self, machineid=None):
        """Add match for _MACHINE_ID equal to the ID of this machine.

        If specified, machineid should be either a UUID or a 32 digit hex
        number.

        Equivalent to add_match(_MACHINE_ID='machineid').
        """
        if machineid is None:
            machineid = _id128.get_machine().hex
        else:
            machineid = getattr(machineid, 'hex', machineid)
        self.add_match(_MACHINE_ID=machineid)
def _make_line(field, value):
if isinstance(value, bytes):
return field.encode('utf-8') + b'=' + value
elif isinstance(value, str):
return field + '=' + value
else:
return field + '=' + str(value)
def send(MESSAGE, MESSAGE_ID=None,
         CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,
         **kwargs):
    r"""Send a message to the journal.

    >>> from systemd import journal
    >>> journal.send('Hello world')
    >>> journal.send('Hello, again, world', FIELD2='Greetings!')
    >>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef')

    Value of the MESSAGE argument will be used for the MESSAGE= field. MESSAGE
    must be a string and will be sent as UTF-8 to the journal.

    MESSAGE_ID can be given to uniquely identify the type of message. It must be
    a string or a uuid.UUID object.

    CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to identify the caller.
    Unless at least one of the three is given, values are extracted from the
    stack frame of the caller of send(). CODE_FILE and CODE_FUNC must be
    strings, CODE_LINE must be an integer.

    Additional fields for the journal entry can only be specified as keyword
    arguments. The payload can be either a string or bytes. A string will be
    sent as UTF-8, and bytes will be sent as-is to the journal.

    Other useful fields include PRIORITY, SYSLOG_FACILITY, SYSLOG_IDENTIFIER,
    SYSLOG_PID.
    """
    args = ['MESSAGE=' + MESSAGE]
    if MESSAGE_ID is not None:
        # Accept either a uuid.UUID (use its .hex) or a plain hex string.
        id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)
        args.append('MESSAGE_ID=' + id)
    if CODE_LINE is CODE_FILE is CODE_FUNC is None:
        # limit=2 captures exactly the caller's frame (the last-but-one
        # entry); this is call-depth sensitive, so send() must be invoked
        # directly by the code to be identified.
        CODE_FILE, CODE_LINE, CODE_FUNC = _traceback.extract_stack(limit=2)[0][:3]
    if CODE_FILE is not None:
        args.append('CODE_FILE=' + CODE_FILE)
    if CODE_LINE is not None:
        args.append('CODE_LINE={:d}'.format(CODE_LINE))
    if CODE_FUNC is not None:
        args.append('CODE_FUNC=' + CODE_FUNC)
    args.extend(_make_line(key, val) for key, val in kwargs.items())
    return sendv(*args)
def stream(identifier=None, priority=LOG_INFO, level_prefix=False):
    r"""Return a file object wrapping a stream to journal.

    Log messages written to this file as simple newline separated text strings
    are written to the journal.

    The file will be line buffered, so messages are actually sent after a
    newline character is written.

    >>> from systemd import journal
    >>> stream = journal.stream('myapp')               # doctest: +SKIP
    >>> res = stream.write('message...\n')             # doctest: +SKIP

    will produce the following message in the journal::

      PRIORITY=7
      SYSLOG_IDENTIFIER=myapp
      MESSAGE=message...

    If identifier is None, a suitable default based on sys.argv[0] will be used.

    This interface can be used conveniently with the print function:

    >>> from __future__ import print_function
    >>> stream = journal.stream()                      # doctest: +SKIP
    >>> print('message...', file=stream)               # doctest: +SKIP

    priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`,
    `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.

    level_prefix is a boolean. If true, kernel-style log priority level prefixes
    (such as '<1>') are interpreted. See sd-daemon(3) for more information.
    """
    if identifier is None:
        # '-c' means the interpreter was started with `python -c ...`, so
        # argv[0] would not be a meaningful program name.
        if not _sys.argv or not _sys.argv[0] or _sys.argv[0] == '-c':
            identifier = 'python'
        else:
            identifier = _sys.argv[0]
    fd = stream_fd(identifier, priority, level_prefix)
    # buffering=1 gives line buffering, matching the docstring's promise that
    # each newline flushes a message to the journal.
    return _os.fdopen(fd, 'w', 1)
class JournalHandler(_logging.Handler):
    """Journal handler class for the Python logging framework.

    Please see the Python logging module documentation for an overview:
    http://docs.python.org/library/logging.html.

    To create a custom logger whose messages go only to journal:

    >>> import logging
    >>> log = logging.getLogger('custom_logger_name')
    >>> log.propagate = False
    >>> log.addHandler(JournalHandler())
    >>> log.warning("Some message: %s", 'detail')

    Note that by default, message levels `INFO` and `DEBUG` are ignored by the
    logging framework. To enable those log levels:

    >>> log.setLevel(logging.DEBUG)

    To redirect all logging messages to journal regardless of where they come
    from, attach it to the root logger:

    >>> logging.root.addHandler(JournalHandler())

    For more complex configurations when using `dictConfig` or `fileConfig`,
    specify `systemd.journal.JournalHandler` as the handler class. Only
    standard handler configuration options are supported: `level`, `formatter`,
    `filters`.

    To attach journal MESSAGE_ID, an extra field is supported:

    >>> import uuid
    >>> mid = uuid.UUID('0123456789ABCDEF0123456789ABCDEF')
    >>> log.warning("Message with ID", extra={'MESSAGE_ID': mid})

    Fields to be attached to all messages sent through this handler can be
    specified as keyword arguments. This probably makes sense only for
    SYSLOG_IDENTIFIER and similar fields which are constant for the whole
    program:

    >>> JournalHandler(SYSLOG_IDENTIFIER='my-cool-app')
    <...JournalHandler ...>

    The following journal fields will be sent: `MESSAGE`, `PRIORITY`,
    `THREAD_NAME`, `CODE_FILE`, `CODE_LINE`, `CODE_FUNC`, `LOGGER` (name as
    supplied to getLogger call), `MESSAGE_ID` (optional, see above),
    `SYSLOG_IDENTIFIER` (defaults to sys.argv[0]).

    The function used to actually send messages can be overridden using
    the `sender_function` parameter.
    """

    def __init__(self, level=_logging.NOTSET, sender_function=send, **kwargs):
        super(JournalHandler, self).__init__(level)
        # Reject field names the journal would not accept (see
        # _valid_field_name) up front rather than failing on every emit.
        for name in kwargs:
            if not _valid_field_name(name):
                raise ValueError('Invalid field name: ' + name)
        if 'SYSLOG_IDENTIFIER' not in kwargs:
            kwargs['SYSLOG_IDENTIFIER'] = _sys.argv[0]
        self.send = sender_function
        self._extra = kwargs

    def emit(self, record):
        """Write `record` as a journal event.

        MESSAGE is taken from the message provided by the user, and PRIORITY,
        LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are appended
        automatically. In addition, record.MESSAGE_ID will be used if present.
        """
        try:
            msg = self.format(record)
            pri = self.map_priority(record.levelno)
            # defaults
            extras = self._extra.copy()
            # higher priority
            if record.exc_text:
                extras['EXCEPTION_TEXT'] = record.exc_text
            if record.exc_info:
                extras['EXCEPTION_INFO'] = record.exc_info
            if record.args:
                extras['CODE_ARGS'] = str(record.args)
            # explicit arguments — highest priority: attributes set on the
            # record (e.g. via logging's `extra=` dict) override both the
            # handler defaults and the exception fields above.
            extras.update(record.__dict__)
            self.send(msg,
                      PRIORITY=format(pri),
                      LOGGER=record.name,
                      THREAD_NAME=record.threadName,
                      PROCESS_NAME=record.processName,
                      CODE_FILE=record.pathname,
                      CODE_LINE=record.lineno,
                      CODE_FUNC=record.funcName,
                      **extras)
        except Exception:
            # Delegate to logging's standard error handling; emit() must
            # never raise into application code.
            self.handleError(record)

    @staticmethod
    def map_priority(levelno):
        """Map logging levels to journald priorities.

        Since Python log level numbers are "sparse", we have to map numbers in
        between the standard levels too.
        """
        if levelno <= _logging.DEBUG:
            return LOG_DEBUG
        elif levelno <= _logging.INFO:
            return LOG_INFO
        elif levelno <= _logging.WARNING:
            return LOG_WARNING
        elif levelno <= _logging.ERROR:
            return LOG_ERR
        elif levelno <= _logging.CRITICAL:
            return LOG_CRIT
        else:
            return LOG_ALERT

    # Backwards-compatible camelCase alias matching logging's naming style.
    mapPriority = map_priority
|
systemd/python-systemd
|
systemd/journal.py
|
stream
|
python
|
def stream(identifier=None, priority=LOG_INFO, level_prefix=False):
r"""Return a file object wrapping a stream to journal.
Log messages written to this file as simple newline sepearted text strings
are written to the journal.
The file will be line buffered, so messages are actually sent after a
newline character is written.
>>> from systemd import journal
>>> stream = journal.stream('myapp') # doctest: +SKIP
>>> res = stream.write('message...\n') # doctest: +SKIP
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
If identifier is None, a suitable default based on sys.argv[0] will be used.
This interface can be used conveniently with the print function:
>>> from __future__ import print_function
>>> stream = journal.stream() # doctest: +SKIP
>>> print('message...', file=stream) # doctest: +SKIP
priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`,
`LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority level prefixes
(such as '<1>') are interpreted. See sd-daemon(3) for more information.
"""
if identifier is None:
if not _sys.argv or not _sys.argv[0] or _sys.argv[0] == '-c':
identifier = 'python'
else:
identifier = _sys.argv[0]
fd = stream_fd(identifier, priority, level_prefix)
return _os.fdopen(fd, 'w', 1)
|
r"""Return a file object wrapping a stream to journal.
Log messages written to this file as simple newline separated text strings
are written to the journal.
The file will be line buffered, so messages are actually sent after a
newline character is written.
>>> from systemd import journal
>>> stream = journal.stream('myapp') # doctest: +SKIP
>>> res = stream.write('message...\n') # doctest: +SKIP
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
If identifier is None, a suitable default based on sys.argv[0] will be used.
This interface can be used conveniently with the print function:
>>> from __future__ import print_function
>>> stream = journal.stream() # doctest: +SKIP
>>> print('message...', file=stream) # doctest: +SKIP
priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`,
`LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority level prefixes
(such as '<1>') are interpreted. See sd-daemon(3) for more information.
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L460-L501
| null |
# -*- Mode: python; coding:utf-8; indent-tabs-mode: nil -*- */
#
#
# Copyright 2012 David Strauss <david@davidstrauss.net>
# Copyright 2012 Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>
# Copyright 2012 Marti Raudsepp <marti@juffo.org>
#
# python-systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# python-systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with python-systemd; If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import sys as _sys
import datetime as _datetime
import uuid as _uuid
import traceback as _traceback
import os as _os
import logging as _logging
from syslog import (LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR,
LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG)
if _sys.version_info >= (3,3):
from collections import ChainMap as _ChainMap
from ._journal import __version__, sendv, stream_fd
from ._reader import (_Reader, NOP, APPEND, INVALIDATE,
LOCAL_ONLY, RUNTIME_ONLY,
SYSTEM, SYSTEM_ONLY, CURRENT_USER,
OS_ROOT,
_get_catalog)
from . import id128 as _id128
if _sys.version_info >= (3,):
from ._reader import Monotonic
else:
Monotonic = tuple
def _convert_monotonic(m):
return Monotonic((_datetime.timedelta(microseconds=m[0]),
_uuid.UUID(bytes=m[1])))
def _convert_source_monotonic(s):
return _datetime.timedelta(microseconds=int(s))
def _convert_realtime(t):
return _datetime.datetime.fromtimestamp(t / 1000000)
def _convert_timestamp(s):
return _datetime.datetime.fromtimestamp(int(s) / 1000000)
def _convert_trivial(x):
return x
if _sys.version_info >= (3,):
def _convert_uuid(s):
return _uuid.UUID(s.decode())
else:
_convert_uuid = _uuid.UUID
DEFAULT_CONVERTERS = {
'MESSAGE_ID': _convert_uuid,
'_MACHINE_ID': _convert_uuid,
'_BOOT_ID': _convert_uuid,
'PRIORITY': int,
'LEADER': int,
'SESSION_ID': int,
'USERSPACE_USEC': int,
'INITRD_USEC': int,
'KERNEL_USEC': int,
'_UID': int,
'_GID': int,
'_PID': int,
'SYSLOG_FACILITY': int,
'SYSLOG_PID': int,
'_AUDIT_SESSION': int,
'_AUDIT_LOGINUID': int,
'_SYSTEMD_SESSION': int,
'_SYSTEMD_OWNER_UID': int,
'CODE_LINE': int,
'ERRNO': int,
'EXIT_STATUS': int,
'_SOURCE_REALTIME_TIMESTAMP': _convert_timestamp,
'__REALTIME_TIMESTAMP': _convert_realtime,
'_SOURCE_MONOTONIC_TIMESTAMP': _convert_source_monotonic,
'__MONOTONIC_TIMESTAMP': _convert_monotonic,
'__CURSOR': _convert_trivial,
'COREDUMP': bytes,
'COREDUMP_PID': int,
'COREDUMP_UID': int,
'COREDUMP_GID': int,
'COREDUMP_SESSION': int,
'COREDUMP_SIGNAL': int,
'COREDUMP_TIMESTAMP': _convert_timestamp,
}
_IDENT_CHARACTER = set('ABCDEFGHIJKLMNOPQRTSUVWXYZ_0123456789')
def _valid_field_name(s):
return not (set(s) - _IDENT_CHARACTER)
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_treshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueEror is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of default to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is reference to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
def get_catalog(mid):
    """Return catalog entry for the specified ID.

    `mid` should be either a UUID or a 32 digit hex number.
    """
    # Keep the isinstance check (rather than getattr(mid, 'hex', mid)) so
    # only real UUID objects are converted; other inputs pass through as-is.
    key = mid.hex if isinstance(mid, _uuid.UUID) else mid
    return _get_catalog(key)
def _make_line(field, value):
if isinstance(value, bytes):
return field.encode('utf-8') + b'=' + value
elif isinstance(value, str):
return field + '=' + value
else:
return field + '=' + str(value)
def send(MESSAGE, MESSAGE_ID=None,
CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,
**kwargs):
r"""Send a message to the journal.
>>> from systemd import journal
>>> journal.send('Hello world')
>>> journal.send('Hello, again, world', FIELD2='Greetings!')
>>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef')
Value of the MESSAGE argument will be used for the MESSAGE= field. MESSAGE
must be a string and will be sent as UTF-8 to the journal.
MESSAGE_ID can be given to uniquely identify the type of message. It must be
a string or a uuid.UUID object.
CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to identify the caller.
Unless at least on of the three is given, values are extracted from the
stack frame of the caller of send(). CODE_FILE and CODE_FUNC must be
strings, CODE_LINE must be an integer.
Additional fields for the journal entry can only be specified as keyword
arguments. The payload can be either a string or bytes. A string will be
sent as UTF-8, and bytes will be sent as-is to the journal.
Other useful fields include PRIORITY, SYSLOG_FACILITY, SYSLOG_IDENTIFIER,
SYSLOG_PID.
"""
args = ['MESSAGE=' + MESSAGE]
if MESSAGE_ID is not None:
id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)
args.append('MESSAGE_ID=' + id)
if CODE_LINE is CODE_FILE is CODE_FUNC is None:
CODE_FILE, CODE_LINE, CODE_FUNC = _traceback.extract_stack(limit=2)[0][:3]
if CODE_FILE is not None:
args.append('CODE_FILE=' + CODE_FILE)
if CODE_LINE is not None:
args.append('CODE_LINE={:d}'.format(CODE_LINE))
if CODE_FUNC is not None:
args.append('CODE_FUNC=' + CODE_FUNC)
args.extend(_make_line(key, val) for key, val in kwargs.items())
return sendv(*args)
class JournalHandler(_logging.Handler):
"""Journal handler class for the Python logging framework.
Please see the Python logging module documentation for an overview:
http://docs.python.org/library/logging.html.
To create a custom logger whose messages go only to journal:
>>> import logging
>>> log = logging.getLogger('custom_logger_name')
>>> log.propagate = False
>>> log.addHandler(JournalHandler())
>>> log.warning("Some message: %s", 'detail')
Note that by default, message levels `INFO` and `DEBUG` are ignored by the
logging framework. To enable those log levels:
>>> log.setLevel(logging.DEBUG)
To redirect all logging messages to journal regardless of where they come
from, attach it to the root logger:
>>> logging.root.addHandler(JournalHandler())
For more complex configurations when using `dictConfig` or `fileConfig`,
specify `systemd.journal.JournalHandler` as the handler class. Only
standard handler configuration options are supported: `level`, `formatter`,
`filters`.
To attach journal MESSAGE_ID, an extra field is supported:
>>> import uuid
>>> mid = uuid.UUID('0123456789ABCDEF0123456789ABCDEF')
>>> log.warning("Message with ID", extra={'MESSAGE_ID': mid})
Fields to be attached to all messages sent through this handler can be
specified as keyword arguments. This probably makes sense only for
SYSLOG_IDENTIFIER and similar fields which are constant for the whole
program:
>>> JournalHandler(SYSLOG_IDENTIFIER='my-cool-app')
<...JournalHandler ...>
The following journal fields will be sent: `MESSAGE`, `PRIORITY`,
`THREAD_NAME`, `CODE_FILE`, `CODE_LINE`, `CODE_FUNC`, `LOGGER` (name as
supplied to getLogger call), `MESSAGE_ID` (optional, see above),
`SYSLOG_IDENTIFIER` (defaults to sys.argv[0]).
The function used to actually send messages can be overridden using
the `sender_function` parameter.
"""
def __init__(self, level=_logging.NOTSET, sender_function=send, **kwargs):
super(JournalHandler, self).__init__(level)
for name in kwargs:
if not _valid_field_name(name):
raise ValueError('Invalid field name: ' + name)
if 'SYSLOG_IDENTIFIER' not in kwargs:
kwargs['SYSLOG_IDENTIFIER'] = _sys.argv[0]
self.send = sender_function
self._extra = kwargs
def emit(self, record):
"""Write `record` as a journal event.
MESSAGE is taken from the message provided by the user, and PRIORITY,
LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be used if present.
"""
try:
msg = self.format(record)
pri = self.map_priority(record.levelno)
# defaults
extras = self._extra.copy()
# higher priority
if record.exc_text:
extras['EXCEPTION_TEXT'] = record.exc_text
if record.exc_info:
extras['EXCEPTION_INFO'] = record.exc_info
if record.args:
extras['CODE_ARGS'] = str(record.args)
# explicit arguments — highest priority
extras.update(record.__dict__)
self.send(msg,
PRIORITY=format(pri),
LOGGER=record.name,
THREAD_NAME=record.threadName,
PROCESS_NAME=record.processName,
CODE_FILE=record.pathname,
CODE_LINE=record.lineno,
CODE_FUNC=record.funcName,
**extras)
except Exception:
self.handleError(record)
@staticmethod
def map_priority(levelno):
"""Map logging levels to journald priorities.
Since Python log level numbers are "sparse", we have to map numbers in
between the standard levels too.
"""
if levelno <= _logging.DEBUG:
return LOG_DEBUG
elif levelno <= _logging.INFO:
return LOG_INFO
elif levelno <= _logging.WARNING:
return LOG_WARNING
elif levelno <= _logging.ERROR:
return LOG_ERR
elif levelno <= _logging.CRITICAL:
return LOG_CRIT
else:
return LOG_ALERT
mapPriority = map_priority
|
systemd/python-systemd
|
systemd/journal.py
|
Reader._convert_field
|
python
|
def _convert_field(self, key, value):
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
|
Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L185-L198
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_threshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueError is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of defaulting to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or a float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is referenced to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be a string of hexadecimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader._convert_entry
|
python
|
def _convert_entry(self, entry):
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
|
Convert entire journal entry utilising _convert_field.
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L200-L208
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_threshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueError is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of defaulting to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or a float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is referenced to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be a string of hexadecimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader.add_match
|
python
|
def add_match(self, *args, **kwargs):
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
|
Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L233-L244
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_threshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueError is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of defaulting to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is referenced to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader.get_next
|
python
|
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
|
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L246-L265
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_threshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueError is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of defaulting to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is reference to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader.query_unique
|
python
|
def query_unique(self, field):
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
|
Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L283-L293
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_treshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueEror is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of default to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is reference to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader.wait
|
python
|
def wait(self, timeout=None):
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
|
Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L295-L306
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_treshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueEror is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of default to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is reference to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader.seek_realtime
|
python
|
def seek_realtime(self, realtime):
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
|
Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L308-L327
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_treshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueEror is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of default to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is reference to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader.seek_monotonic
|
python
|
def seek_monotonic(self, monotonic, bootid=None):
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
|
Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is reference to. Defaults to
current bootid.
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L329-L342
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_treshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueEror is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of default to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader.log_level
|
python
|
def log_level(self, level):
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
|
Set maximum log `level` by setting matches for PRIORITY.
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L344-L351
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_treshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueEror is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of default to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is reference to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader.messageid_match
|
python
|
def messageid_match(self, messageid):
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
|
Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L353-L363
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_treshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueEror is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of default to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is reference to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader.this_boot
|
python
|
def this_boot(self, bootid=None):
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
|
Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L365-L376
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_treshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueEror is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of default to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is reference to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
systemd/python-systemd
|
systemd/journal.py
|
Reader.this_machine
|
python
|
def this_machine(self, machineid=None):
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
|
Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex
number.
Equivalent to add_match(_MACHINE_ID='machineid').
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L378-L390
| null |
class Reader(_Reader):
"""Access systemd journal entries.
Entries are subject to filtering and limits, see `add_match`, `this_boot`,
`this_machine` functions and the `data_treshold` attribute.
Note that in order to access the system journal, a non-root user must have
the necessary privileges, see journalctl(1) for details. Unprivileged users
can access only their own journal.
Example usage to print out all informational or higher level messages for
systemd-udevd for this boot:
>>> from systemd import journal
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j: # doctest: +SKIP
... print(entry['MESSAGE'])
starting version ...
See systemd.journal-fields(7) for more info on typical fields found in the
journal.
"""
def __init__(self, flags=None, path=None, files=None, converters=None):
"""Create a new Reader.
Argument `flags` defines the open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens journal
on local machine only; RUNTIME_ONLY opens only volatile journal files;
and SYSTEM_ONLY opens only journal files of system services and the kernel.
Argument `path` is the directory of journal files, either a file system
path or a file descriptor. Note that `flags`, `path`, and `files` are
exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field names are used
as keys into this dictionary. The values must be single argument
functions, which take a `bytes` object and return a converted
value. When there's no entry for a field name, then the default UTF-8
decoding will be attempted. If the conversion fails with a ValueError,
unconverted bytes object will be returned. (Note that ValueEror is a
superclass of UnicodeDecodeError).
Reader implements the context manager protocol: the journal will be
closed when exiting the block.
"""
if flags is None:
if path is None and files is None:
# This mimics journalctl behaviour of default to local journal only
flags = LOCAL_ONLY
else:
flags = 0
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3, 3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key].
If `key` is not present in self.converters, a standard unicode decoding
will be attempted. If the conversion (either key-specific or the
default one) fails with a ValueError, the original bytes object will be
returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _convert_field."""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Return self.
Part of the iterator protocol.
"""
return self
def __next__(self):
"""Return the next entry in the journal.
Returns self.get_next() or raises StopIteration.
Part of the iterator protocol.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined with logical AND, and
matches of the same field are automatically combined with logical OR.
Matches can be passed as strings of form "FIELD=value", or keyword
arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
r"""Return the next log entry as a dictionary.
Entries will be processed with converters specified during Reader
creation.
Optional `skip` value will return the `skip`-th log entry.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
r"""Return the previous log entry.
Equivalent to get_next(-skip).
Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during Reader
creation.
Currently a standard dictionary of fields is returned, but in the
future this might be changed to a different mapping type, so the
calling code should not make assumptions about a specific type.
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return a list of unique values appearing in the journal for the given
`field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal.
`timeout` is the maximum time in seconds to wait, or None which
means to wait forever.
Returns one of NOP (no change), APPEND (new entries have been added to
the end of the journal), or INVALIDATE (journal files have been added or
removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `timestamp` time.
Argument `realtime` must be either an integer UNIX timestamp (in
microseconds since the beginning of the UNIX epoch), or an float UNIX
timestamp (in seconds since the beginning of the UNIX epoch), or a
datetime.datetime instance. The integer form is deprecated.
>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
"""
if isinstance(realtime, _datetime.datetime):
realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
elif not isinstance(realtime, int):
realtime = int(realtime * 1000000)
return super(Reader, self).seek_realtime(realtime)
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either seconds or a
datetime.timedelta instance. Argument `bootid` is a string or UUID
representing which boot the monotonic time is reference to. Defaults to
current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be string of hexadicimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID for current boot or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
|
systemd/python-systemd
|
systemd/journal.py
|
JournalHandler.emit
|
python
|
def emit(self, record):
try:
msg = self.format(record)
pri = self.map_priority(record.levelno)
# defaults
extras = self._extra.copy()
# higher priority
if record.exc_text:
extras['EXCEPTION_TEXT'] = record.exc_text
if record.exc_info:
extras['EXCEPTION_INFO'] = record.exc_info
if record.args:
extras['CODE_ARGS'] = str(record.args)
# explicit arguments — highest priority
extras.update(record.__dict__)
self.send(msg,
PRIORITY=format(pri),
LOGGER=record.name,
THREAD_NAME=record.threadName,
PROCESS_NAME=record.processName,
CODE_FILE=record.pathname,
CODE_LINE=record.lineno,
CODE_FUNC=record.funcName,
**extras)
except Exception:
self.handleError(record)
|
Write `record` as a journal event.
MESSAGE is taken from the message provided by the user, and PRIORITY,
LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be used if present.
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L568-L604
|
[
"def map_priority(levelno):\n \"\"\"Map logging levels to journald priorities.\n\n Since Python log level numbers are \"sparse\", we have to map numbers in\n between the standard levels too.\n \"\"\"\n if levelno <= _logging.DEBUG:\n return LOG_DEBUG\n elif levelno <= _logging.INFO:\n return LOG_INFO\n elif levelno <= _logging.WARNING:\n return LOG_WARNING\n elif levelno <= _logging.ERROR:\n return LOG_ERR\n elif levelno <= _logging.CRITICAL:\n return LOG_CRIT\n else:\n return LOG_ALERT\n",
"def send(self, MESSAGE, MESSAGE_ID=None,\n CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,\n **kwargs):\n args = ['MESSAGE=' + MESSAGE]\n\n if MESSAGE_ID is not None:\n id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)\n args.append('MESSAGE_ID=' + id)\n\n if CODE_LINE is CODE_FILE is CODE_FUNC is None:\n CODE_FILE, CODE_LINE, CODE_FUNC = _traceback.extract_stack(limit=2)[0][:3]\n if CODE_FILE is not None:\n args.append('CODE_FILE=' + CODE_FILE)\n if CODE_LINE is not None:\n args.append('CODE_LINE={:d}'.format(CODE_LINE))\n if CODE_FUNC is not None:\n args.append('CODE_FUNC=' + CODE_FUNC)\n\n args.extend(_make_line(key, val) for key, val in kwargs.items())\n self.buf.append(args)\n"
] |
class JournalHandler(_logging.Handler):
"""Journal handler class for the Python logging framework.
Please see the Python logging module documentation for an overview:
http://docs.python.org/library/logging.html.
To create a custom logger whose messages go only to journal:
>>> import logging
>>> log = logging.getLogger('custom_logger_name')
>>> log.propagate = False
>>> log.addHandler(JournalHandler())
>>> log.warning("Some message: %s", 'detail')
Note that by default, message levels `INFO` and `DEBUG` are ignored by the
logging framework. To enable those log levels:
>>> log.setLevel(logging.DEBUG)
To redirect all logging messages to journal regardless of where they come
from, attach it to the root logger:
>>> logging.root.addHandler(JournalHandler())
For more complex configurations when using `dictConfig` or `fileConfig`,
specify `systemd.journal.JournalHandler` as the handler class. Only
standard handler configuration options are supported: `level`, `formatter`,
`filters`.
To attach journal MESSAGE_ID, an extra field is supported:
>>> import uuid
>>> mid = uuid.UUID('0123456789ABCDEF0123456789ABCDEF')
>>> log.warning("Message with ID", extra={'MESSAGE_ID': mid})
Fields to be attached to all messages sent through this handler can be
specified as keyword arguments. This probably makes sense only for
SYSLOG_IDENTIFIER and similar fields which are constant for the whole
program:
>>> JournalHandler(SYSLOG_IDENTIFIER='my-cool-app')
<...JournalHandler ...>
The following journal fields will be sent: `MESSAGE`, `PRIORITY`,
`THREAD_NAME`, `CODE_FILE`, `CODE_LINE`, `CODE_FUNC`, `LOGGER` (name as
supplied to getLogger call), `MESSAGE_ID` (optional, see above),
`SYSLOG_IDENTIFIER` (defaults to sys.argv[0]).
The function used to actually send messages can be overridden using
the `sender_function` parameter.
"""
def __init__(self, level=_logging.NOTSET, sender_function=send, **kwargs):
super(JournalHandler, self).__init__(level)
for name in kwargs:
if not _valid_field_name(name):
raise ValueError('Invalid field name: ' + name)
if 'SYSLOG_IDENTIFIER' not in kwargs:
kwargs['SYSLOG_IDENTIFIER'] = _sys.argv[0]
self.send = sender_function
self._extra = kwargs
@staticmethod
def map_priority(levelno):
"""Map logging levels to journald priorities.
Since Python log level numbers are "sparse", we have to map numbers in
between the standard levels too.
"""
if levelno <= _logging.DEBUG:
return LOG_DEBUG
elif levelno <= _logging.INFO:
return LOG_INFO
elif levelno <= _logging.WARNING:
return LOG_WARNING
elif levelno <= _logging.ERROR:
return LOG_ERR
elif levelno <= _logging.CRITICAL:
return LOG_CRIT
else:
return LOG_ALERT
mapPriority = map_priority
|
systemd/python-systemd
|
systemd/daemon.py
|
is_socket_sockaddr
|
python
|
def is_socket_sockaddr(fileobj, address, type=0, flowinfo=0, listening=-1):
fd = _convert_fileobj(fileobj)
return _is_socket_sockaddr(fd, address, type, flowinfo, listening)
|
Check socket type, address and/or port, flowinfo, listening state.
Wraps sd_is_socket_inet_sockaddr(3).
`address` is a systemd-style numerical IPv4 or IPv6 address as used in
ListenStream=. A port may be included after a colon (":").
See systemd.socket(5) for details.
Constants for `family` are defined in the socket module.
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/daemon.py#L33-L45
|
[
"def _convert_fileobj(fileobj):\n try:\n return fileobj.fileno()\n except AttributeError:\n return fileobj\n"
] |
from socket import AF_UNSPEC as _AF_UNSPEC
from ._daemon import (__version__,
booted,
notify,
_listen_fds,
_is_fifo,
_is_socket,
_is_socket_inet,
_is_socket_sockaddr,
_is_socket_unix,
_is_mq,
LISTEN_FDS_START)
def _convert_fileobj(fileobj):
try:
return fileobj.fileno()
except AttributeError:
return fileobj
def is_fifo(fileobj, path=None):
fd = _convert_fileobj(fileobj)
return _is_fifo(fd, path)
def is_socket(fileobj, family=_AF_UNSPEC, type=0, listening=-1):
fd = _convert_fileobj(fileobj)
return _is_socket(fd, family, type, listening)
def is_socket_inet(fileobj, family=_AF_UNSPEC, type=0, listening=-1, port=0):
fd = _convert_fileobj(fileobj)
return _is_socket_inet(fd, family, type, listening, port)
def is_socket_unix(fileobj, type=0, listening=-1, path=None):
fd = _convert_fileobj(fileobj)
return _is_socket_unix(fd, type, listening, path)
def is_mq(fileobj, path=None):
fd = _convert_fileobj(fileobj)
return _is_mq(fd, path)
def listen_fds(unset_environment=True):
"""Return a list of socket activated descriptors
Example::
(in primary window)
$ systemd-activate -l 2000 python3 -c \\
'from systemd.daemon import listen_fds; print(listen_fds())'
(in another window)
$ telnet localhost 2000
(in primary window)
...
Execing python3 (...)
[3]
"""
num = _listen_fds(unset_environment)
return list(range(LISTEN_FDS_START, LISTEN_FDS_START + num))
|
systemd/python-systemd
|
systemd/daemon.py
|
listen_fds
|
python
|
def listen_fds(unset_environment=True):
num = _listen_fds(unset_environment)
return list(range(LISTEN_FDS_START, LISTEN_FDS_START + num))
|
Return a list of socket activated descriptors
Example::
(in primary window)
$ systemd-activate -l 2000 python3 -c \\
'from systemd.daemon import listen_fds; print(listen_fds())'
(in another window)
$ telnet localhost 2000
(in primary window)
...
Execing python3 (...)
[3]
|
train
|
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/daemon.py#L55-L71
| null |
from socket import AF_UNSPEC as _AF_UNSPEC
from ._daemon import (__version__,
booted,
notify,
_listen_fds,
_is_fifo,
_is_socket,
_is_socket_inet,
_is_socket_sockaddr,
_is_socket_unix,
_is_mq,
LISTEN_FDS_START)
def _convert_fileobj(fileobj):
try:
return fileobj.fileno()
except AttributeError:
return fileobj
def is_fifo(fileobj, path=None):
fd = _convert_fileobj(fileobj)
return _is_fifo(fd, path)
def is_socket(fileobj, family=_AF_UNSPEC, type=0, listening=-1):
fd = _convert_fileobj(fileobj)
return _is_socket(fd, family, type, listening)
def is_socket_inet(fileobj, family=_AF_UNSPEC, type=0, listening=-1, port=0):
fd = _convert_fileobj(fileobj)
return _is_socket_inet(fd, family, type, listening, port)
def is_socket_sockaddr(fileobj, address, type=0, flowinfo=0, listening=-1):
"""Check socket type, address and/or port, flowinfo, listening state.
Wraps sd_is_socket_inet_sockaddr(3).
`address` is a systemd-style numerical IPv4 or IPv6 address as used in
ListenStream=. A port may be included after a colon (":").
See systemd.socket(5) for details.
Constants for `family` are defined in the socket module.
"""
fd = _convert_fileobj(fileobj)
return _is_socket_sockaddr(fd, address, type, flowinfo, listening)
def is_socket_unix(fileobj, type=0, listening=-1, path=None):
fd = _convert_fileobj(fileobj)
return _is_socket_unix(fd, type, listening, path)
def is_mq(fileobj, path=None):
fd = _convert_fileobj(fileobj)
return _is_mq(fd, path)
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.uproot
|
python
|
def uproot(tree):
uprooted = tree.copy()
uprooted.parent = None
for child in tree.all_children():
uprooted.add_general_child(child)
return uprooted
|
Take a subranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L35-L44
|
[
"def copy(self):\n \"\"\"\n Deep Copy of a LabeledTree\n \"\"\"\n return LabeledTree(\n udepth = self.udepth,\n depth = self.depth,\n text = self.text,\n label = self.label,\n children = self.children.copy() if self.children != None else [],\n parent = self.parent)\n",
"def add_general_child(self, child):\n self.general_children.append(child)\n",
"def all_children(self):\n if len(self.children) > 0:\n for child in self.children:\n for subchild in child.all_children():\n yield subchild\n yield self\n else:\n yield self\n"
] |
class LabeledTree(object):
SCORE_MAPPING = [-12.5,-6.25,0.0,6.25,12.5]
def __init__(self,
depth=0,
text=None,
label=None,
children=None,
parent=None,
udepth=1):
self.label = label
self.children = children if children != None else []
self.general_children = []
self.text = text
self.parent = parent
self.depth = depth
self.udepth = udepth
def shrink_tree(tree, final_depth):
if tree.udepth <= final_depth:
return tree
for branch in tree.general_children:
if branch.udepth == final_depth:
return branch.uproot()
def shrunk_trees(tree, final_depth):
if tree.udepth <= final_depth:
yield tree
for branch in tree.general_children:
if branch.udepth == final_depth:
yield branch.uproot()
def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent)
def add_child(self, child):
"""
Adds a branch to the current tree.
"""
self.children.append(child)
child.parent = self
self.udepth = max([child.udepth for child in self.children]) + 1
def add_general_child(self, child):
self.general_children.append(child)
def all_children(self):
if len(self.children) > 0:
for child in self.children:
for subchild in child.all_children():
yield subchild
yield self
else:
yield self
def lowercase(self):
"""
Lowercase all strings in this tree.
Works recursively and in-place.
"""
if len(self.children) > 0:
for child in self.children:
child.lowercase()
else:
self.text = self.text.lower()
def to_dict(self, index=0):
"""
Dict format for use in Javascript / Jason Chuang's display technology.
"""
index += 1
rep = {}
rep["index"] = index
rep["leaf"] = len(self.children) == 0
rep["depth"] = self.udepth
rep["scoreDistr"] = [0.0] * len(LabeledTree.SCORE_MAPPING)
# dirac distribution at correct label
if self.label is not None:
rep["scoreDistr"][self.label] = 1.0
mapping = LabeledTree.SCORE_MAPPING[:]
rep["rating"] = mapping[self.label] - min(mapping)
# if you are using this method for printing predictions
# from a model, the the dot product with the model's output
# distribution should be taken with this list:
rep["numChildren"] = len(self.children)
text = self.text if self.text != None else ""
seen_tokens = 0
witnessed_pixels = 0
for i, child in enumerate(self.children):
if i > 0:
text += " "
child_key = "child%d" % (i)
(rep[child_key], index) = child.to_dict(index)
text += rep[child_key]["text"]
seen_tokens += rep[child_key]["tokens"]
witnessed_pixels += rep[child_key]["pixels"]
rep["text"] = text
rep["tokens"] = 1 if (self.text != None and len(self.text) > 0) else seen_tokens
rep["pixels"] = witnessed_pixels + 3 if len(self.children) > 0 else text_size(self.text)
return (rep, index)
def to_json(self):
rep, _ = self.to_dict()
return json.dumps(rep)
def display(self):
from IPython.display import Javascript, display
display(Javascript("createTrees(["+self.to_json()+"])"))
display(Javascript("updateTrees()"))
def to_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_lines(), self.children[1].to_lines()
self_line = [left_lines[0] + " " + right_lines[0]]
return self_line + left_lines + right_lines
else:
return [self.text]
def to_labeled_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_labeled_lines(), self.children[1].to_labeled_lines()
self_line = [(self.label, left_lines[0][1] + " " + right_lines[0][1])]
return self_line + left_lines + right_lines
else:
return [(self.label, self.text)]
def __str__(self):
"""
String representation of a tree as visible in original corpus.
print(tree)
#=> '(2 (2 not) (3 good))'
Outputs
-------
str: the String representation of the tree.
"""
if len(self.children) > 0:
rep = "(%d " % self.label
for child in self.children:
rep += str(child)
return rep + ")"
else:
text = self.text\
.replace("(", "-LRB-")\
.replace(")", "-RRB-")\
.replace("{", "-LCB-")\
.replace("}", "-RCB-")\
.replace("[", "-LSB-")\
.replace("]", "-RSB-")
return ("(%d %s) " % (self.label, text))
@staticmethod
def inject_visualization_javascript(tree_width=1200, tree_height=400, tree_node_radius=10):
"""
In an Ipython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
"""
from .javascript import insert_sentiment_markup
insert_sentiment_markup(tree_width=tree_width, tree_height=tree_height, tree_node_radius=tree_node_radius)
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.copy
|
python
|
def copy(self):
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent)
|
Deep Copy of a LabeledTree
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L60-L70
| null |
class LabeledTree(object):
SCORE_MAPPING = [-12.5,-6.25,0.0,6.25,12.5]
def __init__(self,
depth=0,
text=None,
label=None,
children=None,
parent=None,
udepth=1):
self.label = label
self.children = children if children != None else []
self.general_children = []
self.text = text
self.parent = parent
self.depth = depth
self.udepth = udepth
def uproot(tree):
"""
Take a subranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
"""
uprooted = tree.copy()
uprooted.parent = None
for child in tree.all_children():
uprooted.add_general_child(child)
return uprooted
def shrink_tree(tree, final_depth):
if tree.udepth <= final_depth:
return tree
for branch in tree.general_children:
if branch.udepth == final_depth:
return branch.uproot()
def shrunk_trees(tree, final_depth):
if tree.udepth <= final_depth:
yield tree
for branch in tree.general_children:
if branch.udepth == final_depth:
yield branch.uproot()
def add_child(self, child):
"""
Adds a branch to the current tree.
"""
self.children.append(child)
child.parent = self
self.udepth = max([child.udepth for child in self.children]) + 1
def add_general_child(self, child):
self.general_children.append(child)
def all_children(self):
if len(self.children) > 0:
for child in self.children:
for subchild in child.all_children():
yield subchild
yield self
else:
yield self
def lowercase(self):
"""
Lowercase all strings in this tree.
Works recursively and in-place.
"""
if len(self.children) > 0:
for child in self.children:
child.lowercase()
else:
self.text = self.text.lower()
def to_dict(self, index=0):
"""
Dict format for use in Javascript / Jason Chuang's display technology.
"""
index += 1
rep = {}
rep["index"] = index
rep["leaf"] = len(self.children) == 0
rep["depth"] = self.udepth
rep["scoreDistr"] = [0.0] * len(LabeledTree.SCORE_MAPPING)
# dirac distribution at correct label
if self.label is not None:
rep["scoreDistr"][self.label] = 1.0
mapping = LabeledTree.SCORE_MAPPING[:]
rep["rating"] = mapping[self.label] - min(mapping)
# if you are using this method for printing predictions
# from a model, the the dot product with the model's output
# distribution should be taken with this list:
rep["numChildren"] = len(self.children)
text = self.text if self.text != None else ""
seen_tokens = 0
witnessed_pixels = 0
for i, child in enumerate(self.children):
if i > 0:
text += " "
child_key = "child%d" % (i)
(rep[child_key], index) = child.to_dict(index)
text += rep[child_key]["text"]
seen_tokens += rep[child_key]["tokens"]
witnessed_pixels += rep[child_key]["pixels"]
rep["text"] = text
rep["tokens"] = 1 if (self.text != None and len(self.text) > 0) else seen_tokens
rep["pixels"] = witnessed_pixels + 3 if len(self.children) > 0 else text_size(self.text)
return (rep, index)
def to_json(self):
rep, _ = self.to_dict()
return json.dumps(rep)
def display(self):
from IPython.display import Javascript, display
display(Javascript("createTrees(["+self.to_json()+"])"))
display(Javascript("updateTrees()"))
def to_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_lines(), self.children[1].to_lines()
self_line = [left_lines[0] + " " + right_lines[0]]
return self_line + left_lines + right_lines
else:
return [self.text]
def to_labeled_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_labeled_lines(), self.children[1].to_labeled_lines()
self_line = [(self.label, left_lines[0][1] + " " + right_lines[0][1])]
return self_line + left_lines + right_lines
else:
return [(self.label, self.text)]
def __str__(self):
"""
String representation of a tree as visible in original corpus.
print(tree)
#=> '(2 (2 not) (3 good))'
Outputs
-------
str: the String representation of the tree.
"""
if len(self.children) > 0:
rep = "(%d " % self.label
for child in self.children:
rep += str(child)
return rep + ")"
else:
text = self.text\
.replace("(", "-LRB-")\
.replace(")", "-RRB-")\
.replace("{", "-LCB-")\
.replace("}", "-RCB-")\
.replace("[", "-LSB-")\
.replace("]", "-RSB-")
return ("(%d %s) " % (self.label, text))
@staticmethod
def inject_visualization_javascript(tree_width=1200, tree_height=400, tree_node_radius=10):
"""
In an Ipython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
"""
from .javascript import insert_sentiment_markup
insert_sentiment_markup(tree_width=tree_width, tree_height=tree_height, tree_node_radius=tree_node_radius)
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.add_child
|
python
|
def add_child(self, child):
self.children.append(child)
child.parent = self
self.udepth = max([child.udepth for child in self.children]) + 1
|
Adds a branch to the current tree.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L72-L78
| null |
class LabeledTree(object):
SCORE_MAPPING = [-12.5,-6.25,0.0,6.25,12.5]
def __init__(self,
depth=0,
text=None,
label=None,
children=None,
parent=None,
udepth=1):
self.label = label
self.children = children if children != None else []
self.general_children = []
self.text = text
self.parent = parent
self.depth = depth
self.udepth = udepth
def uproot(tree):
"""
Take a subranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
"""
uprooted = tree.copy()
uprooted.parent = None
for child in tree.all_children():
uprooted.add_general_child(child)
return uprooted
def shrink_tree(tree, final_depth):
if tree.udepth <= final_depth:
return tree
for branch in tree.general_children:
if branch.udepth == final_depth:
return branch.uproot()
def shrunk_trees(tree, final_depth):
if tree.udepth <= final_depth:
yield tree
for branch in tree.general_children:
if branch.udepth == final_depth:
yield branch.uproot()
def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent)
def add_general_child(self, child):
self.general_children.append(child)
def all_children(self):
if len(self.children) > 0:
for child in self.children:
for subchild in child.all_children():
yield subchild
yield self
else:
yield self
def lowercase(self):
"""
Lowercase all strings in this tree.
Works recursively and in-place.
"""
if len(self.children) > 0:
for child in self.children:
child.lowercase()
else:
self.text = self.text.lower()
def to_dict(self, index=0):
"""
Dict format for use in Javascript / Jason Chuang's display technology.
"""
index += 1
rep = {}
rep["index"] = index
rep["leaf"] = len(self.children) == 0
rep["depth"] = self.udepth
rep["scoreDistr"] = [0.0] * len(LabeledTree.SCORE_MAPPING)
# dirac distribution at correct label
if self.label is not None:
rep["scoreDistr"][self.label] = 1.0
mapping = LabeledTree.SCORE_MAPPING[:]
rep["rating"] = mapping[self.label] - min(mapping)
# if you are using this method for printing predictions
# from a model, the the dot product with the model's output
# distribution should be taken with this list:
rep["numChildren"] = len(self.children)
text = self.text if self.text != None else ""
seen_tokens = 0
witnessed_pixels = 0
for i, child in enumerate(self.children):
if i > 0:
text += " "
child_key = "child%d" % (i)
(rep[child_key], index) = child.to_dict(index)
text += rep[child_key]["text"]
seen_tokens += rep[child_key]["tokens"]
witnessed_pixels += rep[child_key]["pixels"]
rep["text"] = text
rep["tokens"] = 1 if (self.text != None and len(self.text) > 0) else seen_tokens
rep["pixels"] = witnessed_pixels + 3 if len(self.children) > 0 else text_size(self.text)
return (rep, index)
def to_json(self):
rep, _ = self.to_dict()
return json.dumps(rep)
def display(self):
from IPython.display import Javascript, display
display(Javascript("createTrees(["+self.to_json()+"])"))
display(Javascript("updateTrees()"))
def to_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_lines(), self.children[1].to_lines()
self_line = [left_lines[0] + " " + right_lines[0]]
return self_line + left_lines + right_lines
else:
return [self.text]
def to_labeled_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_labeled_lines(), self.children[1].to_labeled_lines()
self_line = [(self.label, left_lines[0][1] + " " + right_lines[0][1])]
return self_line + left_lines + right_lines
else:
return [(self.label, self.text)]
def __str__(self):
"""
String representation of a tree as visible in original corpus.
print(tree)
#=> '(2 (2 not) (3 good))'
Outputs
-------
str: the String representation of the tree.
"""
if len(self.children) > 0:
rep = "(%d " % self.label
for child in self.children:
rep += str(child)
return rep + ")"
else:
text = self.text\
.replace("(", "-LRB-")\
.replace(")", "-RRB-")\
.replace("{", "-LCB-")\
.replace("}", "-RCB-")\
.replace("[", "-LSB-")\
.replace("]", "-RSB-")
return ("(%d %s) " % (self.label, text))
@staticmethod
def inject_visualization_javascript(tree_width=1200, tree_height=400, tree_node_radius=10):
"""
In an Ipython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
"""
from .javascript import insert_sentiment_markup
insert_sentiment_markup(tree_width=tree_width, tree_height=tree_height, tree_node_radius=tree_node_radius)
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.lowercase
|
python
|
def lowercase(self):
if len(self.children) > 0:
for child in self.children:
child.lowercase()
else:
self.text = self.text.lower()
|
Lowercase all strings in this tree.
Works recursively and in-place.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L92-L101
| null |
class LabeledTree(object):
SCORE_MAPPING = [-12.5,-6.25,0.0,6.25,12.5]
def __init__(self,
depth=0,
text=None,
label=None,
children=None,
parent=None,
udepth=1):
self.label = label
self.children = children if children != None else []
self.general_children = []
self.text = text
self.parent = parent
self.depth = depth
self.udepth = udepth
def uproot(tree):
"""
Take a subranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
"""
uprooted = tree.copy()
uprooted.parent = None
for child in tree.all_children():
uprooted.add_general_child(child)
return uprooted
def shrink_tree(tree, final_depth):
if tree.udepth <= final_depth:
return tree
for branch in tree.general_children:
if branch.udepth == final_depth:
return branch.uproot()
def shrunk_trees(tree, final_depth):
if tree.udepth <= final_depth:
yield tree
for branch in tree.general_children:
if branch.udepth == final_depth:
yield branch.uproot()
def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent)
def add_child(self, child):
"""
Adds a branch to the current tree.
"""
self.children.append(child)
child.parent = self
self.udepth = max([child.udepth for child in self.children]) + 1
def add_general_child(self, child):
self.general_children.append(child)
def all_children(self):
if len(self.children) > 0:
for child in self.children:
for subchild in child.all_children():
yield subchild
yield self
else:
yield self
def to_dict(self, index=0):
"""
Dict format for use in Javascript / Jason Chuang's display technology.
"""
index += 1
rep = {}
rep["index"] = index
rep["leaf"] = len(self.children) == 0
rep["depth"] = self.udepth
rep["scoreDistr"] = [0.0] * len(LabeledTree.SCORE_MAPPING)
# dirac distribution at correct label
if self.label is not None:
rep["scoreDistr"][self.label] = 1.0
mapping = LabeledTree.SCORE_MAPPING[:]
rep["rating"] = mapping[self.label] - min(mapping)
# if you are using this method for printing predictions
# from a model, the the dot product with the model's output
# distribution should be taken with this list:
rep["numChildren"] = len(self.children)
text = self.text if self.text != None else ""
seen_tokens = 0
witnessed_pixels = 0
for i, child in enumerate(self.children):
if i > 0:
text += " "
child_key = "child%d" % (i)
(rep[child_key], index) = child.to_dict(index)
text += rep[child_key]["text"]
seen_tokens += rep[child_key]["tokens"]
witnessed_pixels += rep[child_key]["pixels"]
rep["text"] = text
rep["tokens"] = 1 if (self.text != None and len(self.text) > 0) else seen_tokens
rep["pixels"] = witnessed_pixels + 3 if len(self.children) > 0 else text_size(self.text)
return (rep, index)
def to_json(self):
rep, _ = self.to_dict()
return json.dumps(rep)
def display(self):
from IPython.display import Javascript, display
display(Javascript("createTrees(["+self.to_json()+"])"))
display(Javascript("updateTrees()"))
def to_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_lines(), self.children[1].to_lines()
self_line = [left_lines[0] + " " + right_lines[0]]
return self_line + left_lines + right_lines
else:
return [self.text]
def to_labeled_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_labeled_lines(), self.children[1].to_labeled_lines()
self_line = [(self.label, left_lines[0][1] + " " + right_lines[0][1])]
return self_line + left_lines + right_lines
else:
return [(self.label, self.text)]
def __str__(self):
"""
String representation of a tree as visible in original corpus.
print(tree)
#=> '(2 (2 not) (3 good))'
Outputs
-------
str: the String representation of the tree.
"""
if len(self.children) > 0:
rep = "(%d " % self.label
for child in self.children:
rep += str(child)
return rep + ")"
else:
text = self.text\
.replace("(", "-LRB-")\
.replace(")", "-RRB-")\
.replace("{", "-LCB-")\
.replace("}", "-RCB-")\
.replace("[", "-LSB-")\
.replace("]", "-RSB-")
return ("(%d %s) " % (self.label, text))
@staticmethod
def inject_visualization_javascript(tree_width=1200, tree_height=400, tree_node_radius=10):
"""
In an Ipython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
"""
from .javascript import insert_sentiment_markup
insert_sentiment_markup(tree_width=tree_width, tree_height=tree_height, tree_node_radius=tree_node_radius)
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.to_dict
|
python
|
def to_dict(self, index=0):
index += 1
rep = {}
rep["index"] = index
rep["leaf"] = len(self.children) == 0
rep["depth"] = self.udepth
rep["scoreDistr"] = [0.0] * len(LabeledTree.SCORE_MAPPING)
# dirac distribution at correct label
if self.label is not None:
rep["scoreDistr"][self.label] = 1.0
mapping = LabeledTree.SCORE_MAPPING[:]
rep["rating"] = mapping[self.label] - min(mapping)
# if you are using this method for printing predictions
# from a model, the the dot product with the model's output
# distribution should be taken with this list:
rep["numChildren"] = len(self.children)
text = self.text if self.text != None else ""
seen_tokens = 0
witnessed_pixels = 0
for i, child in enumerate(self.children):
if i > 0:
text += " "
child_key = "child%d" % (i)
(rep[child_key], index) = child.to_dict(index)
text += rep[child_key]["text"]
seen_tokens += rep[child_key]["tokens"]
witnessed_pixels += rep[child_key]["pixels"]
rep["text"] = text
rep["tokens"] = 1 if (self.text != None and len(self.text) > 0) else seen_tokens
rep["pixels"] = witnessed_pixels + 3 if len(self.children) > 0 else text_size(self.text)
return (rep, index)
|
Dict format for use in Javascript / Jason Chuang's display technology.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L103-L137
|
[
"def text_size(text):\n return max(4, font.getsize(text)[0][0])\n",
"def text_size(text):\n # TODO(contributors): make changes here to incorporate cap and uncap unknown words.\n return max(4, int(len(text) * 1.1))\n"
] |
class LabeledTree(object):
SCORE_MAPPING = [-12.5,-6.25,0.0,6.25,12.5]
def __init__(self,
depth=0,
text=None,
label=None,
children=None,
parent=None,
udepth=1):
self.label = label
self.children = children if children != None else []
self.general_children = []
self.text = text
self.parent = parent
self.depth = depth
self.udepth = udepth
def uproot(tree):
"""
Take a subranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
"""
uprooted = tree.copy()
uprooted.parent = None
for child in tree.all_children():
uprooted.add_general_child(child)
return uprooted
def shrink_tree(tree, final_depth):
if tree.udepth <= final_depth:
return tree
for branch in tree.general_children:
if branch.udepth == final_depth:
return branch.uproot()
def shrunk_trees(tree, final_depth):
if tree.udepth <= final_depth:
yield tree
for branch in tree.general_children:
if branch.udepth == final_depth:
yield branch.uproot()
def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent)
def add_child(self, child):
"""
Adds a branch to the current tree.
"""
self.children.append(child)
child.parent = self
self.udepth = max([child.udepth for child in self.children]) + 1
def add_general_child(self, child):
self.general_children.append(child)
def all_children(self):
if len(self.children) > 0:
for child in self.children:
for subchild in child.all_children():
yield subchild
yield self
else:
yield self
def lowercase(self):
"""
Lowercase all strings in this tree.
Works recursively and in-place.
"""
if len(self.children) > 0:
for child in self.children:
child.lowercase()
else:
self.text = self.text.lower()
def to_json(self):
rep, _ = self.to_dict()
return json.dumps(rep)
def display(self):
from IPython.display import Javascript, display
display(Javascript("createTrees(["+self.to_json()+"])"))
display(Javascript("updateTrees()"))
def to_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_lines(), self.children[1].to_lines()
self_line = [left_lines[0] + " " + right_lines[0]]
return self_line + left_lines + right_lines
else:
return [self.text]
def to_labeled_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_labeled_lines(), self.children[1].to_labeled_lines()
self_line = [(self.label, left_lines[0][1] + " " + right_lines[0][1])]
return self_line + left_lines + right_lines
else:
return [(self.label, self.text)]
def __str__(self):
"""
String representation of a tree as visible in original corpus.
print(tree)
#=> '(2 (2 not) (3 good))'
Outputs
-------
str: the String representation of the tree.
"""
if len(self.children) > 0:
rep = "(%d " % self.label
for child in self.children:
rep += str(child)
return rep + ")"
else:
text = self.text\
.replace("(", "-LRB-")\
.replace(")", "-RRB-")\
.replace("{", "-LCB-")\
.replace("}", "-RCB-")\
.replace("[", "-LSB-")\
.replace("]", "-RSB-")
return ("(%d %s) " % (self.label, text))
@staticmethod
def inject_visualization_javascript(tree_width=1200, tree_height=400, tree_node_radius=10):
"""
In an Ipython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
"""
from .javascript import insert_sentiment_markup
insert_sentiment_markup(tree_width=tree_width, tree_height=tree_height, tree_node_radius=tree_node_radius)
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.inject_visualization_javascript
|
python
|
def inject_visualization_javascript(tree_width=1200, tree_height=400, tree_node_radius=10):
from .javascript import insert_sentiment_markup
insert_sentiment_markup(tree_width=tree_width, tree_height=tree_height, tree_node_radius=tree_node_radius)
|
In an Ipython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L195-L201
|
[
"def insert_sentiment_markup(tree_width=1200, tree_height=400, tree_node_radius=10):\n\tinsert_stanford_javascript(\n\t\ttree_width=tree_width,\n\t\ttree_height=tree_height,\n\t\ttree_node_radius=tree_node_radius\n\t)\n\tinsert_stanford_styles()\n\timport_tag(\"div\", className='trees')\n"
] |
class LabeledTree(object):
SCORE_MAPPING = [-12.5,-6.25,0.0,6.25,12.5]
def __init__(self,
depth=0,
text=None,
label=None,
children=None,
parent=None,
udepth=1):
self.label = label
self.children = children if children != None else []
self.general_children = []
self.text = text
self.parent = parent
self.depth = depth
self.udepth = udepth
def uproot(tree):
"""
Take a subranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
"""
uprooted = tree.copy()
uprooted.parent = None
for child in tree.all_children():
uprooted.add_general_child(child)
return uprooted
def shrink_tree(tree, final_depth):
if tree.udepth <= final_depth:
return tree
for branch in tree.general_children:
if branch.udepth == final_depth:
return branch.uproot()
def shrunk_trees(tree, final_depth):
if tree.udepth <= final_depth:
yield tree
for branch in tree.general_children:
if branch.udepth == final_depth:
yield branch.uproot()
def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent)
def add_child(self, child):
"""
Adds a branch to the current tree.
"""
self.children.append(child)
child.parent = self
self.udepth = max([child.udepth for child in self.children]) + 1
def add_general_child(self, child):
self.general_children.append(child)
def all_children(self):
if len(self.children) > 0:
for child in self.children:
for subchild in child.all_children():
yield subchild
yield self
else:
yield self
def lowercase(self):
"""
Lowercase all strings in this tree.
Works recursively and in-place.
"""
if len(self.children) > 0:
for child in self.children:
child.lowercase()
else:
self.text = self.text.lower()
def to_dict(self, index=0):
"""
Dict format for use in Javascript / Jason Chuang's display technology.
"""
index += 1
rep = {}
rep["index"] = index
rep["leaf"] = len(self.children) == 0
rep["depth"] = self.udepth
rep["scoreDistr"] = [0.0] * len(LabeledTree.SCORE_MAPPING)
# dirac distribution at correct label
if self.label is not None:
rep["scoreDistr"][self.label] = 1.0
mapping = LabeledTree.SCORE_MAPPING[:]
rep["rating"] = mapping[self.label] - min(mapping)
# if you are using this method for printing predictions
# from a model, the the dot product with the model's output
# distribution should be taken with this list:
rep["numChildren"] = len(self.children)
text = self.text if self.text != None else ""
seen_tokens = 0
witnessed_pixels = 0
for i, child in enumerate(self.children):
if i > 0:
text += " "
child_key = "child%d" % (i)
(rep[child_key], index) = child.to_dict(index)
text += rep[child_key]["text"]
seen_tokens += rep[child_key]["tokens"]
witnessed_pixels += rep[child_key]["pixels"]
rep["text"] = text
rep["tokens"] = 1 if (self.text != None and len(self.text) > 0) else seen_tokens
rep["pixels"] = witnessed_pixels + 3 if len(self.children) > 0 else text_size(self.text)
return (rep, index)
def to_json(self):
rep, _ = self.to_dict()
return json.dumps(rep)
def display(self):
from IPython.display import Javascript, display
display(Javascript("createTrees(["+self.to_json()+"])"))
display(Javascript("updateTrees()"))
def to_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_lines(), self.children[1].to_lines()
self_line = [left_lines[0] + " " + right_lines[0]]
return self_line + left_lines + right_lines
else:
return [self.text]
def to_labeled_lines(self):
if len(self.children) > 0:
left_lines, right_lines = self.children[0].to_labeled_lines(), self.children[1].to_labeled_lines()
self_line = [(self.label, left_lines[0][1] + " " + right_lines[0][1])]
return self_line + left_lines + right_lines
else:
return [(self.label, self.text)]
def __str__(self):
"""
String representation of a tree as visible in original corpus.
print(tree)
#=> '(2 (2 not) (3 good))'
Outputs
-------
str: the String representation of the tree.
"""
if len(self.children) > 0:
rep = "(%d " % self.label
for child in self.children:
rep += str(child)
return rep + ")"
else:
text = self.text\
.replace("(", "-LRB-")\
.replace(")", "-RRB-")\
.replace("{", "-LCB-")\
.replace("}", "-RCB-")\
.replace("[", "-LSB-")\
.replace("]", "-RSB-")
return ("(%d %s) " % (self.label, text))
@staticmethod
|
JonathanRaiman/pytreebank
|
pytreebank/download.py
|
delete_paths
|
python
|
def delete_paths(paths):
for path in paths:
if exists(path):
if isfile(path):
remove(path)
else:
rmtree(path)
|
Delete a list of paths that are files or directories.
If a file/directory does not exist, skip it.
Arguments:
----------
paths : list<str>, names of files/directories to remove.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/download.py#L9-L25
| null |
from os import stat, makedirs, remove
from os.path import join, exists, isfile
from zipfile import ZipFile
from shutil import move, rmtree
from . import utils
def download_sst(path, url):
""""
Download from `url` the zip file corresponding to the
Stanford Sentiment Treebank and expand the resulting
files into the directory `path` (Note: if the files are
already present, the download is not actually run).
Arguments
---------
path : str, directory to save the train, test, and dev files.
url : str, location of zip file on the web
Returns:
--------
dict<str, str>: file path for the keys train, test, dev.
"""
local_files = {
"train": join(path, "train.txt"),
"test": join(path, "test.txt"),
"dev": join(path, "dev.txt")
}
makedirs(path, exist_ok=True)
if all(exists(fname) and stat(fname).st_size > 100 for fname in local_files.values()):
return local_files
zip_local = join(path, 'trainDevTestTrees_PTB.zip')
delete_paths([zip_local, join(path, "trees")] + list(local_files.values()))
utils.urlretrieve(url, zip_local)
ZipFile(zip_local).extractall(path)
for fname in local_files.values():
move(join(path, 'trees', fname.split('/')[-1]), fname)
delete_paths([zip_local, join(path, 'trainDevTestTrees_PTB', 'trees'), join(path, 'trainDevTestTrees_PTB')])
return local_files
|
JonathanRaiman/pytreebank
|
pytreebank/download.py
|
download_sst
|
python
|
def download_sst(path, url):
"
local_files = {
"train": join(path, "train.txt"),
"test": join(path, "test.txt"),
"dev": join(path, "dev.txt")
}
makedirs(path, exist_ok=True)
if all(exists(fname) and stat(fname).st_size > 100 for fname in local_files.values()):
return local_files
zip_local = join(path, 'trainDevTestTrees_PTB.zip')
delete_paths([zip_local, join(path, "trees")] + list(local_files.values()))
utils.urlretrieve(url, zip_local)
ZipFile(zip_local).extractall(path)
for fname in local_files.values():
move(join(path, 'trees', fname.split('/')[-1]), fname)
delete_paths([zip_local, join(path, 'trainDevTestTrees_PTB', 'trees'), join(path, 'trainDevTestTrees_PTB')])
return local_files
|
Download from `url` the zip file corresponding to the
Stanford Sentiment Treebank and expand the resulting
files into the directory `path` (Note: if the files are
already present, the download is not actually run).
Arguments
---------
path : str, directory to save the train, test, and dev files.
url : str, location of zip file on the web
Returns:
--------
dict<str, str>: file path for the keys train, test, dev.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/download.py#L28-L62
|
[
"def delete_paths(paths):\n \"\"\"\n Delete a list of paths that are files or directories.\n If a file/directory does not exist, skip it.\n\n Arguments:\n ----------\n\n paths : list<str>, names of files/directories to remove.\n\n \"\"\"\n for path in paths:\n if exists(path):\n if isfile(path):\n remove(path)\n else:\n rmtree(path)\n"
] |
from os import stat, makedirs, remove
from os.path import join, exists, isfile
from zipfile import ZipFile
from shutil import move, rmtree
from . import utils
def delete_paths(paths):
"""
Delete a list of paths that are files or directories.
If a file/directory does not exist, skip it.
Arguments:
----------
paths : list<str>, names of files/directories to remove.
"""
for path in paths:
if exists(path):
if isfile(path):
remove(path)
else:
rmtree(path)
|
JonathanRaiman/pytreebank
|
pytreebank/parse.py
|
attribute_text_label
|
python
|
def attribute_text_label(node, current_word):
node.text = normalize_string(current_word)
node.text = node.text.strip(" ")
node.udepth = 1
if len(node.text) > 0 and node.text[0].isdigit():
split_sent = node.text.split(" ", 1)
label = split_sent[0]
if len(split_sent) > 1:
text = split_sent[1]
node.text = text
if all(c.isdigit() for c in label):
node.label = int(label)
else:
text = label + " " + text
node.text = text
if len(node.text) == 0:
node.text = None
|
Tries to recover the label inside a string
of the form '(3 hello)' where 3 is the label,
and hello is the string. Label is not assigned
if the string does not follow the expected
format.
Arguments:
----------
node : LabeledTree, current node that should
possibly receive a label.
current_word : str, input string.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L15-L46
|
[
"def normalize_string(string):\n \"\"\"\n Standardize input strings by making\n non-ascii spaces be ascii, and by converting\n treebank-style brackets/parenthesis be characters\n once more.\n\n Arguments:\n ----------\n string : str, characters to be standardized.\n\n Returns:\n --------\n str : standardized\n \"\"\"\n return string.replace(\"\\xa0\", \" \")\\\n .replace(\"\\\\\", \"\")\\\n .replace(\"-LRB-\", \"(\")\\\n .replace(\"-RRB-\", \")\")\\\n .replace(\"-LCB-\", \"{\")\\\n .replace(\"-RCB-\", \"}\")\\\n .replace(\"-LSB-\", \"[\")\\\n .replace(\"-RSB-\", \"]\")\n"
] |
import codecs
import os
from collections import OrderedDict
from .labeled_trees import LabeledTree
from .download import download_sst
from .utils import makedirs, normalize_string
class ParseError(ValueError):
pass
def create_tree_from_string(line):
"""
Parse and convert a string representation
of an example into a LabeledTree datastructure.
Arguments:
----------
line : str, string version of the tree.
Returns:
--------
LabeledTree : parsed tree.
"""
depth = 0
current_word = ""
root = None
current_node = root
for char in line:
if char == '(':
if current_node is not None and len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
depth += 1
if depth > 1:
# replace current head node by this node:
child = LabeledTree(depth=depth)
current_node.add_child(child)
current_node = child
root.add_general_child(child)
else:
root = LabeledTree(depth=depth)
root.add_general_child(root)
current_node = root
elif char == ')':
# assign current word:
if len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
# go up a level:
depth -= 1
if current_node.parent != None:
current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)
current_node = current_node.parent
else:
# add to current read word
current_word += char
if depth != 0:
raise ParseError("Not an equal amount of closing and opening parentheses")
return root
class LabeledTreeCorpus(list):
"""
Read in the Stanford Sentiment Treebank using the original serialization format:
> (3 (2 this) (3 (2 is) (3 good ) )
"""
def labels(self):
"""
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
"""
labelings = OrderedDict()
for tree in self:
for label, line in tree.to_labeled_lines():
labelings[line] = label
return labelings
def to_file(self, path, mode="w"):
"""
Save the corpus to a text file in the
original format.
Arguments:
----------
path : str, where to save the corpus.
mode : str, how to open the file.
"""
with open(path, mode=mode) as f:
for tree in self:
for label, line in tree.to_labeled_lines():
f.write(line + "\n")
def import_tree_corpus(path):
"""
Import a text file of treebank trees.
Arguments:
----------
path : str, filename for tree corpus.
Returns:
--------
list<LabeledTree> : loaded examples.
"""
tree_list = LabeledTreeCorpus()
with codecs.open(path, "r", "UTF-8") as f:
for line in f:
tree_list.append(create_tree_from_string(line))
return tree_list
def load_sst(path=None,
url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
"""
Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with a 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
"""
if path is None:
# find a good temporary path
path = os.path.expanduser("~/stanford_sentiment_treebank/")
makedirs(path, exist_ok=True)
fnames = download_sst(path, url)
return {key: import_tree_corpus(value) for key, value in fnames.items()}
|
JonathanRaiman/pytreebank
|
pytreebank/parse.py
|
create_tree_from_string
|
python
|
def create_tree_from_string(line):
depth = 0
current_word = ""
root = None
current_node = root
for char in line:
if char == '(':
if current_node is not None and len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
depth += 1
if depth > 1:
# replace current head node by this node:
child = LabeledTree(depth=depth)
current_node.add_child(child)
current_node = child
root.add_general_child(child)
else:
root = LabeledTree(depth=depth)
root.add_general_child(root)
current_node = root
elif char == ')':
# assign current word:
if len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
# go up a level:
depth -= 1
if current_node.parent != None:
current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)
current_node = current_node.parent
else:
# add to current read word
current_word += char
if depth != 0:
raise ParseError("Not an equal amount of closing and opening parentheses")
return root
|
Parse and convert a string representation
of an example into a LabeledTree datastructure.
Arguments:
----------
line : str, string version of the tree.
Returns:
--------
LabeledTree : parsed tree.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L49-L101
|
[
"def attribute_text_label(node, current_word):\n \"\"\"\n Tries to recover the label inside a string\n of the form '(3 hello)' where 3 is the label,\n and hello is the string. Label is not assigned\n if the string does not follow the expected\n format.\n\n Arguments:\n ----------\n node : LabeledTree, current node that should\n possibly receive a label.\n current_word : str, input string.\n \"\"\"\n node.text = normalize_string(current_word)\n node.text = node.text.strip(\" \")\n node.udepth = 1\n if len(node.text) > 0 and node.text[0].isdigit():\n split_sent = node.text.split(\" \", 1)\n label = split_sent[0]\n if len(split_sent) > 1:\n text = split_sent[1]\n node.text = text\n\n if all(c.isdigit() for c in label):\n node.label = int(label)\n else:\n text = label + \" \" + text\n node.text = text\n\n if len(node.text) == 0:\n node.text = None\n",
"def add_child(self, child):\n \"\"\"\n Adds a branch to the current tree.\n \"\"\"\n self.children.append(child)\n child.parent = self\n self.udepth = max([child.udepth for child in self.children]) + 1\n",
"def add_general_child(self, child):\n self.general_children.append(child)\n"
] |
import codecs
import os
from collections import OrderedDict
from .labeled_trees import LabeledTree
from .download import download_sst
from .utils import makedirs, normalize_string
class ParseError(ValueError):
pass
def attribute_text_label(node, current_word):
"""
Tries to recover the label inside a string
of the form '(3 hello)' where 3 is the label,
and hello is the string. Label is not assigned
if the string does not follow the expected
format.
Arguments:
----------
node : LabeledTree, current node that should
possibly receive a label.
current_word : str, input string.
"""
node.text = normalize_string(current_word)
node.text = node.text.strip(" ")
node.udepth = 1
if len(node.text) > 0 and node.text[0].isdigit():
split_sent = node.text.split(" ", 1)
label = split_sent[0]
if len(split_sent) > 1:
text = split_sent[1]
node.text = text
if all(c.isdigit() for c in label):
node.label = int(label)
else:
text = label + " " + text
node.text = text
if len(node.text) == 0:
node.text = None
class LabeledTreeCorpus(list):
    """
    A list of LabeledTrees read from the Stanford Sentiment
    Treebank's original serialization format, e.g.:

    > (3 (2 this) (3 (2 is) (3 good)))
    """
    def labels(self):
        """
        Construct a dictionary of string -> labels

        Returns:
        --------
        OrderedDict<str, int> : string label pairs.
        """
        mapping = OrderedDict()
        for tree in self:
            # later occurrences of the same string overwrite the label,
            # keeping the first insertion position.
            mapping.update(
                (line, label) for label, line in tree.to_labeled_lines()
            )
        return mapping

    def to_file(self, path, mode="w"):
        """
        Save the corpus to a text file in the
        original format.

        Arguments:
        ----------
        path : str, where to save the corpus.
        mode : str, how to open the file.
        """
        with open(path, mode=mode) as handle:
            handle.writelines(
                line + "\n"
                for tree in self
                for label, line in tree.to_labeled_lines()
            )
def import_tree_corpus(path):
    """
    Import a text file of treebank trees.

    Arguments:
    ----------
    path : str, filename for tree corpus.

    Returns:
    --------
    list<LabeledTree> : loaded examples.
    """
    # parse one serialized tree per line of the file
    with codecs.open(path, "r", "UTF-8") as handle:
        return LabeledTreeCorpus(
            create_tree_from_string(line) for line in handle
        )
def load_sst(path=None,
             url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
    """
    Download and read in the Stanford Sentiment Treebank dataset
    into a dictionary with a 'train', 'dev', and 'test' keys. The
    dictionary keys point to lists of LabeledTrees.

    Arguments:
    ----------
    path : str, (optional defaults to ~/stanford_sentiment_treebank),
           directory where the corpus should be downloaded (and
           imported from).
    url : str, where the corpus should be downloaded from (defaults
          to nlp.stanford.edu address).

    Returns:
    --------
    dict : loaded dataset
    """
    # fall back to a per-user directory when no path was given
    target_dir = path if path is not None else os.path.expanduser(
        "~/stanford_sentiment_treebank/")
    makedirs(target_dir, exist_ok=True)
    downloaded = download_sst(target_dir, url)
    return {split: import_tree_corpus(fname)
            for split, fname in downloaded.items()}
|
JonathanRaiman/pytreebank
|
pytreebank/parse.py
|
import_tree_corpus
|
python
|
def import_tree_corpus(path):
tree_list = LabeledTreeCorpus()
with codecs.open(path, "r", "UTF-8") as f:
for line in f:
tree_list.append(create_tree_from_string(line))
return tree_list
|
Import a text file of treebank trees.
Arguments:
----------
path : str, filename for tree corpus.
Returns:
--------
list<LabeledTree> : loaded examples.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L144-L160
|
[
"def create_tree_from_string(line):\n \"\"\"\n Parse and convert a string representation\n of an example into a LabeledTree datastructure.\n\n Arguments:\n ----------\n line : str, string version of the tree.\n\n Returns:\n --------\n LabeledTree : parsed tree.\n \"\"\"\n depth = 0\n current_word = \"\"\n root = None\n current_node = root\n\n for char in line:\n if char == '(':\n if current_node is not None and len(current_word) > 0:\n attribute_text_label(current_node, current_word)\n current_word = \"\"\n depth += 1\n if depth > 1:\n # replace current head node by this node:\n child = LabeledTree(depth=depth)\n current_node.add_child(child)\n current_node = child\n root.add_general_child(child)\n else:\n root = LabeledTree(depth=depth)\n root.add_general_child(root)\n current_node = root\n\n elif char == ')':\n # assign current word:\n if len(current_word) > 0:\n attribute_text_label(current_node, current_word)\n current_word = \"\"\n\n # go up a level:\n depth -= 1\n if current_node.parent != None:\n current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)\n current_node = current_node.parent\n else:\n # add to current read word\n current_word += char\n if depth != 0:\n raise ParseError(\"Not an equal amount of closing and opening parentheses\")\n\n return root\n"
] |
import codecs
import os
from collections import OrderedDict
from .labeled_trees import LabeledTree
from .download import download_sst
from .utils import makedirs, normalize_string
class ParseError(ValueError):
pass
def attribute_text_label(node, current_word):
"""
Tries to recover the label inside a string
of the form '(3 hello)' where 3 is the label,
and hello is the string. Label is not assigned
if the string does not follow the expected
format.
Arguments:
----------
node : LabeledTree, current node that should
possibly receive a label.
current_word : str, input string.
"""
node.text = normalize_string(current_word)
node.text = node.text.strip(" ")
node.udepth = 1
if len(node.text) > 0 and node.text[0].isdigit():
split_sent = node.text.split(" ", 1)
label = split_sent[0]
if len(split_sent) > 1:
text = split_sent[1]
node.text = text
if all(c.isdigit() for c in label):
node.label = int(label)
else:
text = label + " " + text
node.text = text
if len(node.text) == 0:
node.text = None
def create_tree_from_string(line):
    """
    Parse and convert a string representation
    of an example into a LabeledTree datastructure.

    Arguments:
    ----------
    line : str, string version of the tree.

    Returns:
    --------
    LabeledTree : parsed tree.

    Raises:
    -------
    ParseError : if parentheses in `line` are unbalanced.
    """
    depth = 0
    current_word = ""
    root = None
    current_node = root
    for char in line:
        if char == '(':
            # flush any text accumulated so far onto the current node
            if current_node is not None and len(current_word) > 0:
                attribute_text_label(current_node, current_word)
                current_word = ""
            depth += 1
            if depth > 1:
                # replace current head node by this node:
                child = LabeledTree(depth=depth)
                current_node.add_child(child)
                current_node = child
                root.add_general_child(child)
            else:
                # depth == 1: outermost parenthesis opens the root node
                root = LabeledTree(depth=depth)
                root.add_general_child(root)
                current_node = root
        elif char == ')':
            # assign current word:
            if len(current_word) > 0:
                attribute_text_label(current_node, current_word)
                current_word = ""
            # go up a level:
            depth -= 1
            if current_node.parent != None:
                # propagate this subtree's height up to the parent
                current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)
                current_node = current_node.parent
        else:
            # add to current read word
            current_word += char
    if depth != 0:
        raise ParseError("Not an equal amount of closing and opening parentheses")
    return root
class LabeledTreeCorpus(list):
"""
Read in the Stanford Sentiment Treebank using the original serialization format:
> (3 (2 this) (3 (2 is) (3 good ) )
"""
def labels(self):
"""
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
"""
labelings = OrderedDict()
for tree in self:
for label, line in tree.to_labeled_lines():
labelings[line] = label
return labelings
def to_file(self, path, mode="w"):
"""
Save the corpus to a text file in the
original format.
Arguments:
----------
path : str, where to save the corpus.
mode : str, how to open the file.
"""
with open(path, mode=mode) as f:
for tree in self:
for label, line in tree.to_labeled_lines():
f.write(line + "\n")
def load_sst(path=None,
url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
"""
Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with a 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
"""
if path is None:
# find a good temporary path
path = os.path.expanduser("~/stanford_sentiment_treebank/")
makedirs(path, exist_ok=True)
fnames = download_sst(path, url)
return {key: import_tree_corpus(value) for key, value in fnames.items()}
|
JonathanRaiman/pytreebank
|
pytreebank/parse.py
|
load_sst
|
python
|
def load_sst(path=None,
url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
if path is None:
# find a good temporary path
path = os.path.expanduser("~/stanford_sentiment_treebank/")
makedirs(path, exist_ok=True)
fnames = download_sst(path, url)
return {key: import_tree_corpus(value) for key, value in fnames.items()}
|
Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with a 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L163-L187
|
[
"def download_sst(path, url):\n \"\"\"\"\n Download from `url` the zip file corresponding to the\n Stanford Sentiment Treebank and expand the resulting\n files into the directory `path` (Note: if the files are\n already present, the download is not actually run).\n\n Arguments\n ---------\n path : str, directory to save the train, test, and dev files.\n url : str, location of zip file on the web\n\n Returns:\n --------\n\n dict<str, str>: file path for the keys train, test, dev.\n\n \"\"\"\n local_files = {\n \"train\": join(path, \"train.txt\"),\n \"test\": join(path, \"test.txt\"),\n \"dev\": join(path, \"dev.txt\")\n }\n makedirs(path, exist_ok=True)\n if all(exists(fname) and stat(fname).st_size > 100 for fname in local_files.values()):\n return local_files\n\n zip_local = join(path, 'trainDevTestTrees_PTB.zip')\n delete_paths([zip_local, join(path, \"trees\")] + list(local_files.values()))\n utils.urlretrieve(url, zip_local)\n ZipFile(zip_local).extractall(path)\n for fname in local_files.values():\n move(join(path, 'trees', fname.split('/')[-1]), fname)\n delete_paths([zip_local, join(path, 'trainDevTestTrees_PTB', 'trees'), join(path, 'trainDevTestTrees_PTB')])\n return local_files\n"
] |
import codecs
import os
from collections import OrderedDict
from .labeled_trees import LabeledTree
from .download import download_sst
from .utils import makedirs, normalize_string
class ParseError(ValueError):
pass
def attribute_text_label(node, current_word):
"""
Tries to recover the label inside a string
of the form '(3 hello)' where 3 is the label,
and hello is the string. Label is not assigned
if the string does not follow the expected
format.
Arguments:
----------
node : LabeledTree, current node that should
possibly receive a label.
current_word : str, input string.
"""
node.text = normalize_string(current_word)
node.text = node.text.strip(" ")
node.udepth = 1
if len(node.text) > 0 and node.text[0].isdigit():
split_sent = node.text.split(" ", 1)
label = split_sent[0]
if len(split_sent) > 1:
text = split_sent[1]
node.text = text
if all(c.isdigit() for c in label):
node.label = int(label)
else:
text = label + " " + text
node.text = text
if len(node.text) == 0:
node.text = None
def create_tree_from_string(line):
"""
Parse and convert a string representation
of an example into a LabeledTree datastructure.
Arguments:
----------
line : str, string version of the tree.
Returns:
--------
LabeledTree : parsed tree.
"""
depth = 0
current_word = ""
root = None
current_node = root
for char in line:
if char == '(':
if current_node is not None and len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
depth += 1
if depth > 1:
# replace current head node by this node:
child = LabeledTree(depth=depth)
current_node.add_child(child)
current_node = child
root.add_general_child(child)
else:
root = LabeledTree(depth=depth)
root.add_general_child(root)
current_node = root
elif char == ')':
# assign current word:
if len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
# go up a level:
depth -= 1
if current_node.parent != None:
current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)
current_node = current_node.parent
else:
# add to current read word
current_word += char
if depth != 0:
raise ParseError("Not an equal amount of closing and opening parentheses")
return root
class LabeledTreeCorpus(list):
"""
Read in the Stanford Sentiment Treebank using the original serialization format:
> (3 (2 this) (3 (2 is) (3 good ) )
"""
def labels(self):
"""
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
"""
labelings = OrderedDict()
for tree in self:
for label, line in tree.to_labeled_lines():
labelings[line] = label
return labelings
def to_file(self, path, mode="w"):
"""
Save the corpus to a text file in the
original format.
Arguments:
----------
path : str, where to save the corpus.
mode : str, how to open the file.
"""
with open(path, mode=mode) as f:
for tree in self:
for label, line in tree.to_labeled_lines():
f.write(line + "\n")
def import_tree_corpus(path):
"""
Import a text file of treebank trees.
Arguments:
----------
path : str, filename for tree corpus.
Returns:
--------
list<LabeledTree> : loaded examples.
"""
tree_list = LabeledTreeCorpus()
with codecs.open(path, "r", "UTF-8") as f:
for line in f:
tree_list.append(create_tree_from_string(line))
return tree_list
|
JonathanRaiman/pytreebank
|
pytreebank/parse.py
|
LabeledTreeCorpus.labels
|
python
|
def labels(self):
labelings = OrderedDict()
for tree in self:
for label, line in tree.to_labeled_lines():
labelings[line] = label
return labelings
|
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L112-L124
| null |
class LabeledTreeCorpus(list):
"""
Read in the Stanford Sentiment Treebank using the original serialization format:
> (3 (2 this) (3 (2 is) (3 good ) )
"""
def to_file(self, path, mode="w"):
"""
Save the corpus to a text file in the
original format.
Arguments:
----------
path : str, where to save the corpus.
mode : str, how to open the file.
"""
with open(path, mode=mode) as f:
for tree in self:
for label, line in tree.to_labeled_lines():
f.write(line + "\n")
|
JonathanRaiman/pytreebank
|
pytreebank/parse.py
|
LabeledTreeCorpus.to_file
|
python
|
def to_file(self, path, mode="w"):
with open(path, mode=mode) as f:
for tree in self:
for label, line in tree.to_labeled_lines():
f.write(line + "\n")
|
Save the corpus to a text file in the
original format.
Arguments:
----------
path : str, where to save the corpus.
mode : str, how to open the file.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L127-L140
| null |
class LabeledTreeCorpus(list):
"""
Read in the Stanford Sentiment Treebank using the original serialization format:
> (3 (2 this) (3 (2 is) (3 good ) )
"""
def labels(self):
"""
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
"""
labelings = OrderedDict()
for tree in self:
for label, line in tree.to_labeled_lines():
labelings[line] = label
return labelings
|
JonathanRaiman/pytreebank
|
pytreebank/treelstm.py
|
import_tree_corpus
|
python
|
def import_tree_corpus(labels_path, parents_path, texts_path):
with codecs.open(labels_path, "r", "UTF-8") as f:
label_lines = f.readlines()
with codecs.open(parents_path, "r", "UTF-8") as f:
parent_lines = f.readlines()
with codecs.open(texts_path, "r", "UTF-8") as f:
word_lines = f.readlines()
assert len(label_lines) == len(parent_lines)
assert len(label_lines) == len(word_lines)
trees = []
for labels, parents, words in zip(label_lines, parent_lines, word_lines):
labels = [int(l) + 2 for l in labels.strip().split(" ")]
parents = [int(l) for l in parents.strip().split(" ")]
words = words.strip().split(" ")
assert len(labels) == len(parents)
trees.append(read_tree(parents, labels, words))
return trees
|
Import dataset from the TreeLSTM data generation scrips.
Arguments:
----------
labels_path : str, where are labels are stored (should be in
data/sst/labels.txt).
parents_path : str, where the parent relationships are stored
(should be in data/sst/parents.txt).
texts_path : str, where are strings for each tree are stored
(should be in data/sst/sents.txt).
Returns:
--------
list<LabeledTree> : loaded example trees.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/treelstm.py#L8-L42
|
[
"def read_tree(parents, labels, words):\n \"\"\"\n Take as input a list of integers for parents\n and labels, along with a list of words, and\n reconstruct a LabeledTree.\n \"\"\"\n trees = {}\n root = None\n for i in range(1, len(parents) + 1):\n if not i in trees and parents[i - 1] != - 1:\n idx = i\n prev = None\n while True:\n parent = parents[idx - 1]\n if parent == -1:\n break\n tree = LabeledTree()\n if prev is not None:\n tree.add_child(prev)\n trees[idx] = tree\n tree.label = labels[idx - 1]\n if trees.get(parent) is not None:\n trees[parent].add_child(tree)\n break\n elif parent == 0:\n root = tree\n break\n else:\n prev = tree\n idx = parent\n assert assign_texts(root, words) == len(words)\n return root\n"
] |
"""
Special loading methods for importing dataset as processed
by the TreeLSTM code from https://github.com/stanfordnlp/treelstm
"""
from .labeled_trees import LabeledTree
import codecs
def assign_texts(node, words, next_idx=0):
    """
    Recursively assign the words to nodes by finding and
    assigning strings to the leaves of a tree in left
    to right order.
    """
    if node.children:
        # internal node: recurse left-to-right, threading the word cursor
        cursor = next_idx
        for child in node.children:
            cursor = assign_texts(child, words, cursor)
        return cursor
    # leaf node: consume the next word
    node.text = words[next_idx]
    return next_idx + 1
def read_tree(parents, labels, words):
    """
    Take as input a list of integers for parents
    and labels, along with a list of words, and
    reconstruct a LabeledTree.

    Arguments:
    ----------
    parents : list<int>, 1-based parent index for each node
              (0 marks the root; -1 entries are skipped).
    labels : list<int>, label for each node.
    words : list<str>, leaf strings in left-to-right order.

    Returns:
    --------
    LabeledTree : reconstructed tree.
    """
    trees = {}
    root = None
    # nodes are numbered 1..len(parents); for each node not built yet,
    # climb its ancestor chain until reaching an already-built node,
    # the root marker, or a skipped slot.
    for i in range(1, len(parents) + 1):
        if not i in trees and parents[i - 1] != - 1:
            idx = i
            prev = None
            while True:
                parent = parents[idx - 1]
                if parent == -1:
                    # skipped slot: abandon this chain
                    break
                tree = LabeledTree()
                if prev is not None:
                    tree.add_child(prev)
                trees[idx] = tree
                tree.label = labels[idx - 1]
                if trees.get(parent) is not None:
                    # reached an already-constructed ancestor: attach and stop
                    trees[parent].add_child(tree)
                    break
                elif parent == 0:
                    # parent index 0 marks the root of the whole tree
                    root = tree
                    break
                else:
                    # keep climbing toward the root
                    prev = tree
                    idx = parent
    # leaves receive the words left-to-right; counts must match exactly
    assert assign_texts(root, words) == len(words)
    return root
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.