id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3307858 | for _ in range(int(input())):
    # Competitive-programming task: for each test case, read n and an array l,
    # and print a companion array so that adjacent pairs cancel out.
    n=int(input())
    l=list(map(int,input().split()))
    if n%2==0:
        # Even n: for each adjacent pair (a, b) output (b, -a), since
        # a*b + b*(-a) == 0.
        for i in range(0,n,2):
            print(l[i+1],-l[i],end=" ")
        print()
    else:
        # Odd n: pair up the first n-3 elements as above, then fix the last
        # three with a case analysis that avoids a zero denominator/sum.
        temp=n-3
        for i in range(0,temp,2):
            print(l[i+1],-l[i],end=" ")
        if l[temp]+l[temp+1]!=0:
            print(l[temp+2],l[temp+2],-(l[temp]+l[temp+1]))
        elif l[temp]+l[temp+2]!=0:
            print(l[temp+1],-(l[temp]+l[temp+2]),l[temp+1])
        else:
            # Both previous sums are zero, so l[temp+1] + l[temp+2] pairs off
            # against the repeated l[temp].
            print(-(l[temp+1]+l[temp+2]),l[temp],l[temp])
| StarcoderdataPython |
3258304 | <gh_stars>0
# Print the first ten terms of an arithmetic progression (PA) given its first
# term and common difference, formatted as `a->b->...->ACABOU`.
primeiro=int(input('Primeiro termo:'))  # first term
razao=int(input('Razão:'))  # common difference
# Value of the 10th term: a1 + (n - 1) * r with n = 10.
decimo=primeiro+(10-1)*razao
# NOTE(review): razao == 0 makes range() raise ValueError (zero step) —
# confirm inputs are expected to be non-zero.
for c in range(primeiro,decimo+razao,razao):
    print('{}'.format(c), end='->')
print('ACABOU')
# arithmetic progression (PA) | StarcoderdataPython |
3241541 | <gh_stars>1-10
from sklearn.metrics import (
mean_squared_error,
mean_absolute_error,
)
from pmdarima.metrics import smape
from tsaugur.metrics.custom_metrics import (
mean_absolute_percentage_error,
root_mean_squared_error,
)
# String identifiers under which each performance metric is registered.
METRIC_KEY_MSE = "mse"
METRIC_KEY_MAE = "mae"
METRIC_KEY_RMSE = "rmse"
METRIC_KEY_MAPE = "mape"
METRIC_KEY_SMAPE = "smape"

# Registry mapping metric identifiers to metric callables (from scikit-learn,
# pmdarima, and the tsaugur custom metrics module).
METRICS = {
    METRIC_KEY_MSE: mean_squared_error,
    METRIC_KEY_MAE: mean_absolute_error,
    METRIC_KEY_RMSE: root_mean_squared_error,
    METRIC_KEY_MAPE: mean_absolute_percentage_error,
    METRIC_KEY_SMAPE: smape,
}
def get_metric(metric_key):
    """
    Return a model performance metric function.

    :param metric_key: Str, a unique identifier of a model performance metric.
    :return: A metric function object.
    :raises KeyError: If `metric_key` is not a registered metric identifier.
    """
    try:
        return METRICS[metric_key]
    except KeyError:
        # Re-raise with a helpful message listing valid identifiers; still a
        # KeyError, so existing callers' exception handling keeps working.
        raise KeyError(
            "Unknown metric %r. Available metrics: %s"
            % (metric_key, ", ".join(sorted(METRICS)))
        )
| StarcoderdataPython |
1665678 | class Solution:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
    """Return the minimum number of intervals to remove so the remaining
    intervals are pairwise non-overlapping (shared endpoints do not count
    as overlap).

    Greedy over intervals sorted by right endpoint: keep an interval when
    it starts at or after the end of the last kept one; everything else
    must be removed.
    """
    removed = 0
    last_end = -math.inf
    for interval in sorted(intervals, key=lambda x: x[1]):
        if interval[0] >= last_end:
            # Compatible with everything kept so far: keep it.
            last_end = interval[1]
        else:
            removed += 1
    return removed
| StarcoderdataPython |
3357078 | #!/usr/bin/env python
# encoding: utf-8
"""Azkaban CLI: a lightweight command line interface for Azkaban.
Usage:
azkaban build [-cp PROJECT] [-a ALIAS | -u URL | [-r] ZIP] [-o OPTION ...]
azkaban info [-p PROJECT] [-f | -o OPTION ... | [-i] JOB ...]
azkaban log [-a ALIAS | -u URL] EXECUTION [JOB]
azkaban run [-bkp PROJECT] [-a ALIAS | -u URL] [-e EMAIL ...]
[-o OPTION ...] FLOW [JOB ...]
azkaban schedule [-bkp PROJECT] [-a ALIAS | -u URL] [-e EMAIL ...]
[-o OPTION ...] [-s SPAN] (-d DATE) (-t TIME)
FLOW [JOB ...]
azkaban upload [-cp PROJECT] [-a ALIAS | -u URL] ZIP
azkaban -h | --help | -l | --log | -v | --version
Commands:
build* Build project and upload to Azkaban or save
locally the resulting archive.
info* View information about jobs or files.
log View workflow or job execution logs.
run Run jobs or workflows. If no job is specified,
the entire workflow will be executed.
schedule Schedule a workflow to be run at a specified
date and time.
upload Upload archive to Azkaban server.
Arguments:
EXECUTION Execution ID.
JOB Job name.
FLOW Workflow name. Recall that in the Azkaban world
this is simply a job without children.
ZIP For `upload` command, the path to an existing
project zip archive. For `build`, the path
where the output archive will be built. If it
points to a directory, the archive will be
named after the project name (and version, if
present) and created in said directory.
Options:
-a ALIAS --alias=ALIAS Alias to saved URL and username. Will also try
to reuse session IDs for later connections.
-b --bounce Skip execution if workflow is already running.
-c --create Create the project if it does not exist.
-d DATE --date=DATE Date used for first run of a schedule. It must
be in the format `MM/DD/YYYY`.
-e EMAIL --email=EMAIL Email address to be notified when the workflow
finishes (can be specified multiple times).
-f --files List project files instead of jobs. The first
column is the local path of the file, the
second the path of the file in the archive.
-h --help Show this message and exit.
-i --include-properties Include project properties with job options.
-k --kill Kill workflow on first job failure.
-l --log Show path to current log file and exit.
-o OPTION --option=OPTION Azkaban properties. Can either be the path to
a properties file or a single parameter
formatted as `key=value`, e.g. `-o
user.to.proxy=foo`. For the `build` and `run`
commands, these will be added to the project's
or run's properties respectively (potentially
overriding existing ones). For the `info`
command, this will cause only jobs with these
exact parameters to be displayed.
-p PROJECT --project=PROJECT Azkaban project. Can either be a project name
or a path to a python module/package defining
an `azkaban.Project` instance. Commands which
are followed by an asterisk will only work in
the latter case. If multiple projects are
registered, you can disambiguate as follows:
`--project=module:project_name`.
-r --replace Overwrite any existing file.
-s SPAN --span=SPAN Period to repeat the scheduled flow. Must be
in format `1d`, a combination of magnitude and
unit of repetition. If not specified, the flow
will be run only once.
-t TIME --time=TIME Time when a schedule should be run. Must be of
the format `hh,mm,(AM|PM),(PDT|UTC|..)`.
-u URL --url=URL Azkaban endpoint (with protocol, and optionally
a username): '[user@]protocol:endpoint'. E.g.
'http://azkaban.server'. The username defaults
to the current user, as determined by `whoami`.
If you often use the same url, consider using
the `--alias` option instead.
-v --version Show version and exit.
Azkaban CLI returns with exit code 1 if an error occurred and 0 otherwise.
"""
from azkaban import __version__, CLI_ARGS
from azkaban.project import Project
from azkaban.remote import Execution, Session
from azkaban.util import (AzkabanError, Config, catch, flatten, human_readable,
temppath, read_properties, write_properties)
from docopt import docopt
from requests.exceptions import HTTPError
import logging as lg
import os
import os.path as osp
import sys
_logger = lg.getLogger(__name__)
def _forward(args, names):
    """Forward subset of arguments from initial dictionary.

    :param args: Dictionary of parsed arguments (output of `docopt.docopt`).
    :param names: List of names that will be included.

    Keys are normalized to keyword-argument form: lowercased, leading dashes
    stripped, inner dashes replaced with underscores, and a single leading
    underscore prepended (e.g. `'--include-properties'` becomes
    `'_include_properties'`).
    """
    names = set(names)
    return dict(
        ('_%s' % (k.lower().lstrip('-').replace('-', '_'), ), v)
        for (k, v) in args.items() if k in names
    )
def _parse_option(_option):
    """Parse `--option` argument.

    :param _option: `--option` argument (list of CLI values).

    Returns a dictionary. Entries without an `'='` are treated as paths to
    properties files and loaded first; entries of the form `key=value` are
    then applied on top (so inline options override file-provided ones).
    """
    paths = (opt for opt in _option if not '=' in opt)
    opts = read_properties(*paths)
    try:
        opts.update(dict(s.split('=', 1) for s in _option if '=' in s))
    except ValueError:
        raise AzkabanError('Invalid `--option` flag.')
    return opts
def _parse_project(_project, require_project=False):
    """Parse `--project` argument into `(name, project)`.

    :param _project: `--project` argument.
    :param require_project: Fail if we fail to load the project.

    Note that `name` is guaranteed to be non-`None` (this function will throw
    an exception otherwise) but `project` can be.

    The rules are as follows:

    + If at least one `':'` is found in `_project` then the rightmost one is
      interpreted as delimiter between the path to the module and the project
      name.
    + Else:
      + We first try to interpret `_project` as a module path and find a
        unique project inside.
      + If the above attempt raises an `ImportError`, we interpret it as a
        name.
    """
    default_module = Config().get_option('azkaban', 'project', 'jobs')
    projects = {}
    _project = _project or default_module
    if ':' in _project:
        # unambiguous case
        path, name = _project.rsplit(':', 1)
        try:
            projects = Project.load(path or default_module)
            # adding the default here lets options like `-p :name` work as intended
        except ImportError:
            pass
    else:
        # the option could be a name or module
        try:
            # try first as a module
            projects = Project.load(_project)
        except ImportError:
            # if that fails, try as a name: load the default module instead
            name = _project
            try:
                projects = Project.load(default_module)
            except ImportError:
                pass
        else:
            name = None
    if name:
        if name in projects:
            return name, projects[name]
        elif projects:
            # harder consistency requirement: a module was loaded but the
            # requested name is not registered in it.
            raise AzkabanError(
                'Project %r not found. Available projects: %s\n'
                'You can also specify another location using the `--project` option.'
                % (name, ', '.join(projects))
            )
        elif require_project:
            raise AzkabanError(
                'This command requires a project configuration module.\n'
                'You can specify another location using the `--project` option.'
            )
        else:
            # Plain name with no loadable module: valid for commands that only
            # need a project name (e.g. `run`, `upload`).
            return name, None
    else:
        if not projects:
            raise AzkabanError(
                'No registered project found in %r.\n'
                'You can also specify another location using the `--project` option.'
                % (_project, )
            )
        elif len(projects) > 1:
            raise AzkabanError(
                'Multiple registered projects found: %s\n'
                'You can use the `--project` option to disambiguate.'
                % (', '.join(projects), )
            )
        else:
            # Exactly one registered project: unambiguous.
            return projects.popitem()
def _get_project_name(_project):
    """Return project name.

    :param _project: `--project` argument.
    """
    return _parse_project(_project)[0]
def _load_project(_project):
    """Resolve project from CLI argument.

    :param _project: `--project` argument.

    Raises `AzkabanError` when no project configuration module can be found.
    """
    try:
        name, project = _parse_project(_project, require_project=True)
    except ImportError:
        raise AzkabanError(
            'This command requires a project configuration module which was not '
            'found.\nYou can specify another location using the `--project` option.'
        )
    else:
        return project
def _upload_callback(cur_bytes, tot_bytes, file_index, _stdout=sys.stdout):
    """Callback for streaming upload.

    :param cur_bytes: Total bytes uploaded so far.
    :param tot_bytes: Total bytes to be uploaded.
    :param file_index: (0-based) index of the file currently uploaded.
    :param _stdout: Performance caching.

    Writes an in-place progress line (carriage return, no newline) while
    uploading, and a validation notice once all bytes have been sent.
    """
    if cur_bytes != tot_bytes:
        _stdout.write(
            'Uploading project: %.1f%%\r'
            % (100. * cur_bytes / tot_bytes, )
        )
    else:
        _stdout.write('Validating project... \r')
    _stdout.flush()
def view_info(project, _files, _option, _job, _include_properties):
    """List jobs in project.

    :param project: `azkaban.Project` instance.
    :param _files: `--files` flag; list project files instead of jobs.
    :param _option: `--option` argument; filter jobs by exact option values.
    :param _job: List of job names to show in detail.
    :param _include_properties: `--include-properties` flag.
    """
    if _job:
        # Detailed view of specific jobs (optionally preceded by the
        # project-level properties).
        if _include_properties:
            write_properties(
                flatten(project.properties),
                header='project.properties'
            )
        for name in _job:
            project.jobs[name].build(header='%s.job' % (name, ))
    elif _files:
        # Local path and archive path of every project file.
        for path, archive_path in sorted(project.files):
            sys.stdout.write('%s\t%s\n' % (osp.relpath(path), archive_path))
    else:
        # Job listing, optionally filtered by exact option values; each job is
        # tagged 'J' if some other job depends on it, 'F' if it is a flow
        # (no children).
        options = _parse_option(_option).items()
        jobs = sorted(project.jobs.items())
        dependencies = set(
            dep
            for _, job in jobs
            for dep in job.options.get('dependencies', '').split(',')
        )
        for name, job in jobs:
            if all(job.options.get(k) == v for k, v in options):
                sys.stdout.write(
                    '%s\t%s\n'
                    % ('J' if name in dependencies else 'F', name, )
                )
def view_log(_execution, _job, _url, _alias):
    """View workflow or job execution logs.

    :param _execution: Execution ID.
    :param _job: List with at most one job name; when given, show that job's
        logs instead of the whole workflow's.
    :param _url: `--url` argument.
    :param _alias: `--alias` argument.
    """
    session = Session(_url, _alias)
    exc = Execution(session, _execution)
    logs = exc.job_logs(_job[0]) if _job else exc.logs()
    try:
        # NOTE(review): writing `line.encode('utf-8')` looks like Python-2-era
        # code; on Python 3 this would print a bytes repr — confirm target
        # interpreter.
        for line in logs:
            sys.stdout.write('%s\n' % (line.encode('utf-8'), ))
    except HTTPError:
        # Azkaban responds with 500 if the execution or job isn't found
        if _job:
            raise AzkabanError(
                'Execution %s and/or job %s not found.', _execution, _job
            )
        else:
            raise AzkabanError('Execution %s not found.', _execution)
def run_workflow(project_name, _flow, _job, _url, _alias, _bounce, _kill,
                 _email, _option):
    """Run workflow.

    :param project_name: Name of the Azkaban project.
    :param _flow: Workflow name.
    :param _job: Optional list of jobs to run (empty runs the whole flow).
    :param _url: `--url` argument.
    :param _alias: `--alias` argument.
    :param _bounce: `--bounce` flag; skip execution if already running.
    :param _kill: `--kill` flag; cancel the flow on first job failure.
    :param _email: List of notification email addresses.
    :param _option: `--option` argument; extra run properties.
    """
    session = Session(_url, _alias)
    res = session.run_workflow(
        name=project_name,
        flow=_flow,
        jobs=_job,
        concurrent=not _bounce,
        on_failure='cancel' if _kill else 'finish',
        emails=_email,
        properties=_parse_option(_option),
    )
    exec_id = res['execid']
    job_names = ', jobs: %s' % (', '.join(_job), ) if _job else ''
    sys.stdout.write(
        'Flow %s successfully submitted (execution id: %s%s).\n'
        'Details at %s/executor?execid=%s\n'
        % (_flow, exec_id, job_names, session.url, exec_id)
    )
def schedule_workflow(project_name, _date, _time, _span, _flow, _job, _url,
                      _alias, _bounce, _kill, _email, _option):
    """Schedule workflow.

    :param project_name: Name of the Azkaban project.
    :param _date: `--date` argument (`MM/DD/YYYY`) for the first run.
    :param _time: `--time` argument (`hh,mm,(AM|PM),(PDT|UTC|..)`).
    :param _span: `--span` argument; repetition period (e.g. `1d`), or None
        to run only once.
    :param _flow: Workflow name.
    :param _job: Optional list of jobs to run.
    :param _url: `--url` argument.
    :param _alias: `--alias` argument.
    :param _bounce: `--bounce` flag; skip execution if already running.
    :param _kill: `--kill` flag; cancel the flow on first job failure.
    :param _email: List of notification email addresses.
    :param _option: `--option` argument; extra run properties.
    """
    session = Session(_url, _alias)
    session.schedule_workflow(
        name=project_name,
        flow=_flow,
        date=_date,
        time=_time,
        period=_span,
        jobs=_job,
        concurrent=not _bounce,
        on_failure='cancel' if _kill else 'finish',
        emails=_email,
        properties=_parse_option(_option),
    )
    sys.stdout.write(
        'Flow %s scheduled successfully.\n' % (_flow, )
    )
def upload_project(project_name, _zip, _url, _alias, _create):
    """Upload project archive to the Azkaban server.

    :param project_name: Name of the Azkaban project.
    :param _zip: Path to an existing project zip archive.
    :param _url: `--url` argument.
    :param _alias: `--alias` argument.
    :param _create: `--create` flag; create the project if it doesn't exist.
    """
    session = Session(_url, _alias)
    while True:
        try:
            res = session.upload_project(
                name=project_name,
                path=_zip,
                callback=_upload_callback
            )
        except AzkabanError as err:
            # Only create-and-retry when the failure is the project not
            # existing (same guard as in `build_project`); previously ANY
            # upload error was retried whenever `--create` was set, which
            # could loop forever on persistent failures.
            if _create and str(err).endswith("doesn't exist."):
                session.create_project(project_name, project_name)
            else:
                raise err
        else:
            break
    sys.stdout.write(
        'Project %s successfully uploaded (id: %s, size: %s, version: %s).\n'
        'Details at %s/manager?project=%s\n'
        % (
            project_name,
            res['projectId'],
            human_readable(osp.getsize(_zip)),
            res['version'],
            session.url,
            project_name,
        )
    )
def build_project(project, _zip, _url, _alias, _replace, _create, _option):
    """Build project.

    :param project: `azkaban.Project` instance.
    :param _zip: Output path; when given, the archive is saved locally,
        otherwise it is built to a temporary file and uploaded.
    :param _url: `--url` argument.
    :param _alias: `--alias` argument.
    :param _replace: `--replace` flag; overwrite any existing file.
    :param _create: `--create` flag; create the project if it doesn't exist.
    :param _option: `--option` argument; extra project properties.
    """
    if _option:
        project.properties = flatten(project.properties)
        # to make sure we properly override nested options, we flatten first
        project.properties.update(_parse_option(_option))
    if _zip:
        # Local build: write the archive to `_zip` (or inside it when it is a
        # directory, named after the versioned project).
        if osp.isdir(_zip):
            _zip = osp.join(_zip, '%s.zip' % (project.versioned_name, ))
        project.build(_zip, overwrite=_replace)
        sys.stdout.write(
            'Project %s successfully built and saved as %r (size: %s).\n'
            % (project, _zip, human_readable(osp.getsize(_zip)))
        )
    else:
        # Build to a temporary archive and upload it, optionally creating the
        # project on the server first.
        with temppath() as _zip:
            project.build(_zip)
            archive_name = '%s.zip' % (project.versioned_name, )
            session = Session(_url, _alias)
            while True:
                try:
                    res = session.upload_project(
                        name=project.name,
                        path=_zip,
                        archive_name=archive_name,
                        callback=_upload_callback
                    )
                except AzkabanError as err:
                    # Create-and-retry only when the project doesn't exist.
                    if _create and str(err).endswith("doesn't exist."):
                        session.create_project(project.name, project.name)
                    else:
                        raise err
                else:
                    break
            sys.stdout.write(
                'Project %s successfully built and uploaded '
                '(id: %s, size: %s, upload: %s).\n'
                'Details at %s/manager?project=%s\n'
                % (
                    project,
                    res['projectId'],
                    human_readable(osp.getsize(_zip)),
                    res['version'],
                    session.url,
                    project,
                )
            )
@catch(AzkabanError)
def main(argv=None):
    """Entry point.

    :param argv: Optional list of command line arguments; defaults to
        `sys.argv[1:]`.
    """
    # enable general logging
    logger = lg.getLogger()
    logger.setLevel(lg.DEBUG)
    handler = Config().get_file_handler('azkaban')
    if handler:
        logger.addHandler(handler)
    # parse arguments
    argv = argv or sys.argv[1:]
    _logger.debug('Running command %r from %r.', ' '.join(argv), os.getcwd())
    # Forward `argv` to docopt; previously it was computed above but never
    # passed, so `main(argv)` silently parsed `sys.argv` instead.
    args = docopt(__doc__, argv=argv, version=__version__)
    CLI_ARGS.update(args)
    # do things
    if args['--log']:
        if handler:
            sys.stdout.write('%s\n' % (handler.baseFilename, ))
        else:
            raise AzkabanError('No log file active.')
    elif args['build']:
        build_project(
            _load_project(args['--project']),
            **_forward(
                args,
                ['ZIP', '--url', '--alias', '--replace', '--create', '--option']
            )
        )
    elif args['log']:
        view_log(
            **_forward(args, ['EXECUTION', 'JOB', '--url', '--alias'])
        )
    elif args['info']:
        view_info(
            _load_project(args['--project']),
            **_forward(args, ['--files', '--option', 'JOB', '--include-properties'])
        )
    elif args['run']:
        run_workflow(
            _get_project_name(args['--project']),
            **_forward(
                args,
                [
                    'FLOW', 'JOB', '--bounce', '--url', '--alias', '--kill',
                    '--email', '--option',
                ]
            )
        )
    elif args['schedule']:
        schedule_workflow(
            _get_project_name(args['--project']),
            **_forward(
                args,
                [
                    'FLOW', 'JOB', '--bounce', '--url', '--alias', '--kill',
                    '--email', '--option', '--date', '--time', '--span'
                ]
            )
        )
    elif args['upload']:
        upload_project(
            _get_project_name(args['--project']),
            **_forward(args, ['ZIP', '--create', '--url', '--alias'])
        )


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4801183 | import sightlines as los
import numpy as np
def test_closest_int():
    # Cells are (z, ?, r) triples with integer z in 0..9; the closest cell to
    # z = 3.1 is index 3, at distance 0.1.
    nice_z_r_list = [(0,0,10), (1,0,11), (2,0,12), (3,0,13), (4,0,14),
                     (5,0,15), (6,0,16), (7,0,17), (8,0,18), (9,0,19)]
    cell_index, dist = los.closest_cell_to(3.1, nice_z_r_list)
    assert(cell_index == 3)
    np.testing.assert_almost_equal(dist, 0.1)
def test_tie_breaker():
    # z = 3.5 is equidistant from cells 3 and 4; the first minimum wins.
    nice_z_r_list = [(0,0,10), (1,0,11), (2,0,12), (3,0,13), (4,0,14),
                     (5,0,15), (6,0,16), (7,0,17), (8,0,18), (9,0,19)]
    cell_index, dist = los.closest_cell_to(3.5, nice_z_r_list)
    assert(cell_index == 3)  # In the event of a tie, the first min is chosen.
    np.testing.assert_almost_equal(dist, 0.5)
def test_empty():
    # An empty sightline has no cells: both outputs are None.
    cell_index, dist = los.closest_cell_to(3.5, [])
    assert(cell_index == None)
    assert(dist == None) | StarcoderdataPython |
1772961 | <reponame>GuillaumeRochette/HumanViewSynthesis
from typing import Tuple, Union
import torch
from torchvision.transforms import ToTensor as ImageToTensor
from data.transforms import (
MaskToTensor,
DynamicSquareCrop,
Resize,
stabilized_padding,
)
from data.Human36M.skeleton import JOINTS
from data.Human36M.statistics import MEDIAN_PIXEL
from geometry.extrinsic import world_to_camera
class Human36MImageTransform(object):
    # Per-sample transform for Human3.6M frames: wraps the 2-D pose with a
    # confidence mask, converts the 3-D pose from world to camera space,
    # optionally square-crops around the 2-D pose and resizes, keeps the
    # intrinsics matrix K consistent with those image-space changes, and
    # finally converts image/mask to tensors.
    def __init__(
        self,
        cr_margin: float = None,
        re_size: Union[int, Tuple[int, int]] = None,
    ):
        # cr_margin: margin for the dynamic square crop (None disables crop).
        # re_size: target size for resizing (None disables resize).
        self.i2t = ImageToTensor()
        self.m2t = MaskToTensor()
        self.crop = None
        if cr_margin is not None:
            self.crop = DynamicSquareCrop(margin=cr_margin)
        self.resize = None
        if re_size is not None:
            self.resize = Resize(size=re_size)

    def __call__(self, input: dict) -> dict:
        # Wrap the raw 2-D pose with an all-True confidence mask (Human3.6M
        # ground truth has no missing joints).
        p = input["pose_2d"]
        c = torch.ones(len(JOINTS), 1, 1, dtype=torch.bool)
        input["pose_2d"] = {
            "p": p,
            "c": c,
        }
        # Move the 3-D pose from world to camera coordinates, then split it
        # into the root (hip) position and root-relative joint positions.
        p = input["pose_3d"]
        p = world_to_camera(
            xyz=p,
            R=input["R"][None, :, :],
            t=input["t"][None, :, :],
        )
        c = torch.ones(len(JOINTS), 1, 1, dtype=torch.bool)
        input["pose_3d"] = {
            "root": {
                "p": p[JOINTS["HipCenter"] : JOINTS["HipCenter"] + 1, :, :],
                "c": c[JOINTS["HipCenter"] : JOINTS["HipCenter"] + 1, :, :],
            },
            "relative": {
                "p": p - p[JOINTS["HipCenter"] : JOINTS["HipCenter"] + 1, :, :],
                "c": c & c[JOINTS["HipCenter"] : JOINTS["HipCenter"] + 1, :, :],
            },
        }
        x_off, y_off = 0.0, 0.0
        if self.crop:
            # Bounding box of the confident 2-D joints drives the square crop
            # (falls back to all joints when none are confident).
            p, c = input["pose_2d"]["p"], input["pose_2d"]["c"]
            points = p[c.expand_as(p)].reshape(-1, 2, 1)
            points = points if len(points) > 0 else p
            (m, _), (M, _) = points.min(dim=-3), points.max(dim=-3)
            x_m, y_m = m.squeeze(dim=-1).tolist()
            x_M, y_M = M.squeeze(dim=-1).tolist()
            input["image"], (x_off, y_off) = self.crop(
                image=input["image"],
                bounds=(x_m, y_m, x_M, y_M),
            )
            input["mask"], (_, _) = self.crop(
                image=input["mask"],
                bounds=(x_m, y_m, x_M, y_M),
            )
            # Shift the principal point so K stays valid for the crop.
            input["K"] = input["K"] - torch.tensor(
                [
                    [0.0, 0.0, x_off],
                    [0.0, 0.0, y_off],
                    [0.0, 0.0, 0.0],
                ]
            )
            # input["pose_2d"]["p"] = input["pose_2d"]["p"] - torch.tensor([[x_off], [y_off]])
        input["crop_offset"] = torch.tensor([x_off, y_off])
        # `.size` here is the (width, height) of the image before tensor
        # conversion — presumably a PIL image; confirm upstream loader.
        input["cropped_resolution"] = torch.tensor(input["image"].size)
        if self.resize:
            input["image"], (w_r, h_r) = self.resize(image=input["image"])
            input["mask"], (_, _) = self.resize(image=input["mask"])
            # Scale focal lengths and principal point by the resize ratios.
            input["K"] = input["K"] * torch.tensor(
                [
                    [w_r, 1.0, w_r],
                    [1.0, h_r, h_r],
                    [1.0, 1.0, 1.0],
                ]
            )
            # input["pose_2d"]["p"] = input["pose_2d"]["p"] * torch.tensor([[w_r], [h_r]])
        input["resized_resolution"] = torch.tensor(input["image"].size)
        input["stabilized_padding"] = stabilized_padding(
            crop_offset=input["crop_offset"],
            original_resolution=input["resolution"],
            cropped_resolution=input["cropped_resolution"],
            resized_resolution=input["resized_resolution"],
        )
        input["image"] = self.i2t(input["image"])
        input["mask"] = self.m2t(input["mask"])
        # Composite the masked subject over the dataset's median pixel value
        # (instead of black, see commented-out alternative).
        # input["masked_image"] = input["mask"] * input["image"]
        input["masked_image"] = (
            input["mask"] * input["image"]
            + ~input["mask"] * MEDIAN_PIXEL[..., None, None]
        )
        return input
class Human36MImagePairTransform(object):
    # Same pipeline as `Human36MImageTransform`, applied independently to the
    # two views "A" and "B" of a sample; the shared world-space 3-D pose lives
    # under input["W"], while per-view data (image, mask, R, t, K, 2-D pose)
    # lives under input["A"] / input["B"].
    def __init__(
        self,
        cr_margin_A: Union[float, Tuple[float, float]] = None,
        cr_margin_B: Union[float, Tuple[float, float]] = None,
        re_size_A: Union[int, Tuple[int, int]] = None,
        re_size_B: Union[int, Tuple[int, int]] = None,
    ):
        # Per-view crop margins and resize targets (None disables the step
        # for that view).
        self.i2t = ImageToTensor()
        self.m2t = MaskToTensor()
        self.views = ["A", "B"]
        self.crop = {
            "A": None,
            "B": None,
        }
        if cr_margin_A is not None:
            self.crop["A"] = DynamicSquareCrop(margin=cr_margin_A)
        if cr_margin_B is not None:
            self.crop["B"] = DynamicSquareCrop(margin=cr_margin_B)
        self.resize = {
            "A": None,
            "B": None,
        }
        if re_size_A is not None:
            self.resize["A"] = Resize(size=re_size_A)
        if re_size_B is not None:
            self.resize["B"] = Resize(size=re_size_B)

    def __call__(self, input: dict) -> dict:
        for v in self.views:
            # Wrap the view's 2-D pose with an all-True confidence mask.
            p = input[v]["pose_2d"]
            c = torch.ones(len(JOINTS), 1, 1, dtype=torch.bool)
            input[v]["pose_2d"] = {
                "p": p,
                "c": c,
            }
            # Project the shared world-space 3-D pose into this view's camera
            # frame, then split into root and root-relative parts.
            p = input["W"]["pose_3d"]
            p = world_to_camera(
                xyz=p,
                R=input[v]["R"][None, :, :],
                t=input[v]["t"][None, :, :],
            )
            c = torch.ones(len(JOINTS), 1, 1, dtype=torch.bool)
            input[v]["pose_3d"] = {
                "root": {
                    "p": p[JOINTS["HipCenter"] : JOINTS["HipCenter"] + 1, :, :],
                    "c": c[JOINTS["HipCenter"] : JOINTS["HipCenter"] + 1, :, :],
                },
                "relative": {
                    "p": p - p[JOINTS["HipCenter"] : JOINTS["HipCenter"] + 1, :, :],
                    "c": c & c[JOINTS["HipCenter"] : JOINTS["HipCenter"] + 1, :, :],
                },
            }
            x_off, y_off = 0.0, 0.0
            if self.crop[v]:
                # Bounding box of the confident 2-D joints drives the crop.
                p, c = input[v]["pose_2d"]["p"], input[v]["pose_2d"]["c"]
                points = p[c.expand_as(p)].reshape(-1, 2, 1)
                points = points if len(points) > 0 else p
                (m, _), (M, _) = points.min(dim=-3), points.max(dim=-3)
                x_m, y_m = m.squeeze(dim=-1).tolist()
                x_M, y_M = M.squeeze(dim=-1).tolist()
                input[v]["image"], (x_off, y_off) = self.crop[v](
                    image=input[v]["image"],
                    bounds=(x_m, y_m, x_M, y_M),
                )
                input[v]["mask"], (_, _) = self.crop[v](
                    image=input[v]["mask"],
                    bounds=(x_m, y_m, x_M, y_M),
                )
                # Shift the principal point so K stays valid for the crop.
                input[v]["K"] = input[v]["K"] - torch.tensor(
                    [
                        [0.0, 0.0, x_off],
                        [0.0, 0.0, y_off],
                        [0.0, 0.0, 0.0],
                    ]
                )
                # input[v]["pose_2d"]["p"] = input[v]["pose_2d"]["p"] - torch.tensor([[x_off], [y_off]])
            input[v]["crop_offset"] = torch.tensor([x_off, y_off])
            input[v]["cropped_resolution"] = torch.tensor(input[v]["image"].size)
            if self.resize[v]:
                input[v]["image"], (w_r, h_r) = self.resize[v](image=input[v]["image"])
                input[v]["mask"], (_, _) = self.resize[v](image=input[v]["mask"])
                # Scale focal lengths and principal point by resize ratios.
                input[v]["K"] = input[v]["K"] * torch.tensor(
                    [
                        [w_r, 1.0, w_r],
                        [1.0, h_r, h_r],
                        [1.0, 1.0, 1.0],
                    ]
                )
                # input[v]["pose_2d"]["p"] = input[v]["pose_2d"]["p"] * torch.tensor([[w_r], [h_r]])
            input[v]["resized_resolution"] = torch.tensor(input[v]["image"].size)
            input[v]["stabilized_padding"] = stabilized_padding(
                crop_offset=input[v]["crop_offset"],
                original_resolution=input[v]["resolution"],
                cropped_resolution=input[v]["cropped_resolution"],
                resized_resolution=input[v]["resized_resolution"],
            )
            input[v]["image"] = self.i2t(input[v]["image"])
            input[v]["mask"] = self.m2t(input[v]["mask"])
            # Composite the masked subject over the dataset's median pixel.
            # input[v]["masked_image"] = input[v]["mask"] * input[v]["image"]
            input[v]["masked_image"] = (
                input[v]["mask"] * input[v]["image"]
                + ~input[v]["mask"] * MEDIAN_PIXEL[..., None, None]
            )
        return input
| StarcoderdataPython |
1735412 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import _astype_copy_false
class WeightsComputer:
    '''
    Computes per-term weight vectors for a sparse document-term matrix.

    Weight methods:
    idf : log( (1 + n) / (1 + df(t)) ) + 1
    dfs : Distinguishing feature selector
    chi2 : Term Weighting Based on Chi-Square Statistic
    ig : Term weighting based on information gain
    igm: Term Weighting Based on Inverse Gravity Moment
    pb : Probability-Based Term Weighting
    idf_icf : Term Weighting Based on Inverse Class Frequency
    rf : Term Weighting Based on Relevance Frequency
    idf_icsdf : Term Weighting Based on Inverse Class Density Frequency
    iadf : inverse average document frequency
    iadf_norm : inverse average document frequency normalized
    '''

    def __init__(
        self,
        dtype,
        weight_method: str,
        smooth_idf: bool = True
    ):
        # Resolve the weighting method by name; `self.method` becomes the
        # bound method, called as `self.method(X, y)`.
        try:
            # Some callers pass the method name wrapped in a 1-tuple.
            if type(weight_method) is tuple:
                weight_method = weight_method[0]
            self.method = getattr(self, weight_method)
        except AttributeError:
            print(f'Method {weight_method} is not implemented.')
            print('Check the list of available parameters')
        self.dtype = dtype
        # Same tuple-unwrapping convention for dtype.
        if type(self.dtype) is tuple:
            self.dtype = self.dtype[0]
        self.smooth_idf = smooth_idf
        # Scaling constant for the IGM weight.
        self.igm_lambda = 7.0
        # Lazily-built per-class contingency tables (see `make_cross_tab`).
        self.cross_tab = None

    @staticmethod
    def _document_frequency(X):
        """Count the number of non-zero values for each feature in sparse X."""
        # (A stray debug `print(X.shape)` was removed from library code here.)
        if sp.isspmatrix_csr(X):
            return np.bincount(X.indices, minlength=X.shape[1])
        else:
            return np.diff(X.indptr)

    def make_cross_tab(self, X, y):
        '''Computes Two-way contingency table of a term t.

        Parameters
        ----------
        X : sparse matrix of (n_samples, n_features)
            A matrix of term/token counts.
        y : vector of class labels (n_samples,)

        Stores in `self.cross_tab` one `[a, b, c, d]` entry per class, where
        for each term: a = docs in class containing it, b = docs in class
        without it, c = docs outside class containing it, d = docs outside
        class without it (each smoothed by `smooth_idf`).
        '''
        cross_tab = []
        n_docs = X.shape[0]
        classes, counts = np.unique(y, return_counts=True)
        for i, cls in enumerate(classes):
            cat = np.array(np.where(y == cls)).flatten()
            not_cat = np.array(np.where(y != cls)).flatten()
            # Belong to cls, contain term t
            a = self._document_frequency(X[cat]) + int(self.smooth_idf)
            # Belong to cls, doesn't contain term t
            b = counts[i] - a + 2 * int(self.smooth_idf)
            # Don't belong to cls, contain term t
            c = self._document_frequency(X[not_cat]) + int(self.smooth_idf)
            # Don't belong to cls, doesn't contain term t
            d = (n_docs - counts[i]) - c + 2 * int(self.smooth_idf)
            cross_tab.append([a, b, c, d])
        self.cross_tab = cross_tab

    def idf(self, X, y):
        """Smoothed inverse document frequency per term."""
        # log+1 instead of log makes sure terms with zero idf don't get
        # suppressed entirely.
        n_docs = X.shape[0]
        df = self._document_frequency(X)
        df = df.astype(self.dtype, **_astype_copy_false(df))
        # perform idf smoothing if required
        df += int(self.smooth_idf)
        n_docs += int(self.smooth_idf)
        return np.log(n_docs / df) + 1

    def dfs(self, X, y):
        """Distinguishing feature selector weight per term."""
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        weight_factors = []
        for category in self.cross_tab:
            a, b, c, d = category
            # `np.max(np.c_[x, 1], axis=1)` clamps denominators to >= 1.
            nominator = a / (np.max(np.c_[a + c, np.ones_like(a + c)], axis=1))
            denom_first = np.max(np.c_[a + b, np.ones_like(a + b)], axis=1)
            denom_second = np.max(np.c_[c + d, np.ones_like(c + d)], axis=1)
            denominator = b / denom_first + c / denom_second + 1
            weight_factors.append(nominator / denominator)
        return np.sum(weight_factors, axis=0)

    def chi2(self, X, y):
        """Chi-square statistic weight per term (max over classes)."""
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        # N documents
        D = X.shape[0]
        weight_factors = []
        for category in self.cross_tab:
            a, b, c, d = category
            nominator = np.square(a * d - b * c)
            denominator = (a + c) * (b + d) * (a + b) * (c + d)
            denominator = np.max(np.c_[denominator, np.ones_like(denominator)], axis=-1)
            weight_factors.append(nominator / denominator)
        return D * np.max(weight_factors, axis=0)

    def ig(self, X, y):
        """Information-gain weight per term (max over classes)."""
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        # N documents
        N = X.shape[0]
        weight_factors = []
        for category in self.cross_tab:
            a, b, c, d = category
            # Denominators are clamped to >= 1 to avoid division by zero.
            den_first = np.max(np.c_[(a + c) * (a + b), np.ones_like((a + c) * (a + b))], axis=-1)
            first = a / N * np.log(1 + (a * N) / den_first)
            den_second = np.max(np.c_[(b + d) * (a + b), np.ones_like((b + d) * (a + b))], axis=-1)
            second = b / N * np.log(1 + (b * N) / den_second)
            den_third = np.max(np.c_[(a + c) * (c + d), np.ones_like((a + c) * (c + d))], axis=-1)
            third = c / N * np.log(1 + (c * N) / den_third)
            den_fourth = np.max(np.c_[(b + d) * (c + d), np.ones_like((b + d) * (c + d))], axis=-1)
            fourth = d / N * np.log(1 + (d * N) / den_fourth)
            weight_factors.append(first + second + third + fourth)
        return np.max(weight_factors, axis=0)

    def igm(self, X, y):
        """Inverse gravity moment weight per term."""
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        # Per-class document frequencies (the `a` entries), sorted descending.
        class_based_dfs = np.sort([cat[0] for cat in self.cross_tab], axis=0)[::-1]
        n_classes = class_based_dfs.shape[0]
        max_freq = np.max(class_based_dfs, axis=0)
        igm = max_freq / np.sum(class_based_dfs.T @ np.arange(1, n_classes + 1), axis=0)
        return 1 + self.igm_lambda * igm

    def pb(self, X, y):
        """Probability-based weight per term (max over classes)."""
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        weight_factors = []
        for category in self.cross_tab:
            a, b, c, d = category
            first = a / (np.max(np.c_[b, np.ones_like(b)], axis=1))
            second = a / (np.max(np.c_[c, np.ones_like(c)], axis=1))
            pb = np.log(1 + first * second)
            weight_factors.append(pb)
        return np.max(weight_factors, axis=0)

    def idf_icf(self, X, y):
        """IDF multiplied by inverse class frequency."""
        # log+1 instead of log makes sure terms with zero idf don't get
        # suppressed entirely.
        n_docs = X.shape[0]
        df = self._document_frequency(X)
        df = df.astype(self.dtype, **_astype_copy_false(df))
        # perform idf smoothing if required
        df += int(self.smooth_idf)
        n_docs += int(self.smooth_idf)
        idf = np.log(n_docs / df) + 1
        n_classes = len(np.unique(y))
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        # Number of classes where term t occurs.
        class_factors = np.zeros(shape=(X.shape[1], ))
        for category in self.cross_tab:
            a, b, c, d = category
            # Undo the smoothing added in `make_cross_tab` before testing
            # for genuine occurrence.
            a = a - int(self.smooth_idf)
            class_factors += (a > 0)
        icf = np.log((n_classes + int(self.smooth_idf)) / (class_factors + int(self.smooth_idf))) + 1
        self.icf_mean = np.mean(icf)
        return idf * icf

    def rf(self, X, y):
        """Relevance-frequency weight per term (max over classes)."""
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        weight_factors = []
        for category in self.cross_tab:
            a, b, c, d = category
            rf = np.log(2 + a / (np.max(np.c_[c, np.ones_like(c)], axis=1)))
            weight_factors.append(rf)
        return np.max(weight_factors, axis=0)

    def idf_icsdf(self, X, y):
        """IDF multiplied by inverse class space density frequency."""
        # log+1 instead of log makes sure terms with zero idf don't get
        # suppressed entirely.
        n_docs = X.shape[0]
        df = self._document_frequency(X)
        df = df.astype(self.dtype, **_astype_copy_false(df))
        # perform idf smoothing if required
        df += int(self.smooth_idf)
        n_docs += int(self.smooth_idf)
        idf = np.log(n_docs / df) + 1
        classes, counts = np.unique(y, return_counts=True)
        n_classes = len(classes)
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        class_factors = []
        for i, category in enumerate(self.cross_tab):
            a, b, c, d = category
            a = a - int(self.smooth_idf)
            D_cls = counts[i]
            class_factors.append(a / D_cls)
        n_classes += int(self.smooth_idf)
        clf_sum = np.sum(class_factors, axis=0)
        icsdf = np.log(n_classes / clf_sum) + 1
        self.icsdf_mean = np.mean(icsdf)
        return idf * icsdf

    def iadf(self, X, y):
        """Inverse average document frequency per term."""
        D = X.shape[0]
        n_terms = X.shape[1]
        df = self._document_frequency(X)
        df = df.astype(self.dtype, **_astype_copy_false(df))
        mean_df = np.sum(df) / n_terms
        adf = np.square(df - mean_df) / n_terms
        return np.log((D + 1) / (adf + 1))

    def iadf_norm(self, X, y):
        """Min-max-normalized inverse average document frequency per term."""
        D = X.shape[0]
        n_terms = X.shape[1]
        df = self._document_frequency(X)
        df = df.astype(self.dtype, **_astype_copy_false(df))
        mean_df = np.sum(df) / n_terms
        adf = np.square(df - mean_df) / n_terms
        adf_1 = np.log(1 / (adf + 1)) + 1
        adf_2 = (adf_1 - np.min(adf_1)) / (np.max(adf_1) - np.min(adf_1))
        return np.log((D + 1) / (adf_2 + 1))
197632 | <filename>circus/watcher.py<gh_stars>0
import copy
import errno
import os
import signal
import time
import sys
from random import randint
try:
from itertools import zip_longest as izip_longest
except ImportError:
from itertools import izip_longest # NOQA
import site
from tornado import gen
from psutil import NoSuchProcess
import zmq.utils.jsonapi as json
from zmq.utils.strtypes import b
from zmq.eventloop import ioloop
from circus.process import Process, DEAD_OR_ZOMBIE, UNEXISTING
from circus import logger
from circus import util
from circus.stream import get_pipe_redirector, get_stream
from circus.util import parse_env_dict, resolve_name, tornado_sleep
from circus.py3compat import bytestring, is_callable
class Watcher(object):
"""
Class managing a list of processes for a given command.
Options:
- **name**: name given to the watcher. Used to uniquely identify it.
- **cmd**: the command to run. May contain *$WID*, which will be
replaced by **wid**.
- **args**: the arguments for the command to run. Can be a list or
a string. If **args** is a string, it's splitted using
:func:`shlex.split`. Defaults to None.
- **numprocesses**: Number of processes to run.
- **working_dir**: the working directory to run the command in. If
not provided, will default to the current working directory.
- **shell**: if *True*, will run the command in the shell
environment. *False* by default. **warning: this is a
security hazard**.
- **uid**: if given, is the user id or name the command should run
with. The current uid is the default.
- **gid**: if given, is the group id or name the command should run
with. The current gid is the default.
- **send_hup**: if True, a process reload will be done by sending
the SIGHUP signal. Defaults to False.
- **stop_signal**: the signal to send when stopping the process.
Defaults to SIGTERM.
- **stop_children**: send the **stop_signal** to the children too.
Defaults to False.
- **env**: a mapping containing the environment variables the command
will run with. Optional.
- **rlimits**: a mapping containing rlimit names and values that will
be set before the command runs.
- **stdout_stream**: a mapping that defines the stream for
the process stdout. Defaults to None.
Optional. When provided, *stdout_stream* is a mapping containing up to
three keys:
- **class**: the stream class. Defaults to
`circus.stream.FileStream`
- **filename**: the filename, if using a FileStream
- **max_bytes**: maximum file size, after which a new output file is
opened. defaults to 0 which means no maximum size.
- **backup_count**: how many backups to retain when rotating files
according to the max_bytes parameter. defaults to 0 which means
no backups are made.
This mapping will be used to create a stream callable of the specified
class.
Each entry received by the callable is a mapping containing:
- **pid** - the process pid
- **name** - the stream name (*stderr* or *stdout*)
- **data** - the data
- **stderr_stream**: a mapping that defines the stream for
the process stderr. Defaults to None.
Optional. When provided, *stderr_stream* is a mapping containing up to
three keys:
- **class**: the stream class. Defaults to `circus.stream.FileStream`
- **filename**: the filename, if using a FileStream
- **max_bytes**: maximum file size, after which a new output file is
opened. defaults to 0 which means no maximum size.
- **backup_count**: how many backups to retain when rotating files
according to the max_bytes parameter. defaults to 0 which means
no backups are made.
This mapping will be used to create a stream callable of the specified
class.
Each entry received by the callable is a mapping containing:
- **pid** - the process pid
- **name** - the stream name (*stderr* or *stdout*)
- **data** - the data
- **priority** -- integer that defines a priority for the watcher. When
the Arbiter do some operations on all watchers, it will sort them
with this field, from the bigger number to the smallest.
(default: 0)
- **singleton** -- If True, this watcher has a single process.
(default:False)
- **use_sockets** -- If True, the processes will inherit the file
descriptors, thus can reuse the sockets opened by circusd.
(default: False)
- **on_demand** -- If True, the processes will be started only
at the first connection to the socket
(default: False)
- **copy_env** -- If True, the environment in which circus is running
run will be reproduced for the workers. (default: False)
- **copy_path** -- If True, circusd *sys.path* is sent to the
process through *PYTHONPATH*. You must activate **copy_env** for
**copy_path** to work. (default: False)
- **max_age**: If set after around max_age seconds, the process is
replaced with a new one. (default: 0, Disabled)
- **max_age_variance**: The maximum number of seconds that can be added to
max_age. This extra value is to avoid restarting all processes at the
same time. A process will live between max_age and
max_age + max_age_variance seconds.
- **hooks**: callback functions for hooking into the watcher startup
and shutdown process. **hooks** is a dict where each key is the hook
name and each value is a 2-tuple with the name of the callable
or the callabled itself and a boolean flag indicating if an
exception occuring in the hook should not be ignored.
Possible values for the hook name: *before_start*, *after_start*,
*before_spawn*, *before_stop*, *after_stop*., *before_signal* or
*after_signal*.
- **options** -- extra options for the worker. All options
found in the configuration file for instance, are passed
in this mapping -- this can be used by plugins for watcher-specific
options.
- **respawn** -- If set to False, the processes handled by a watcher will
not be respawned automatically. (default: True)
- **virtualenv** -- The root directory of a virtualenv. If provided, the
watcher will load the environment for its execution. (default: None)
- **close_child_stdout**: If True, closes the stdout after the fork.
default: False.
- **close_child_stderr**: If True, closes the stderr after the fork.
default: False.
"""
def __init__(self, name, cmd, args=None, numprocesses=1, warmup_delay=0.,
working_dir=None, shell=False, uid=None, max_retry=5,
gid=None, send_hup=False, stop_signal=signal.SIGTERM,
stop_children=False, env=None, graceful_timeout=30.0,
prereload_fn=None, rlimits=None, executable=None,
stdout_stream=None, stderr_stream=None, priority=0,
loop=None, singleton=False, use_sockets=False,
copy_env=False, copy_path=False, max_age=0,
max_age_variance=30, hooks=None, respawn=True,
autostart=True, on_demand=False, virtualenv=None,
close_child_stdout=False, close_child_stderr=False,
**options):
self.name = name
self.use_sockets = use_sockets
self.on_demand = on_demand
self.res_name = name.lower().replace(" ", "_")
self.numprocesses = int(numprocesses)
self.warmup_delay = warmup_delay
self.cmd = cmd
self.args = args
self._status = "stopped"
self.graceful_timeout = float(graceful_timeout)
self.prereload_fn = prereload_fn
self.executable = None
self.priority = priority
self.stdout_stream_conf = copy.copy(stdout_stream)
self.stderr_stream_conf = copy.copy(stderr_stream)
self.stdout_stream = get_stream(self.stdout_stream_conf)
self.stderr_stream = get_stream(self.stderr_stream_conf)
self.stdout_redirector = self.stderr_redirector = None
self.max_retry = max_retry
self._options = options
self.singleton = singleton
self.copy_env = copy_env
self.copy_path = copy_path
self.virtualenv = virtualenv
self.max_age = int(max_age)
self.max_age_variance = int(max_age_variance)
self.ignore_hook_failure = ['before_stop', 'after_stop',
'before_signal', 'after_signal']
self.respawn = respawn
self.autostart = autostart
self.close_child_stdout = close_child_stdout
self.close_child_stderr = close_child_stderr
self.loop = loop or ioloop.IOLoop.instance()
if singleton and self.numprocesses not in (0, 1):
raise ValueError("Cannot have %d processes with a singleton "
" watcher" % self.numprocesses)
self.optnames = (("numprocesses", "warmup_delay", "working_dir",
"uid", "gid", "send_hup", "stop_signal",
"stop_children", "shell", "env", "max_retry", "cmd",
"args", "graceful_timeout", "executable",
"use_sockets", "priority", "copy_env", "singleton",
"stdout_stream_conf", "on_demand",
"stderr_stream_conf", "max_age", "max_age_variance",
"close_child_stdout", "close_child_stderr")
+ tuple(options.keys()))
if not working_dir:
# working dir hasn't been set
working_dir = util.get_working_dir()
self.working_dir = working_dir
self.processes = {}
self.shell = shell
self.uid = uid
self.gid = gid
if self.copy_env:
self.env = os.environ.copy()
if self.copy_path:
path = os.pathsep.join(sys.path)
self.env['PYTHONPATH'] = path
if env is not None:
self.env.update(env)
else:
if self.copy_path:
raise ValueError(('copy_env and copy_path must have the '
'same value'))
self.env = env
if self.virtualenv:
util.load_virtualenv(self)
# load directories in PYTHONPATH if provided
# so if a hook is there, it can be loaded
if self.env is not None and 'PYTHONPATH' in self.env:
for path in self.env['PYTHONPATH'].split(os.pathsep):
if path in sys.path:
continue
site.addsitedir(path)
self.rlimits = rlimits
self.send_hup = send_hup
self.stop_signal = stop_signal
self.stop_children = stop_children
self.sockets = self.evpub_socket = None
self.arbiter = None
self.hooks = {}
self._resolve_hooks(hooks)
def _reload_hook(self, key, hook, ignore_error):
hook_name = key.split('.')[-1]
self._resolve_hook(hook_name, hook, ignore_error)
def _reload_stream(self, key, val):
parts = key.split('.', 1)
if parts[0] == 'stdout':
self.stdout_stream_conf[parts[1]] = val
self.stdout_stream = get_stream(self.stdout_stream_conf)
else:
self.stderr_stream_conf[parts[1]] = val
self.stderr_stream = get_stream(self.stderr_stream_conf)
self._create_redirectors()
if self.stdout_redirector is not None:
self.stdout_redirector.start()
if self.stderr_redirector is not None:
self.stderr_redirector.start()
return 1
def _create_redirectors(self):
if self.stdout_stream:
if self.stdout_redirector is not None:
self.stdout_redirector.stop()
self.stdout_redirector = get_pipe_redirector(
self.stdout_stream, loop=self.loop)
else:
self.stdout_redirector = None
if self.stderr_stream:
if self.stderr_redirector is not None:
self.stderr_redirector.stop()
self.stderr_redirector = get_pipe_redirector(
self.stderr_stream, loop=self.loop)
else:
self.stderr_redirector = None
def _resolve_hook(self, name, callable_or_name, ignore_failure):
if is_callable(callable_or_name):
self.hooks[name] = callable_or_name
else:
# will raise ImportError on failure
self.hooks[name] = resolve_name(callable_or_name)
if ignore_failure:
self.ignore_hook_failure.append(name)
def _resolve_hooks(self, hooks):
"""Check the supplied hooks argument to make sure we can find
callables"""
if hooks is None:
return
for name, (callable_or_name, ignore_failure) in hooks.items():
self._resolve_hook(name, callable_or_name, ignore_failure)
@classmethod
def load_from_config(cls, config):
if 'env' in config:
config['env'] = parse_env_dict(config['env'])
cfg = config.copy()
w = cls(name=config.pop('name'), cmd=config.pop('cmd'), **config)
w._cfg = cfg
return w
@util.debuglog
def initialize(self, evpub_socket, sockets, arbiter):
self.evpub_socket = evpub_socket
self.sockets = sockets
self.arbiter = arbiter
def __len__(self):
return len(self.processes)
def notify_event(self, topic, msg):
"""Publish a message on the event publisher channel"""
name = bytestring(self.res_name)
multipart_msg = [b("watcher.%s.%s" % (name, topic)), json.dumps(msg)]
if self.evpub_socket is not None and not self.evpub_socket.closed:
self.evpub_socket.send_multipart(multipart_msg)
@util.debuglog
def reap_process(self, pid, status=None):
"""ensure that the process is killed (and not a zombie)"""
if pid not in self.processes:
return
process = self.processes.pop(pid)
if not status:
while True:
try:
_, status = os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.errno == errno.EAGAIN:
time.sleep(0.001)
continue
elif e.errno == errno.ECHILD:
# nothing to do here, we do not have any child
# process running
# but we still need to send the "reap" signal.
logger.debug('reaping already dead process %s [%s]',
pid, self.name)
self.notify_event(
"reap",
{"process_pid": pid, "time": time.time()})
process.stop()
return
else:
raise
# get return code
if os.WIFSIGNALED(status):
os.WTERMSIG(status)
# process exited using exit(2) system call; return the
# integer exit(2) system call has been called with
elif os.WIFEXITED(status):
os.WEXITSTATUS(status)
else:
# should never happen
raise RuntimeError("Unknown process exit status")
# if the process is dead or a zombie try to definitely stop it.
if process.status in (DEAD_OR_ZOMBIE, UNEXISTING):
process.stop()
logger.debug('reaping process %s [%s]', pid, self.name)
self.notify_event("reap", {"process_pid": pid, "time": time.time()})
@util.debuglog
def reap_processes(self):
"""Reap all the processes for this watcher.
"""
if self.is_stopped():
logger.debug('do not reap processes as the watcher is stopped')
return
# reap_process changes our dict, look through the copy of keys
for pid in list(self.processes.keys()):
self.reap_process(pid)
@gen.coroutine
@util.debuglog
def manage_processes(self):
"""Manage processes."""
if self.is_stopped():
return
# remove dead or zombie processes first
for process in list(self.processes.values()):
if process.status == DEAD_OR_ZOMBIE:
self.processes.pop(process.pid)
if self.max_age:
yield self.remove_expired_processes()
# adding fresh processes
if (self.respawn and len(self.processes) < self.numprocesses
and not self.is_stopping()):
yield self.spawn_processes()
# removing extra processes
if len(self.processes) > self.numprocesses:
processes_to_kill = []
for process in sorted(self.processes.values(),
key=lambda process: process.started,
reverse=True)[self.numprocesses:]:
if process.status == DEAD_OR_ZOMBIE:
self.processes.pop(process.pid)
else:
processes_to_kill.append(process)
removes = yield [self.kill_process(process)
for process in processes_to_kill]
for i, process in enumerate(processes_to_kill):
if removes[i]:
self.processes.pop(process.pid)
@gen.coroutine
@util.debuglog
def remove_expired_processes(self):
max_age = self.max_age + randint(0, self.max_age_variance)
expired_processes = [p for p in self.processes.values()
if p.age() > max_age]
removes = yield [self.kill_process(x) for x in expired_processes]
for i, process in enumerate(expired_processes):
if removes[i]:
self.processes.pop(process.pid)
@gen.coroutine
@util.debuglog
def reap_and_manage_processes(self):
"""Reap & manage processes."""
if self.is_stopped():
return
self.reap_processes()
yield self.manage_processes()
@gen.coroutine
@util.debuglog
def spawn_processes(self):
"""Spawn processes.
"""
# when an on_demand process dies, do not restart it until
# the next event
if self.on_demand and not self.arbiter.socket_event:
self._status = "stopped"
return
for i in range(self.numprocesses - len(self.processes)):
res = self.spawn_process()
if res is False:
yield self._stop()
break
yield tornado_sleep(self.warmup_delay)
def _get_sockets_fds(self):
# XXX should be cached
if self.sockets is None:
return {}
fds = {}
for name, sock in self.sockets.items():
fds[name] = sock.fileno()
return fds
def spawn_process(self):
"""Spawn process.
Return True if ok, False if the watcher must be stopped
"""
if self.is_stopped():
return True
if not self.call_hook('before_spawn'):
return False
cmd = util.replace_gnu_args(self.cmd, env=self.env)
nb_tries = 0
while nb_tries < self.max_retry or self.max_retry == -1:
process = None
pipe_stdout = self.stdout_redirector is not None
pipe_stderr = self.stderr_redirector is not None
try:
process = Process(self._nextwid, cmd,
args=self.args, working_dir=self.working_dir,
shell=self.shell, uid=self.uid, gid=self.gid,
env=self.env, rlimits=self.rlimits,
executable=self.executable,
use_fds=self.use_sockets, watcher=self,
pipe_stdout=pipe_stdout,
pipe_stderr=pipe_stderr,
close_child_stdout=self.close_child_stdout,
close_child_stderr=self.close_child_stderr)
# stream stderr/stdout if configured
if pipe_stdout and self.stdout_redirector is not None:
self.stdout_redirector.add_redirection('stdout',
process,
process.stdout)
if pipe_stderr and self.stderr_redirector is not None:
self.stderr_redirector.add_redirection('stderr',
process,
process.stderr)
self.processes[process.pid] = process
logger.debug('running %s process [pid %d]', self.name,
process.pid)
except OSError as e:
logger.warning('error in %r: %s', self.name, str(e))
if process is None:
nb_tries += 1
continue
else:
self.notify_event("spawn", {"process_pid": process.pid,
"time": time.time()})
return True
return False
@util.debuglog
def send_signal_process(self, process, signum):
"""Send the signum signal to the process
The signal is sent to the process itself then to all the children
"""
try:
# sending the signal to the process itself
self.send_signal(process.pid, signum)
self.notify_event("kill", {"process_pid": process.pid,
"time": time.time()})
except NoSuchProcess:
# already dead !
pass
# now sending the same signal to all the children
for child_pid in process.children():
try:
process.send_signal_child(child_pid, signum)
self.notify_event("kill", {"process_pid": child_pid,
"time": time.time()})
except NoSuchProcess:
# already dead !
pass
def _process_remove_redirections(self, process):
"""Remove process redirections
"""
if self.stdout_redirector is not None and process.stdout is not None:
self.stdout_redirector.remove_redirection(process.stdout)
if self.stderr_redirector is not None and process.stderr is not None:
self.stderr_redirector.remove_redirection(process.stderr)
@gen.coroutine
@util.debuglog
def kill_process(self, process):
"""Kill process (stop_signal, graceful_timeout then SIGKILL)
"""
if process.stopping:
raise gen.Return(False)
logger.debug("%s: kill process %s", self.name, process.pid)
if self.stop_children:
self.send_signal_process(process, self.stop_signal)
else:
self.send_signal(process.pid, self.stop_signal)
self.notify_event("kill", {"process_pid": process.pid,
"time": time.time()})
process.stopping = True
waited = 0
while waited < self.graceful_timeout:
yield tornado_sleep(1)
waited += 1
if not process.is_alive():
break
if waited >= self.graceful_timeout:
# We are not smart anymore
self.send_signal_process(process, signal.SIGKILL)
self._process_remove_redirections(process)
process.stopping = False
process.stop()
raise gen.Return(True)
@gen.coroutine
@util.debuglog
def kill_processes(self):
"""Kill all processes (stop_signal, graceful_timeout then SIGKILL)
"""
active_processes = self.get_active_processes()
try:
yield [self.kill_process(process) for process in active_processes]
except OSError as e:
if e.errno != errno.ESRCH:
raise
@util.debuglog
def send_signal(self, pid, signum):
if pid in self.processes:
process = self.processes[pid]
hook_result = self.call_hook("before_signal",
pid=pid, signum=signum)
if signum != signal.SIGKILL and not hook_result:
logger.debug("before_signal hook didn't return True "
"=> signal %i is not sent to %i" % (signum, pid))
else:
process.send_signal(signum)
self.call_hook("after_signal", pid=pid, signum=signum)
else:
logger.debug('process %s does not exist' % pid)
@util.debuglog
def send_signal_child(self, pid, child_id, signum):
"""Send signal to a child.
"""
process = self.processes[pid]
try:
process.send_signal_child(int(child_id), signum)
except OSError as e:
if e.errno != errno.ESRCH:
raise
@util.debuglog
def send_signal_children(self, pid, signum):
"""Send signal to all children.
"""
process = self.processes[int(pid)]
process.send_signal_children(signum)
@util.debuglog
def status(self):
return self._status
@util.debuglog
def process_info(self, pid):
process = self.processes[int(pid)]
return process.info()
@util.debuglog
def info(self):
return dict([(proc.pid, proc.info())
for proc in self.processes.values()])
@util.synchronized("watcher_stop")
@gen.coroutine
def stop(self):
yield self._stop()
@util.debuglog
@gen.coroutine
def _stop(self):
if self.is_stopped():
return
self._status = "stopping"
logger.debug('stopping the %s watcher' % self.name)
logger.debug('gracefully stopping processes [%s] for %ss' % (
self.name, self.graceful_timeout))
# We ignore the hook result
self.call_hook('before_stop')
yield self.kill_processes()
# stop redirectors
if self.stdout_redirector is not None:
self.stdout_redirector.stop()
self.stdout_redirector = None
if self.stderr_redirector is not None:
self.stderr_redirector.stop()
self.stderr_redirector = None
# notify about the stop
if self.evpub_socket is not None:
self.notify_event("stop", {"time": time.time()})
self._status = "stopped"
# We ignore the hook result
self.call_hook('after_stop')
logger.info('%s stopped', self.name)
def get_active_processes(self):
"""return a list of pids of active processes (not already stopped)"""
return [p for p in self.processes.values()
if p.status not in (DEAD_OR_ZOMBIE, UNEXISTING)]
def get_active_pids(self):
"""return a list of pids of active processes (not already stopped)"""
return [p.pid for p in self.processes.values()
if p.status not in (DEAD_OR_ZOMBIE, UNEXISTING)]
@property
def pids(self):
"""Returns a list of PIDs"""
return [process.pid for process in self.processes]
@property
def _nextwid(self):
used_wids = sorted([p.wid for p in self.processes.values()])
all_wids = range(1, self.numprocesses + 1)
for slot, wid in izip_longest(all_wids, used_wids, fillvalue=None):
if slot is None:
# should never happen
raise RuntimeError("Process count > numproceses")
elif wid is None:
return slot
def call_hook(self, hook_name, **kwargs):
"""Call a hook function"""
hook_kwargs = {'watcher': self, 'arbiter': self.arbiter,
'hook_name': hook_name}
hook_kwargs.update(kwargs)
if hook_name in self.hooks:
try:
result = self.hooks[hook_name](**hook_kwargs)
self.notify_event("hook_success",
{"name": hook_name, "time": time.time()})
except Exception as error:
logger.exception('Hook %r failed' % hook_name)
result = hook_name in self.ignore_hook_failure
self.notify_event("hook_failure",
{"name": hook_name, "time": time.time(),
"error": str(error)})
return result
else:
return True
@util.synchronized("watcher_start")
@gen.coroutine
def start(self):
yield self._start()
@gen.coroutine
@util.debuglog
def _start(self):
"""Start.
"""
if not self.is_stopped():
return
if self.on_demand and not self.arbiter.socket_event:
return
if not self.call_hook('before_start'):
logger.debug('Aborting startup')
return
self._status = "starting"
self._create_redirectors()
self.reap_processes()
yield self.spawn_processes()
if not self.call_hook('after_start'):
logger.debug('Aborting startup')
yield self._stop()
return
if self.stdout_redirector is not None:
self.stdout_redirector.start()
if self.stderr_redirector is not None:
self.stderr_redirector.start()
self._status = "active"
logger.info('%s started' % self.name)
self.notify_event("start", {"time": time.time()})
@util.synchronized("watcher_restart")
@gen.coroutine
def restart(self):
yield self._restart()
@gen.coroutine
@util.debuglog
def _restart(self):
yield self._stop()
yield self._start()
@util.synchronized("watcher_reload")
@gen.coroutine
def reload(self, graceful=True):
yield self._reload(graceful=graceful)
@gen.coroutine
@util.debuglog
def _reload(self, graceful=True):
""" reload
"""
if self.prereload_fn is not None:
self.prereload_fn(self)
if not graceful:
yield self._restart()
return
if self.send_hup:
for process in self.processes.values():
logger.info("SENDING HUP to %s" % process.pid)
process.send_signal(signal.SIGHUP)
else:
for i in range(self.numprocesses):
self.spawn_process()
yield self.manage_processes()
self.notify_event("reload", {"time": time.time()})
logger.info('%s reloaded', self.name)
@gen.coroutine
def set_numprocesses(self, np):
if np < 0:
np = 0
if self.singleton and np > 1:
raise ValueError('Singleton watcher has a single process')
self.numprocesses = np
yield self.manage_processes()
raise gen.Return(self.numprocesses)
@util.synchronized("watcher_incr")
@gen.coroutine
@util.debuglog
def incr(self, nb=1):
res = yield self.set_numprocesses(self.numprocesses + nb)
raise gen.Return(res)
@util.synchronized("watcher_decr")
@gen.coroutine
@util.debuglog
def decr(self, nb=1):
res = yield self.set_numprocesses(self.numprocesses - nb)
raise gen.Return(res)
@util.synchronized("watcher_set_opt")
def set_opt(self, key, val):
"""Set a watcher option.
This function set the watcher options. unknown keys are ignored.
This function return an action number:
- 0: trigger the process management
- 1: trigger a graceful reload of the processes;
"""
action = 0
if key in self._options:
self._options[key] = val
action = -1 # XXX for now does not trigger a reload
elif key == "numprocesses":
val = int(val)
if val < 0:
val = 0
if self.singleton and val > 1:
raise ValueError('Singleton watcher has a single process')
self.numprocesses = val
elif key == "warmup_delay":
self.warmup_delay = float(val)
elif key == "working_dir":
self.working_dir = val
action = 1
elif key == "uid":
self.uid = util.to_uid(val)
action = 1
elif key == "gid":
self.gid = util.to_gid(val)
action = 1
elif key == "send_hup":
self.send_hup = val
elif key == "stop_signal":
self.stop_signal = util.to_signum(val)
elif key == "stop_children":
self.stop_children = util.to_bool(val)
elif key == "shell":
self.shell = val
action = 1
elif key == "env":
self.env = val
action = 1
elif key == "cmd":
self.cmd = val
action = 1
elif key == "graceful_timeout":
self.graceful_timeout = float(val)
action = -1
elif key == "max_age":
self.max_age = int(val)
action = 1
elif key == "max_age_variance":
self.max_age_variance = int(val)
action = 1
elif (key.startswith('stdout_stream') or
key.startswith('stderr_stream')):
action = self._reload_stream(key, val)
elif key.startswith('hook'):
val = val.split(',')
if len(val) == 2:
ignore_error = util.to_bool(val[1])
else:
ignore_error = False
hook = val[0]
self._reload_hook(key, hook, ignore_error)
action = 1
# send update event
self.notify_event("updated", {"time": time.time()})
return action
@util.synchronized("watcher_do_action")
@gen.coroutine
def do_action(self, num):
# trigger needed action
if num == 0:
yield self.manage_processes()
else:
# graceful restart
yield self._restart()
@util.debuglog
def options(self, *args):
options = []
for name in sorted(self.optnames):
if name in self._options:
options.append((name, self._options[name]))
else:
options.append((name, getattr(self, name)))
return options
def is_stopping(self):
return self._status == 'stopping'
def is_stopped(self):
return self._status == 'stopped'
def is_active(self):
return self._status == 'active'
| StarcoderdataPython |
1753960 | import os
import sys
import time
import shutil
import argparse
import numpy as np
from PIL import Image
from skimage import io
from pathlib import Path
from matplotlib import cm
import matplotlib.pyplot as plt
from imgaug import augmenters as iaa
# Pycoco
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
ROOT_DIR = os.path.abspath(".")
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, 'pre_trained_weights/mask_rcnn_coco.h5')
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, 'models_new')
sys.path.append(ROOT_DIR)
# Hide deprecation warnings
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Mask-RCNN
from mrcnn.config import Config
from mrcnn import model as modellib, utils, visualize
############################################################
# Configurations
############################################################
class RoofConfig(Config):
NAME = 'roof'
NUM_CLASSES = 1 + 1 # background + buildings
IMAGE_MIN_DIM = 512
IMAGE_MAX_DIM = 512
IMAGE_RESIZE_MODE = 'square'
# Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4
# Changing this requires other changes in the code. See the WIKI for more
# details: https://github.com/matterport/Mask_RCNN/wiki
IMAGE_CHANNEL_COUNT = 3
IMAGESHAPE = np.array([IMAGE_MAX_DIM,IMAGE_MAX_DIM,3])
# CIR
MEAN_PIXEL = np.array([118.81442702, 94.80935892, 103.60637387])
# IRndsm
# MEAN_PIXEL = np.array([118.81442702, 94.80935892, 10.85432061])
# CIR + ndsm
# MEAN_PIXEL = np.array([118.81442702, 94.80935892, 103.60637387, 10.85432061])
GPU_COUNT = 2
IMAGES_PER_GPU = 2
BACKBONE = 'resnet101'
# The strides of each layer of the FPN Pyramid. These values
# are based on a Resnet101 backbone.
BACKBONE_STRIDES = [4,8, 16, 32, 64]
STEPS_PER_EPOCH = 200 // IMAGES_PER_GPU
VALIDATION_STEPS = 20
# Length of square anchor side in pixels
RPN_ANCHOR_SCALES = (10, 20, 40, 80, 160)
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# Ratios of anchors at each cell (width/height)
# A value of 1 represents a square anchor, and 0.5 is a wide anchor
RPN_ANCHOR_RATIOS = [0.5,1.0,2.0]
# Non-max suppression threshold to filter RPN proposals.
# You can increase this during training to generate more propsals.
RPN_NMS_THRESHOLD = 0.9
# How many anchors per image to use for RPN training
RPN_TRAIN_ANCHORS_PER_IMAGE = 256
# If enabled, resizes instance masks to a smaller size to reduce
# memory load. Recommended when using high-resolution images.
USE_MINI_MASK = True
MINI_MASK_SHAPE = (56, 56)
# Number of ROIs per image to feed to classifier/mask heads
# The Mask RCNN paper uses 512 but often the RPN doesn't generate
# enough positive proposals to fill this and keep a positive:negative
# ratio of 1:3. You can increase the number of proposals by adjusting
# the RPN NMS threshold.
TRAIN_ROIS_PER_IMAGE = 512
# Percent of positive ROIs used to train classifier/mask heads
ROI_POSITIVE_RATIO = .33
# Pooled ROIs
POOL_SIZE = 7
MASK_POOL_SIZE = 14
# Shape of output mask
# To change this you also need to change the neural network mask branch
MASK_SHAPE = [28, 28]
# Maximum number of ground truth instances to use in one image
MAX_GT_INSTANCES = 400
# Bounding box refinement standard deviation for RPN and final detections.
RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
# Max number of final detections
DETECTION_MAX_INSTANCES = 400
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0
# Non-maximum suppression threshold for detection
DETECTION_NMS_THRESHOLD = 0.5
# Learning rate and momentum
# The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
# weights to explode. Likely due to differences in optimizer
# implementation.
LEARNING_RATE = 0.002
LEARNING_MOMENTUM = 0.9
# Weight decay regularization
WEIGHT_DECAY = 0.005
# Loss weights for more precise optimization.
# Can be used for R-CNN training setup.
LOSS_WEIGHTS = {
"rpn_class_loss": 1.,
"rpn_bbox_loss": 1,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 1.
}
# Gradient norm clipping
GRADIENT_CLIP_NORM = 5.0
class RoofInferenceConfig(RoofConfig):
# Set batch size to 1 to run one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
############################################################
# Dataset
############################################################
class RoofDataset(utils.Dataset):
def load_roof(self, dataset_dir, subset):
self.add_class('roof', 1, 'roof')
assert subset in ['train', 'val']
dataset_dir = os.path.join(dataset_dir, subset, 'images/')
# Get list of images
images = next(os.walk(dataset_dir))[2]
# Add images
for img in images:
self.add_image(
'roof',
image_id=img,
width=512, height=512,
path=os.path.join(dataset_dir, img))
def load_mask(self, image_id):
info = self.image_info[image_id]
mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), 'masks', info['id'])
mask = io.imread(os.path.splitext(mask_dir)[0] + '.tif')
instances = np.unique(mask)[1:]
all_masks = mask == instances[:, None, None]
all_masks = np.moveaxis(all_masks, 0, -1)
all_masks = all_masks.astype(bool)
# Return mask, and array of class IDs of each instance. Since we have
# one class ID, we return an array of ones
class_ids = np.ones([all_masks.shape[-1]], dtype=np.int32)
return all_masks, class_ids
def image_reference(self, image_id):
info = self.image_info[image_id]
if info['source'] == 'roof':
return info['path']
else:
super(self.__class__, self).image_reference(image_id)
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
    """Arrange results to match COCO specs in http://cocodataset.org/#format"""
    # Nothing detected -> empty result list.
    if rois is None:
        return []
    results = []
    for img_id in image_ids:
        # One COCO record per detection row.
        for det_idx in range(rois.shape[0]):
            y1, x1, y2, x2 = np.around(rois[det_idx], 1)
            results.append({
                "image_id": img_id,
                "category_id": dataset.get_source_class_id(class_ids[det_idx], "coco"),
                # COCO boxes are [x, y, width, height].
                "bbox": [x1, y1, x2 - x1, y2 - y1],
                "score": scores[det_idx],
                "segmentation": maskUtils.encode(np.asfortranarray(masks[:, :, det_idx])),
            })
    return results
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with validation data
    coco: a pycocotools COCO object matching `dataset`
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    image_ids: optional explicit list of dataset image ids to evaluate
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids
    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]
    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
    t_prediction = 0
    t_start = time.time()
    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)
        # Run detection (timed separately from data loading)
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)
        # Convert results to COCO format
        # Cast masks to uint8 because COCO tools errors out on bool
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"],
                                           r["masks"].astype(np.uint8))
        results.extend(image_results)
    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)
    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
############################################################
# Training
############################################################
def train(model):
    """Run the staged fine-tuning schedule (heads first, then deeper layers)
    on the roof dataset pointed to by the module-level `args.dataset`."""
    # Training dataset.
    dataset_train = RoofDataset()
    dataset_train.load_roof(args.dataset, 'train')
    dataset_train.prepare()
    # Validation dataset
    dataset_val = RoofDataset()
    dataset_val.load_roof(args.dataset, 'val')
    dataset_val.prepare()
    # Light augmentations
    light_augm = iaa.SomeOf((0, 4), [
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.OneOf([iaa.Affine(rotate=90),
                   iaa.Affine(rotate=180),
                   iaa.Affine(rotate=270)]),
        iaa.Affine(
            translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)})
    ])
    # Medium augmentations
    # NOTE(review): medium_augm and heavy_augm are built but never used below;
    # every stage trains with light_augm -- confirm this is intentional.
    medium_augm = iaa.SomeOf((0, 4), [
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Crop(percent=(0, 0.1)),
        iaa.OneOf([iaa.Affine(rotate=90),
                   iaa.Affine(rotate=180),
                   iaa.Affine(rotate=270)]),
        iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
            translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)})
    ])
    # Heavy augmentations
    heavy_augm = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Crop(percent=(0, 0.1)),
        iaa.Sometimes(0.5, iaa.Crop(percent=(0, 0.1))),
        iaa.Sometimes(0.5, iaa.OneOf([iaa.Affine(rotate=90),
                                      iaa.Affine(rotate=180),
                                      iaa.Affine(rotate=270)])),
        iaa.Sometimes(0.5, iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
            translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
            rotate=(-45, 45),
            shear=(-4, 4)))
    ], random_order=True)
    # Snapshot this config file next to the model outputs for reproducibility.
    # NOTE(review): both paths are hard-coded relative to the working directory.
    shutil.copyfile('./roof_plane_segmentation.py', './models_new/latest_config.py')
    # Cumulative epoch targets per stage (Keras treats `epochs` as absolute).
    ep1 = 25
    ep2 = ep1 + 15
    ep3 = ep2 + 40
    ep4 = ep3 + 40
    ep5 = ep4 + 40
    print('Training heads')
    model.train(dataset_train, dataset_val,
                learning_rate=0.002,
                epochs=ep1,
                augmentation=light_augm,
                layers='heads')
    # Same stage again at a lower learning rate.
    model.train(dataset_train, dataset_val,
                learning_rate=0.0005,
                epochs=ep2,
                augmentation=light_augm,
                layers='heads')
    print('Training resnet4+')
    model.train(dataset_train, dataset_val,
                learning_rate=0.0003,
                epochs=ep3,
                augmentation=light_augm,
                layers='4+')
    print('Training all layers')
    model.train(dataset_train, dataset_val,
                learning_rate=0.0001,
                epochs=ep4,
                augmentation=light_augm,
                layers='all')
    print('Training all layers')
    model.train(dataset_train, dataset_val,
                learning_rate=0.0001,
                epochs=ep5,
                augmentation=light_augm,
                layers='all')
############################################################
# Predict
############################################################
def segment_region(model, data_path, output_path):
    """Run inference on every image in data_path and write results to output_path.

    Writes two artifacts per input image:
      - masks/<name>.tif:  label image, 0 = background, i = i-th instance
      - images/<name>.tif: original image blended with colored instance overlays
    """
    Path(output_path + 'images/').mkdir(parents=True, exist_ok=True)
    Path(output_path + 'masks/').mkdir(parents=True, exist_ok=True)
    (_, _, file_list) = next(os.walk(data_path))
    for file in file_list:
        original_image = io.imread(data_path + file)
        results = model.detect([original_image], verbose=1)
        r = results[0]
        masks = r['masks']
        # Generate label mask. Fix: the original looped range(1, N) writing
        # masks[i-1] -> i, which silently dropped the last instance.
        # NOTE(review): uint8 labels overflow past 255 instances per image.
        generated_mask = np.zeros((masks.shape[0], masks.shape[1]), dtype=np.uint8)
        for i in range(masks.shape[2]):
            generated_mask[masks[:, :, i]] = i + 1
        io.imsave(output_path + 'masks/' + os.path.splitext(file)[0] + '.tif', generated_mask)
        # Generate image with colored segments.
        image_arr = np.copy(original_image)
        new_image = Image.fromarray(image_arr)
        # Fix: size the colormap by the number of instances (shape[2]), not the
        # image height (shape[0]) as in the original.
        cmap = cm.get_cmap('winter', max(masks.shape[2], 1))
        for i in range(0, masks.shape[2]):
            image_arr[masks[:, :, i], 0] = np.clip(cmap(i)[0] + np.random.rand(1) * 255, 0, 255)
            image_arr[masks[:, :, i], 1] = np.clip(cmap(i)[1] + np.random.rand(1) * 255, 0, 255)
            image_arr[masks[:, :, i], 2] = np.clip(cmap(i)[2] + np.random.rand(1) * 255, 0, 255)
        mask_overlay = Image.fromarray(image_arr)
        # Alpha-blend the colored overlay at 50% on top of the original.
        new_image.putalpha(255)
        mask_overlay.putalpha(128)
        new_image = Image.alpha_composite(new_image, mask_overlay)
        new_image.save(output_path + 'images/' + os.path.splitext(file)[0] + '.tif')
############################################################
# Evaluate
############################################################
def calculate_map(model, iou):
    """Compute mean Average Precision over the validation split at the given
    IoU threshold and return it (also printed). Uses the module-level
    `args.dataset` for the data location."""
    dataset_val = RoofDataset()
    dataset_val.load_roof(args.dataset, 'val')
    dataset_val.prepare()
    image_ids = dataset_val.image_ids
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data.
        # NOTE(review): RoofInferenceConfig is passed as a class, not an
        # instance; it works because all settings are class attributes, but an
        # instance is the conventional argument -- confirm against modellib.
        image, image_meta, gt_class_id, gt_bbox, gt_mask =\
            modellib.load_image_gt(dataset_val, RoofInferenceConfig,
                                   image_id, use_mini_mask=False)
        # Run object detection.
        # (Removed the original's unused `molded_images` computation.)
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP at the requested IoU threshold
        AP, precisions, recalls, overlaps =\
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r["rois"], r["class_ids"], r["scores"], r['masks'], iou_threshold=iou)
        APs.append(AP)
    print("mAP: ", np.mean(APs))
    return np.mean(APs)
if __name__ == '__main__':
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Mask R-CNN for nuclei counting and segmentation')
    # Fix: the help text advertised 'detect', but the handlers below accept
    # 'train', 'predict' and 'eval'.
    parser.add_argument('command',
                        metavar='<command>',
                        help="'train', 'predict' or 'eval'")
    parser.add_argument('--dataset', required=False,
                        metavar='/path/to/dataset/',
                        help='Root directory of the dataset')
    parser.add_argument('--weights', required=True,
                        metavar='/path/to/weights.h5',
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar='/path/to/logs/',
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--subset', required=False,
                        metavar='Dataset sub-directory',
                        help='Subset of dataset to run prediction on')
    parser.add_argument('--resultout', required=False,
                        metavar='/path/to/outdir',
                        help='Where to output predicted dirs')
    args = parser.parse_args()
    # Validate arguments
    if args.command == 'train':
        assert args.dataset, "Argument --dataset is required for training"
    print('Weights: ', args.weights)
    print('Dataset: ', args.dataset)
    print('Logs: ', args.logs)
    # Configurations: full config for training, batch-of-one for inference.
    if args.command == 'train':
        config = RoofConfig()
    else:
        config = RoofInferenceConfig()
    config.display()
    # Create model
    if args.command == 'train':
        model = modellib.MaskRCNN(mode='training', config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode='inference', config=config,
                                  model_dir=args.logs)
    # Select weights file to load
    if args.weights.lower() == 'coco':
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == 'last':
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == 'imagenet':
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights
    # Load weights
    print('Loading weights ', weights_path)
    if args.weights.lower() == 'coco':
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            'mrcnn_class_logits', 'mrcnn_bbox_fc',
            'mrcnn_bbox', 'mrcnn_mask'])
    else:
        model.load_weights(weights_path, by_name=True)
    # Train or evaluate
    if args.command == 'train':
        train(model)
    elif args.command == 'predict':
        # NOTE(review): input paths are hard-coded; only the output root comes
        # from --resultout, and .lower() also lower-cases the whole path.
        segment_region(model, './RoofPlaneDataset2/large_test/cir/val/images/', args.resultout.lower() + '/__TEST__/')
        segment_region(model, './RoofPlaneDataset2/no_overlap/all/cir/images/', args.resultout.lower() + '/__ALL__/')
        # segment_region(model, './RoofPlaneDataset2/large_test/cir/val/images/', './results/__TEST__/roof20220422T0706/cir/')
        # segment_region(model, './RoofPlaneDataset2/no_overlap/all/cir/images/', './results/__ALL__/roof20220422T0706/cir/')
    elif args.command == 'eval':
        print('Map: 0.5')
        calculate_map(model, 0.5)
        print('Map: 0.75')
calculate_map(model, 0.75) | StarcoderdataPython |
30429 | <filename>katena_chain_sdk_py/serializer/bytes_field.py
"""
Copyright (c) 2019, TransChain.
This source code is licensed under the Apache 2.0 license found in the
LICENSE file in the root directory of this source tree.
"""
from marshmallow import fields
from base64 import b64encode, b64decode
class BytesField(fields.Field):
    """Marshmallow field that serializes bytes to and from base64 strings."""

    def _serialize(self, value: bytes, attr, obj, **kwargs) -> str:
        # None serializes to the empty string; anything else to base64 text.
        return "" if value is None else b64encode(value).decode("utf-8")

    def _deserialize(self, value: str, attr, data, **kwargs) -> bytes:
        # Inverse of _serialize: decode the base64 payload back into bytes.
        return b64decode(value.encode("utf-8"))
| StarcoderdataPython |
179508 | from .decode_predictions import decode_predictions
from .get_bboxes_from_quads import get_bboxes_from_quads
from .sort_quads_vertices import sort_quads_vertices
from .read_sample import read_sample
from .encode_textboxes import encode_textboxes
from .get_samples import get_samples
from .get_num_quads import get_num_quads
| StarcoderdataPython |
1725777 | <filename>musicsync/auth.py
import requests
from datetime import datetime, timedelta
from requests.auth import HTTPBasicAuth
from requests import Request
from .config import logger, \
GPM_APP_PASSWORD, \
GPM_EMAIL_ADDRESS, \
SPOTIFY_CLIENT_ID, \
SPOTIFY_CLIENT_SECRET
from .exceptions import AuthError
from .utils import _raise_for_error
BASE_URL = 'https://api.spotify.com/v1'
AUTHORIZE_TOKEN_URL = 'https://accounts.spotify.com/api/token'
OAUTH_USER_REQUEST_AUTHORIZE_URL = 'https://accounts.spotify.com/authorize/'
class BaseSpotifyAuth:
    # Abstract base for Spotify auth strategies; subclasses must expose an
    # authenticated requests session through this property.
    @property
    def session(self):
        raise NotImplementedError()
class GPMClientAuth:
    """Credential holder for the Google Play Music client (email + app password)."""
    email = ""
    password = ""

    # Fix: the default for `password` was syntactically broken (redaction
    # artifact); restore the GPM_APP_PASSWORD config value imported above.
    def __init__(self, email=GPM_EMAIL_ADDRESS, password=GPM_APP_PASSWORD):
        self.email, self.password = email, password
class SpotifyClientAuth(BaseSpotifyAuth):
    """Client-credentials flow: app-level token with no user context."""
    _client_secret = ""
    _client_id = ""
    _granted_token = ""
    _token_expiry_date = datetime.now()
    _session = None

    def __init__(self, client_id=SPOTIFY_CLIENT_ID, client_secret=SPOTIFY_CLIENT_SECRET):
        if not (client_id and client_secret):
            raise AuthError("Invalid auth credentials provided")
        self._client_id, self._client_secret = client_id, client_secret

    @property
    def session(self):
        """Requests session carrying a currently-valid Bearer token."""
        if not self._session:
            self._session = requests.Session()
        # Fix: refresh the Authorization header on every access; the original
        # set it only when the session was first created, so once the token
        # expired the cached session kept sending the stale token.
        self._session.headers.update({
            'Authorization': f"Bearer {self._get_auth()}"
        })
        return self._session

    def _get_auth(self, re_auth=False):
        # Reuse the cached token while it is still valid.
        if self._token_expiry_date > datetime.now() and not re_auth:
            return self._granted_token
        self._granted_token, self._token_expiry_date = self._authenticate()
        return self._granted_token

    def _authenticate(self):
        """POST to the token endpoint; returns (access_token, expiry datetime)."""
        data = {
            'grant_type': 'client_credentials'
        }
        r = self._session.post(
            AUTHORIZE_TOKEN_URL,
            auth=HTTPBasicAuth(self._client_id, self._client_secret),
            data=data
        )
        _raise_for_error(r)
        body = r.json()
        expiry = body.get('expires_in')
        token = body.get('access_token')
        if not expiry or not token:
            logger.warning(
                "Failed to authenticate with Spotify. Invalid tokens returned. Status code %s",
                r.status_code
            )
            _raise_for_error(r)
        token_expiry = datetime.now() + timedelta(seconds=expiry)
        return token, token_expiry
class SpotifyOAuth(BaseSpotifyAuth):
    """Authorization-code OAuth flow: user-scoped token with refresh support."""
    _client_secret = ""
    _client_id = ""
    _oauth_code = ""
    _refresh_token = ""
    _granted_token = ""
    _token_expiry_date = datetime.now()
    _session = None

    def __init__(
            self,
            client_id=SPOTIFY_CLIENT_ID,
            client_secret=SPOTIFY_CLIENT_SECRET,
            code='',
            redirect_uri=''):
        if not (client_id and client_secret and code and redirect_uri):
            raise AuthError("Invalid auth credentials provided")
        self._client_id = client_id
        self._client_secret = client_secret
        self._oauth_code = code
        self._redirect_uri = redirect_uri

    @property
    def session(self):
        """Requests session carrying a currently-valid Bearer token."""
        if not self._session:
            self._session = requests.Session()
        # Fix: refresh the Authorization header on every access; the original
        # set it only once, so the cached session kept a stale token.
        self._session.headers.update({
            'Authorization': f"Bearer {self._get_auth()}"
        })
        return self._session

    # TODO: Could make this a non-static func? How keep the Spotify client using this Auth after a redirect?
    @staticmethod
    def get_oauth_url(client_id, redirect_uri, state):
        """Build the user-consent URL for the authorization-code flow."""
        payload = {
            'client_id': client_id,
            'response_type': 'code',
            'redirect_uri': redirect_uri,
            'state': state
        }
        return Request(
            'GET',
            OAUTH_USER_REQUEST_AUTHORIZE_URL,
            params=payload
        ).prepare().url

    def _get_auth(self, re_auth=False):
        # Fix: the original named this `token_expired` although True meant the
        # token was still valid, and it then requested a refresh grant while
        # the token was valid and an authorization_code grant once it had
        # expired -- i.e. the grants were swapped.
        token_valid = self._token_expiry_date > datetime.now()
        if token_valid and not re_auth:
            return self._granted_token
        # Use the refresh grant once we hold a refresh token (the auth code is
        # single-use); otherwise exchange the authorization code.
        self._granted_token, self._token_expiry_date, self._refresh_token = \
            self._authenticate(refresh=bool(self._refresh_token))
        return self._granted_token

    def _authenticate(self, refresh=False):
        """Exchange either the refresh token or the auth code for a new token.

        Returns (access_token, expiry datetime, refresh_token).
        """
        data = {
            'grant_type': 'refresh_token',
            'refresh_token': self._refresh_token,
            'redirect_uri': self._redirect_uri
        }
        if not refresh:
            data['grant_type'] = 'authorization_code'
            # Fix: the original line ended with a trailing comma, storing a
            # one-element tuple instead of the code string.
            data['code'] = self._oauth_code
        r = self._session.post(
            AUTHORIZE_TOKEN_URL,
            auth=HTTPBasicAuth(self._client_id, self._client_secret),
            data=data
        )
        _raise_for_error(r)
        body = r.json()
        expiry = body.get('expires_in')
        token = body.get('access_token')
        refresh_token = body.get('refresh_token')
        # NOTE(review): refresh-grant responses may omit refresh_token per
        # RFC 6749, in which case this check raises -- confirm intended.
        if not (expiry and token and refresh_token):
            logger.warning(
                "Failed to authenticate with Spotify. Invalid tokens returned. Status code %s",
                r.status_code
            )
            _raise_for_error(r)
        token_expiry = datetime.now() + timedelta(seconds=expiry)
        return token, token_expiry, refresh_token
| StarcoderdataPython |
180908 | <gh_stars>1-10
from typing import List
from uuid import UUID
from fastapi import APIRouter
from fastapi.param_functions import Depends
from sqlmodel import Session
from starlette.status import HTTP_201_CREATED
from src.core.controller import order
from src.core.helpers.database import make_session
from src.core.models import Context, CreateOrder, Order, QueryOrder
from src.core.models.order import OrderResponse, UpdateOrder
from src.utils.dependencies import context_manager
router = APIRouter()
@router.get("/", response_model=List[OrderResponse])
async def get_all(
    query: QueryOrder = Depends(),
    session: Session = Depends(make_session),
    context: Context = Depends(context_manager),
):
    # List orders matching the query filters, scoped to the request context.
    return order.get_all(session, query, context=context)
@router.get("/{order_id}", response_model=Order)
async def get(order_id: UUID, session: Session = Depends(make_session), context: Context = Depends(context_manager)):
    # Fetch a single order by its UUID.
    return order.get_by_id(session, order_id, context=context)
@router.post("/", response_model=Order, status_code=HTTP_201_CREATED)
async def create(
    schema: CreateOrder, session: Session = Depends(make_session), context: Context = Depends(context_manager)
):
    # Register a new sale from the validated payload; responds 201 on success.
    return order.register_sale(session, schema, context=context)
@router.delete("/{order_id}", response_model=Order)
async def delete(order_id: UUID, session: Session = Depends(make_session), context: Context = Depends(context_manager)):
    # Delete an order by UUID and return the removed record.
    return order.delete_by_id(session, order_id, context=context)
@router.patch("/", response_model=Order)
async def update(
    data: UpdateOrder, session: Session = Depends(make_session), context: Context = Depends(context_manager)
):
    # Apply a partial update described by the payload (which carries the id).
    return order.update(session, data, context=context)
| StarcoderdataPython |
3290574 | import os
import sys
import glob
#from pybp import version
from setuptools import setup
from setuptools import find_packages
# Package info
PACKAGE_NAME = "ipydebug"
DESCRIPTION = "Breakpoints and logging class for python debugging"
LONG_DESC = "Function wrappers and breakpoint class to insert breakpoints into a python script"
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
LICENSE = "BSD 3-clause"
URL = "http://fred3m.github.io/pybp/"

# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '0.0'

# Git-derived dev version suffix is currently disabled, along with the
# `version` import commented out at the top of the file.
#if 'dev' not in VERSION:
#    VERSION += version.get_git_devstr(False)

# Ship everything under scripts/ except its README.
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
           if os.path.basename(fname) != 'README.rst']

packages = find_packages()

setup(name=PACKAGE_NAME,
      version=VERSION,
      description=DESCRIPTION,
      packages=packages,
      scripts=scripts,
      install_requires=[
          'ipython'
      ],
      #provides=[PACKAGE_NAME],
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      license=LICENSE,
      url=URL,
      long_description=LONG_DESC,
      zip_safe=False,
      # NOTE(review): use_2to3 was removed in setuptools 58; this flag breaks
      # installation under modern setuptools -- confirm minimum tooling.
      use_2to3=True,
      include_package_data=True
) | StarcoderdataPython |
158601 | #final version for automating an email that the team gets every day
#libraries
#web driver (need selenium because of Java Script)
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
#html parser
from bs4 import BeautifulSoup as bs
#ssh library
import paramiko
#date library
from datetime import datetime, timedelta
#email library
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
#splunk libraries
import splunklib.client as splunk_client
from time import sleep
import time
import splunklib.results as results
# Java Script Table parser
# @@Functionality: Parses the JavaScript-rendered dashboard table out of a page
#                  and appends one "<count> - <name>" string per row to the
#                  caller-supplied list (pass-by-reference).
# @@Params: page       - HTML source containing the dashboard table
#           infections - list to extend in place
# @@Return: None (results are appended to `infections`)
def table_parser(page, infections):
    soup = bs(page, 'html.parser')
    body = soup.find('table', attrs={'class': 'dashboard-table'}).find('tbody')
    # Flatten all <td> text in document order; cells alternate name / count.
    cells = [td.get_text() for td in body.find_all('td')]
    for x in range(0, len(cells), 2):
        # Name cells look like '??<name> - ...': drop the 2-char prefix and
        # keep only the part before ' -'.
        short_name = cells[x][2:].split(' -')[0]
        infections.append(cells[x + 1] + ' - ' + short_name)
# Splunk API searcher
# @@Functionality: Takes a splunk search query and returns the result
# @@Params: Search query that you wish to execute
# @@Return: Search result ('count' field of the first result row)
def splunk_search(search_query):
    # Relies on the module-level `service` connection created in the script
    # body below (it is defined before the first call to this function).
    kwargs_search = {'exec_mode': 'normal'}
    job = service.jobs.create(search_query, **kwargs_search)
    # Busy-poll until the search job reports completion.
    while True:
        while not job.is_ready():
            pass
        if job['isDone'] == '1':
            break
        sleep(2)
    # NOTE(review): returns the 'count' of the FIRST result only, cancelling
    # the job first; if the search yields no results this returns None and the
    # job is never cancelled -- confirm this is intended.
    for result in results.ResultsReader(job.results()):
        job.cancel()
        return result['count']
# file paths ('XXXX' values are redactions of environment-specific settings)
driver_path = 'XXXX'
private_key_path = 'XXXX'
local_file_path = 'XXXX'
email_template_path = 'XXXX'
email_img_path = 'XXXX'
splunk_password = '<PASSWORD>'
# variables that we are going to be using in order to construct the email
num_infected = 0          # daily infected-host count scraped from the dashboard
malware_infections = []   # "<count> - <name>" strings parsed by table_parser
ddos_attacks = 0          # daily DDoS count pulled over SFTP
# NOTE(review): the four counters below are never referenced later in this
# script -- confirm whether they are leftovers.
wip = 0
aepp = 0
waf = 0
iam = 0
# web scrape the new netguard box ('XXXX' values are redacted element ids/URLs)
# options for a headless browser
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('headless')
driver = webdriver.Chrome(executable_path=driver_path, options = chrome_options)
driver.get('XXXX')
# Log in to the dashboard.
username = driver.find_element_by_id('XXXX')
password = driver.find_element_by_id('XXXX')
username.send_keys('XXXX')
password.send_keys('<PASSWORD>')
driver.find_element_by_id('XXXX').click()
#@@DEBUG print('scraping table info')
# parse the malware table and collect per-infection strings
table_parser(driver.page_source, malware_infections)
#@@DEBUG print('table scrapped')
# get daily number of infections
driver.find_element_by_id('XXXX').click()
# sometimes the data does not load right away, so delay until present
delay = 5
time.sleep(2)
try:
    elem_present = EC.presence_of_element_located((By.ID, 'XXXX'))
    WebDriverWait(driver, delay).until(elem_present)
except TimeoutException:
    # Fix: the original used a bare `except:` whose body was a bare string
    # literal (a no-op); catch the specific timeout and actually report it.
    print("Page Time Out")
# collect the total number of infected for the day
#@@DEBUG print('scraping total number of infected')
page = driver.page_source
soup = bs(page, 'html.parser')
num_infected = soup.select('XXXX')[0].text
num_infected = num_infected[XXXX:]
num_infected = num_infected.split(' ')
num_infected = num_infected[0]
# logout
driver.find_element_by_id('XXXX').click()
# use scp/sftp to get the DDoS report from the remote host ('XXXX' = redaction)
key = paramiko.RSAKey.from_private_key_file(private_key_path)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname = 'XXXX', username = 'XXXX', pkey = key)
# remote file name embeds the date as ddmmyy
#@@DEBUG print('getting DDoS info')
date = datetime.today()# - timedelta(days = 1) #run today's count instead of yesterdays due to report not running on weekends
year = date.strftime('%Y')
day = date.strftime('%d%m')
year = year[2:]
date = day + year
file_name = 'XXXX' + date + 'XXXX'
remote_file_path = 'XXXX' + file_name
ftp_client = client.open_sftp()
ftp_client.get(remote_file_path, local_file_path)
ftp_client.close()
# Fix: close the SSH connection (the original leaked it for the whole run).
client.close()
# Fix: read the report with a context manager (the file handle was leaked).
with open(local_file_path, 'r') as ddos_file:
    ddos_string = ddos_file.read()
ddos_attacks = ddos_string.strip('XXXX')
#@@DEBUG print('starting splunk searches')
# get Splunk info ('XXXX' values are redactions of environment-specific config)
# Fix: the `password` kwarg was mangled by redaction; pass the splunk_password
# defined near the top of this script.
service = splunk_client.connect(host = 'XXXX', port = XXXX, username = 'XXXX', password = splunk_password)
searchquery = 'XXXX'
splunk1 = splunk_search(searchquery)
#@@DEBUG print('first splunk search')
searchquery = 'XXXX'
splunk2 = splunk_search(searchquery)
#@@DEBUG print('second splunk search')
searchquery = 'XXXX'
splunk3 = splunk_search(searchquery)
#@@DEBUG print('third splunk search')
searchquery = 'XXXX'
splunk4 = splunk_search(searchquery)
#@@DEBUG print('finished splunk searches')
# compose email: fill the HTML template placeholders with today's numbers
email_template = open(email_template_path, 'r')
email_template_string = email_template.read()
email_template.close()
# Each placeholder is replaced at most once (count=1) so that e.g. 'replace1'
# does not also consume the prefix of 'replace10'/'replace11'.
email_template_string = email_template_string.replace('replace1', ddos_attacks, 1) #replace only once because was messing up replace10
email_template_string = email_template_string.replace('replace2', num_infected, 1)
email_template_string = email_template_string.replace('replace3', malware_infections[0], 1)
email_template_string = email_template_string.replace('replace4', malware_infections[1], 1)
email_template_string = email_template_string.replace('replace5', malware_infections[2], 1)
email_template_string = email_template_string.replace('replace6', malware_infections[3], 1)
email_template_string = email_template_string.replace('replace7', malware_infections[4], 1)
email_template_string = email_template_string.replace('replace8', splunk1, 1)
email_template_string = email_template_string.replace('replace9', splunk2, 1)
email_template_string = email_template_string.replace('replace10', splunk3, 1)
email_template_string = email_template_string.replace('replace11', splunk4, 1)
# send email: HTML body plus one inline image referenced as cid:image1
sender = 'XXXX'
recipient = 'XXXX'
msg = MIMEMultipart('related')
date = datetime.today().strftime('%m/%d/%Y')
msg['Subject'] = 'XXXX ' + date
msg['From'] = sender
msg['To'] = recipient
part = MIMEText(email_template_string, 'html')
msg.attach(part)
fp = open(email_img_path, 'rb')
msg_img =MIMEImage(fp.read())
fp.close()
# The template must reference this image via <img src="cid:image1">.
msg_img.add_header('Content-ID', '<image1>')
msg.attach(msg_img)
server = smtplib.SMTP('XXXX')
server.sendmail(sender, recipient, msg.as_string())
server.quit()
#print('done') | StarcoderdataPython |
1640986 | <reponame>olivier-m/miniature
# -*- coding: utf-8 -*-
#
# This file is part of Tamia released under the MIT license.
# See the LICENSE for more information.
from __future__ import (print_function, division, absolute_import, unicode_literals)
import unittest
if __name__ == '__main__':
    # No tests are defined in this module yet; running it directly just
    # invokes unittest's (empty) test discovery for this file.
    unittest.main()
| StarcoderdataPython |
3259439 | import requests
import json
# Fetch a GitHub user profile and print the response type and JSON body.
URL = "https://api.github.com/users/sivanWu0222"
# Fix: requests has no default timeout, so a stalled connection would hang
# this script forever; bound the wait explicitly.
response = requests.get(URL, timeout=10)
print(type(response))
# Fail loudly on HTTP errors instead of printing an error payload as if it
# were profile data.
response.raise_for_status()
response_dict = response.json()
print(response_dict)
| StarcoderdataPython |
3251254 | """
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
from typing import List, Tuple
from fuse.data.data_source.data_source_base import FuseDataSourceBase
from fuse_examples.classification.prostate_x.data_utils import FuseProstateXUtilsData
class FuseProstateXDataSourcePatient(FuseDataSourceBase):
    def __init__(self,
                 db_path: str,
                 set_type: str,
                 db_name: str,
                 db_ver: int = 11,
                 fold_no: int = 0,
                 include_gt: bool = True,
                 include_pred: bool = True,
                 ):
        """
        Fuse DataSource for ProstateX data.
        Generates one sample descriptor per patient.

        :param db_path: root directory of the database
        :param set_type: 'train' 'validation' 'test'
        :param db_name: dataset flavor; only 'prostate_x' is supported here
        :param db_ver: database version
        :param fold_no: cross-validation fold index
        :param include_gt: if True, add a descriptor tagged 'gt' per patient
        :param include_pred: if True, add a descriptor tagged 'pred' per patient
        """
        self.db_path = db_path
        self.set_type = set_type
        self.db_ver = db_ver
        self.include_gt = include_gt
        self.include_pred = include_pred
        self.db_name = db_name
        self.fold_no = fold_no
        self.desc_list = self.generate_patient_list()

    def get_samples_description(self):
        # Return a shallow copy so callers cannot mutate the internal list.
        return list(self.desc_list)

    def summary(self) -> str:
        """
        See base class
        """
        summary_str = ''
        summary_str += f'Class = {type(self)}\n'
        summary_str += f'Input source = {self.set_type}\n'
        summary_str += f'Number of Patients = {len(self.desc_list)}\n'
        return summary_str

    def generate_patient_list(self) -> List[Tuple]:
        '''
        Go over all patients and create a tuple list of
        (db_ver, set_type, patient_id, 'pred'|'gt').
        :return: list of patient descriptors
        '''
        data = FuseProstateXUtilsData.get_dataset(self.db_path, self.set_type, self.db_ver, self.db_name, self.fold_no)
        if self.db_name == 'prostate_x':
            data_lesions = FuseProstateXUtilsData.get_lesions_prostate_x(data)
            patients = list(data_lesions['Patient ID'].unique())
            desc_list = []
            if self.include_pred:
                desc_list += [(self.db_ver, self.set_type, patient_id, 'pred') for patient_id in patients]
            if self.include_gt:
                desc_list += [(self.db_ver, self.set_type, patient_id, 'gt') for patient_id in patients]
            return desc_list
        # Fix: the original silently fell off the end and returned None for any
        # other db_name, which later crashed get_samples_description() and
        # summary(); fail fast with an explicit error instead.
        raise NotImplementedError(f"db_name '{self.db_name}' is not supported by this data source")
if __name__ == "__main__":
    # Example / smoke usage with a hard-coded dataset location.
    # NOTE(review): db_name='tcia' is not handled by generate_patient_list,
    # which only supports 'prostate_x' -- confirm intended.
    path_to_db = '/gpfs/haifa/projects/m/msieve_dev3/usr/Tal/my_research/virtual_biopsy/prostate/experiments/V1/'
    train_data_source = FuseProstateXDataSourcePatient(path_to_db,'train',db_name='tcia', db_ver='18042021',fold_no=0, include_gt=False)
| StarcoderdataPython |
26290 | import copy
import json
import numpy as np
import pandas as pd
import basicDeltaOperations as op
import calcIsotopologues as ci
import fragmentAndSimulate as fas
import solveSystem as ss
'''
This is a set of functions to quickly initalize methionine molecules based on input delta values and to simulate its fragmentation. See runAllTests for implementation.
'''
def initializeMethionine(deltas, fragSubset = ['full','133','104','102','88','74High','74Low','61','56'], printHeavy = True):
    '''
    Initializes methionine, returning a dataframe with basic information about the molecule as well as
    information about fragmentation.

    Inputs:
        deltas: A list of 13 M1 delta values, giving the delta values by site for the 13C, 17O, 15N, 33S, and
                2H isotopes. The sites are defined in the IDList variable, below.
        fragSubset: A list giving the subset of fragments to observe. If you are not observing all fragments,
                you may input only those you do observe.
        printHeavy: The user manually specifies delta 17O, and delta 18O is set via mass scaling (see
                basicDeltaOperations). If True, this will print out delta 18O, 34S, & 36S.

    Outputs:
        molecularDataFrame: A dataframe containing basic information about the molecule (one row per site).
        expandedFrags: An ATOM depiction of each fragment, where an ATOM depiction has one entry for each atom
                (rather than for each site). See fragmentAndSimulate for details.
        fragKeys: A list of strings, e.g. 133_01, corresponding to each subgeometry of each fragment. A
                fragment will have multiple subgeometries if there are multiple fragmentation pathways to
                form it.
        fragmentationDictionary: A dictionary like the allFragments variable below, but only including the
                subset of fragments selected by fragSubset.
    '''
    ##### INITIALIZE SITES #####
    # One entry per molecular site; numberAtSite gives how many equivalent
    # atoms each site contains (13 sites in total).
    IDList = ['Cmethyl','Cgamma','Calphabeta','Ccarboxyl','Ocarboxyl','Ssulfur','Namine','Hmethyl','Hgamma',
              'Halphabeta','Hamine','Hhydroxyl','Hprotonated']
    elIDs = ['C','C','C','C','O','S','N','H','H','H','H','H','H']
    numberAtSite = [1,1,2,1,2,1,1,3,2,3,2,1,1]

    l = [elIDs, numberAtSite, deltas]
    cols = ['IDS','Number','deltas']

    condensedFrags =[]
    fragKeys = []

    # Site-level fragment geometries: 1 = site retained, 'x' = site lost.
    # 88 and both 74 are conjecture. 74 High has only one oxygen, so we generally do not use it.
    allFragments = {'full':{'01':{'subgeometry':[1,1,1,1,1,1,1,1,1,1,1,1,1],'relCont':1}},
                    '133':{'01':{'subgeometry':[1,1,1,1,1,1,'x',1,1,1,'x',1,'x'],'relCont':1}},
                    '104':{'01':{'subgeometry':[1,1,1,'x','x',1,1,1,1,1,1,'x','x'],'relCont':1}},
                    '102':{'01':{'subgeometry':['x',1,1,1,1,'x',1,'x',1,1,1,1,'x'],'relCont':1}},
                    '88':{'01':{'subgeometry':[1,1,1,'x','x',1,'x',1,1,'x',1,'x','x'],'relCont':1}},
                    '74High':{'01':{'subgeometry':[1,'x',1,'x',1,'x',1,1,1,1,'x','x','x'],'relCont':1}},
                    '74Low':{'01':{'subgeometry':[1,1,'x','x',1,'x',1,'x',1,'x',1,'x','x'],'relCont':1}},
                    '61':{'01':{'subgeometry':[1,1,'x','x','x',1,'x',1,1,'x','x','x','x'],'relCont':1}},
                    '56':{'01':{'subgeometry':['x',1,1,'x','x','x',1,'x',1,1,'x',1,'x'],'relCont':1}}}

    # Keep only the fragments the user actually observes.
    fragmentationDictionary = {key: value for key, value in allFragments.items() if key in fragSubset}

    # Record each subgeometry both as a dataframe column and as a condensed
    # site-level geometry for later expansion.
    for fragKey, subFragDict in fragmentationDictionary.items():
        for subFragNum, subFragInfo in subFragDict.items():
            l.append(subFragInfo['subgeometry'])
            cols.append(fragKey + '_' + subFragNum)
            condensedFrags.append(subFragInfo['subgeometry'])
            fragKeys.append(fragKey + '_' + subFragNum)

    molecularDataFrame = pd.DataFrame(l, columns = IDList)
    molecularDataFrame = molecularDataFrame.transpose()
    molecularDataFrame.columns = cols

    # Expand the site-level geometries to one entry per atom.
    expandedFrags = [fas.expandFrag(x, numberAtSite) for x in condensedFrags]

    if printHeavy:
        # Heavier isotopes are fixed by mass scaling from the M1 deltas
        # (sulfur at site index 5, oxygen at site index 4).
        SConc = op.deltaToConcentration('S',deltas[5])
        del34 = op.ratioToDelta('34S',SConc[2]/SConc[0])
        del36 = op.ratioToDelta('36S',SConc[3]/SConc[0])

        OConc = op.deltaToConcentration('O',deltas[4])
        del18 = op.ratioToDelta('18O',OConc[2]/OConc[0])

        print("Delta 34S")
        print(del34)
        print("Delta 36S")
        print(del36)

        print("Delta 18O")
        print(del18)

    return molecularDataFrame, expandedFrags, fragKeys, fragmentationDictionary
def simulateMeasurement(molecularDataFrame, fragmentationDictionary, expandedFrags, fragKeys, abundanceThreshold = 0, UValueList = None,
                        massThreshold = 4, clumpD = None, outputPath = None, disableProgress = False, calcFF = False,
                        fractionationFactors = None, omitMeasurements = None, ffstd = 0.05, unresolvedDict = None, outputFull = False):
    '''
    Simulates M+N measurements of a methionine molecule with input deltas specified by the input dataframe molecularDataFrame.

    Inputs:
        molecularDataFrame: A dataframe containing basic information about the molecule.
        fragmentationDictionary: A dictionary like the allFragments variable from initializeMethionine, but only including
                                 the subset of fragments selected by fragSubset.
        expandedFrags: An ATOM depiction of each fragment (one entry per atom rather than per site).
        fragKeys: A list of strings, e.g. '133_01', corresponding to each subgeometry of each fragment.
        abundanceThreshold: A float; measurements below this M+N relative abundance are not included (assumed unmeasurable).
        UValueList: Substitutions to calculate molecular average U values for ('13C', '15N', ...). Defaults to none.
        massThreshold: An integer; M+N relative abundances are calculated for N <= massThreshold.
        clumpD: Information about clumps to add; otherwise the isotome follows the stochastic assumption.
                Currently works only for mass 1 substitutions. See ci.introduceClump for details. Defaults to no clumps.
        outputPath: A string, e.g. 'output', or None. If a string, the simulated spectrum is written to outputPath + '.json'.
        disableProgress: Disables tqdm progress bars when True.
        calcFF: When True, computes a new set of fractionation factors for this measurement.
        fractionationFactors: A fractionation factor to apply to each ion beam (e.g. reuse factors from a standard so a
                              sample/standard comparison shares the same experimental fractionation). Defaults to none.
        omitMeasurements: Measurements that will not be observed, e.g. {'M1':{'61':'D'}} drops the D beam of the 61
                          fragment of the M+1 experiment regardless of abundance. Defaults to none.
        ffstd: If new fractionation factors are calculated, they are drawn from a normal distribution centered on 1
               with this standard deviation.
        unresolvedDict: Which unresolved ion beams add to each other. Defaults to none.
        outputFull: Typically False, culling unobserved beams. True keeps them (debugging only; may break the solver).

    Outputs:
        predictedMeasurement: A dictionary giving information from the M+N measurements.
        MN: A dictionary keyed by mass selection ("M1", "M2", ...) with isotopologue information for each selection.
        FF: The calculated fractionation factors for this measurement (empty unless calcFF == True).
    '''
    # Normalize None defaults to fresh containers; using {}/[] directly as
    # default values would share one mutable object across every call.
    if UValueList is None:
        UValueList = []
    if clumpD is None:
        clumpD = {}
    if fractionationFactors is None:
        fractionationFactors = {}
    if omitMeasurements is None:
        omitMeasurements = {}
    if unresolvedDict is None:
        unresolvedDict = {}

    # An M+1-only computation lets downstream code skip higher-mass isotopologues.
    M1Only = (massThreshold == 1)

    byAtom = ci.inputToAtomDict(molecularDataFrame, disable = disableProgress, M1Only = M1Only)

    # Introduce any clumps of interest; otherwise compute the substitution
    # dictionary directly from the stochastic atom dictionary.
    if not clumpD:
        bySub = ci.calcSubDictionary(byAtom, molecularDataFrame, atomInput = True)
    else:
        print("Adding clumps")
        # Keep the stochastic version for the sanity check below.
        stochD = copy.deepcopy(byAtom)

        for clumpNumber, clumpInfo in clumpD.items():
            byAtom = ci.introduceClump(byAtom, clumpInfo['Sites'], clumpInfo['Amount'], molecularDataFrame)

        for clumpNumber, clumpInfo in clumpD.items():
            ci.checkClumpDelta(clumpInfo['Sites'], molecularDataFrame, byAtom, stochD)

        bySub = ci.calcSubDictionary(byAtom, molecularDataFrame, atomInput = True)

    # Initialize measurement output with molecular average U values.
    if not disableProgress:
        print("Simulating Measurement")
    allMeasurementInfo = {}
    allMeasurementInfo = fas.UValueMeasurement(bySub, allMeasurementInfo, massThreshold = massThreshold,
                                               subList = UValueList)

    # Select isotopologues by mass and track them through each fragment.
    MN = ci.massSelections(byAtom, massThreshold = massThreshold)
    MN = fas.trackMNFragments(MN, expandedFrags, fragKeys, molecularDataFrame, unresolvedDict = unresolvedDict)

    predictedMeasurement, FF = fas.predictMNFragmentExpt(allMeasurementInfo, MN, expandedFrags, fragKeys, molecularDataFrame,
                                                         fragmentationDictionary,
                                                         abundanceThreshold = abundanceThreshold, calcFF = calcFF, ffstd = ffstd,
                                                         fractionationFactors = fractionationFactors,
                                                         omitMeasurements = omitMeasurements,
                                                         unresolvedDict = unresolvedDict, outputFull = outputFull)

    # Optionally persist the simulated spectrum as JSON; the with-block
    # guarantees the file handle is closed even if the write fails.
    if outputPath is not None:
        with open(outputPath + ".json", "w") as f:
            f.write(json.dumps(predictedMeasurement))

    return predictedMeasurement, MN, FF
def updateAbundanceCorrection(latestDeltas, fragSubset, fragmentationDictionary, expandedFrags,
                              fragSubgeometryKeys, processStandard, processSample, isotopologuesDict, UValuesSmp, molecularDataFrame,
                              NUpdates = 30, breakCondition = 1, perturbTheoryOAmt = 0.002,
                              experimentalOCorrectList = None,
                              abundanceThreshold = 0,
                              massThreshold = 1,
                              omitMeasurements = None,
                              unresolvedDict = None,
                              UMNSub = None,
                              N = 100,
                              setSpreadByExtreme = False,
                              oACorrectBounds = False):
    '''
    A function for the iterated abundance correction. Each of up to NUpdates iterations:
    1) takes the most recent set of deltas, recomputes the predicted measurement of methionine with them, and uses this
       to update the O value correction;
    2) defines a standard deviation to sample around this O value, based on perturbTheoryOAmt (sigma = mean * perturbTheoryOAmt);
    3) recalculates the site specific structure using the new correction factors;
    4) checks if the difference between the old deltas and new deltas is smaller than breakCondition; if so, stops.

    Inputs:
        latestDeltas: The input deltas for the first iteration.
        fragSubset: The subset of fragments observed.
        fragmentationDictionary: Like the allFragments variable from initializeMethionine, restricted to fragSubset.
        expandedFrags: An ATOM depiction of each fragment (one entry per atom rather than per site).
        fragSubgeometryKeys: Strings like '133_01' identifying each subgeometry of each fragment.
        processStandard: A dictionary of standard measurements,
                         process[fileKey][MNKey][fragKey] = {'Observed Abundance': [...], 'Subs': [...],
                                                             'Error': [...], 'predicted Abundance': [...]}.
        processSample: As processStandard, but with empty 'Predicted Abundance' entries.
        isotopologuesDict: Keys "M0", "M1", ... mapping to dataFrames of isotopologues with those substitutions.
        UValuesSmp: Molecular average U values and errors, {'13C': {'Observed': float, 'Error': float}, ...}.
        molecularDataFrame: A dataFrame containing information about the molecule.
        NUpdates: The maximum number of iterations.
        breakCondition: The routine ends when the sum of squared delta differences drops below this value.
        perturbTheoryOAmt: Relative width of each O correction distribution (sigma = mean * perturbTheoryOAmt).
        experimentalOCorrectList: Peaks to use experimental correction for (see solveSystem.perturbSample). Defaults to none.
        abundanceThreshold, massThreshold, omitMeasurements, unresolvedDict: See simulateMeasurement.
        UMNSub: Substitutions whose molecular average U values are used to calculate UMN; should be substitutions that are
                fully solved in M+N relative abundance space (e.g. 36S, 18O, 13C, 34S). Defaults to ['13C'].
        N: Number of iterations for each M+N Monte Carlo.

    Outputs:
        M1Results: A dataframe giving the final results of the iterated correction process.
        thisODict: Information about each correction, plus histograms of sampled O values from every 10th
                   (and the final) iteration.
    '''
    # Normalize mutable defaults; list/dict defaults would be shared across calls.
    if experimentalOCorrectList is None:
        experimentalOCorrectList = []
    if omitMeasurements is None:
        omitMeasurements = {}
    if unresolvedDict is None:
        unresolvedDict = {}
    if UMNSub is None:
        UMNSub = ['13C']

    # NOTE(review): setSpreadByExtreme and oACorrectBounds are accepted but not
    # referenced anywhere in this routine; kept for interface compatibility.

    # Track the output of the iterated correction process.
    thisODict = {'residual':[],
                 'delta':[],
                 'O':[],
                 'relDelta':[],
                 'relDeltaErr':[],
                 'Histogram':[]}

    for i in range(NUpdates):
        oldDeltas = latestDeltas

        # Get a new dataframe and simulate a new measurement with the latest deltas.
        M1Df, expandedFrags, fragSubgeometryKeys, fragmentationDictionary = initializeMethionine(latestDeltas, fragSubset,
                                                                                                 printHeavy = False)

        predictedMeasurementUpdate, MNDictUpdate, FFUpdate = simulateMeasurement(M1Df, fragmentationDictionary,
                                                                                 expandedFrags,
                                                                                 fragSubgeometryKeys,
                                                                                 abundanceThreshold = abundanceThreshold,
                                                                                 massThreshold = massThreshold,
                                                                                 calcFF = False,
                                                                                 outputPath = None,
                                                                                 disableProgress = True,
                                                                                 fractionationFactors = {},
                                                                                 omitMeasurements = omitMeasurements,
                                                                                 unresolvedDict = unresolvedDict)

        # Generate new O corrections from the updated theoretical spectrum.
        OCorrectionUpdate = ss.OValueCorrectTheoretical(predictedMeasurementUpdate, processSample,
                                                        massThreshold = massThreshold)

        # For each O correction, define a normal distribution: the computed value is
        # the mean, and sigma is set by perturbTheoryOAmt. explicitOCorrect may
        # optionally carry "Bounds" entries for extreme-value runs; not used here.
        explicitOCorrect = {}
        for MNKey, MNData in OCorrectionUpdate.items():
            if MNKey not in explicitOCorrect:
                explicitOCorrect[MNKey] = {}
            for fragKey, fragData in MNData.items():
                if fragKey not in explicitOCorrect[MNKey]:
                    explicitOCorrect[MNKey][fragKey] = {}
                explicitOCorrect[MNKey][fragKey]['Mu,Sigma'] = (fragData, fragData * perturbTheoryOAmt)

        M1Results = ss.M1MonteCarlo(processStandard, processSample, OCorrectionUpdate, isotopologuesDict,
                                    fragmentationDictionary, perturbTheoryOAmt = perturbTheoryOAmt,
                                    experimentalOCorrectList = experimentalOCorrectList,
                                    N = N, GJ = False, debugMatrix = False, disableProgress = True,
                                    storePerturbedSamples = False, storeOCorrect = True,
                                    explicitOCorrect = explicitOCorrect, perturbOverrideList = ['M1'])

        processedResults = ss.processM1MCResults(M1Results, UValuesSmp, isotopologuesDict, molecularDataFrame,
                                                 disableProgress = True, UMNSub = UMNSub)

        ss.updateSiteSpecificDfM1MC(processedResults, molecularDataFrame)

        M1Df = molecularDataFrame.copy()
        M1Df['deltas'] = M1Df['VPDB etc. Deltas']

        # Record this iteration's correction factors and deltas.
        thisODict['O'].append(copy.deepcopy(OCorrectionUpdate['M1']))
        thisODict['delta'].append(list(M1Df['deltas']))

        residual = ((np.array(M1Df['deltas']) - np.array(oldDeltas))**2).sum()
        thisODict['residual'].append(residual)

        latestDeltas = M1Df['deltas'].values
        thisODict['relDelta'].append(M1Df['Relative Deltas'].values)
        thisODict['relDeltaErr'].append(M1Df['Relative Deltas Error'].values)

        print(residual)

        # Keep a histogram of the sampled O corrections every 10th iteration
        # and on the final (converged) iteration.
        if i % 10 == 0 or residual <= breakCondition:
            # NOTE(review): fragment keys are hard-coded here ('full', '133', '61');
            # confirm these match fragSubset if the observed fragments change.
            correctVals = {'61':[],
                           '133':[],
                           'full':[]}
            for res in M1Results['Extra Info']['O Correct']:
                correctVals['full'].append(res['full'])
                correctVals['133'].append(res['133'])
                correctVals['61'].append(res['61'])
            thisODict['Histogram'].append(copy.deepcopy(correctVals))

        if residual <= breakCondition:
            break

    return M1Results, thisODict
3226712 | <reponame>islandowner-web/IT-MOOC
# Generated by Django 2.2 on 2019-12-11 19:50
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (Django 2.2): remove ``Teacher.points``.

    Generated by ``makemigrations``; do not edit by hand.
    """

    # Must be applied after the teacher work_company cleanup migration.
    dependencies = [
        ('organizations', '0017_remove_teacher_work_company'),
    ]

    operations = [
        # Drops the `points` field/column from the organizations.Teacher model.
        migrations.RemoveField(
            model_name='teacher',
            name='points',
        ),
    ]
| StarcoderdataPython |
3237455 | <filename>pykeg/backup/mysql.py
"""MySQL-specific database backup/restore implementation."""
from builtins import str
import logging
import subprocess
from django.conf import settings
from django.apps import apps
logger = logging.getLogger(__name__)
DEFAULT_DB = "default"

# Connection settings for the default database, shared by every
# mysql/mysqldump invocation in this module.
_DB_CONFIG = settings.DATABASES[DEFAULT_DB]

PARAMS = {
    "db": _DB_CONFIG.get("NAME"),
    "user": _DB_CONFIG.get("USER"),
    "password": _DB_CONFIG.get("PASSWORD"),
    "host": _DB_CONFIG.get("HOST"),
    "port": _DB_CONFIG.get("PORT"),
}

# Common command-line arguments: one `--key=value` flag per configured
# connection parameter, in a fixed order.
DEFAULT_ARGS = [
    "--{}={}".format(key, PARAMS[key])
    for key in ("user", "password", "host", "port")
    if PARAMS.get(key)
]
def engine_name():
    """Identify this backup backend as MySQL."""
    return "mysql"
def is_installed():
    """Return True if the Kegbot schema exists in the configured database.

    Probes for the ``core_kegbotsite`` table by running ``SHOW TABLES LIKE``
    through the mysql command-line client.
    """
    args = ["mysql", "--batch"] + DEFAULT_ARGS + [PARAMS["db"]]
    args += ["-e", "'show tables like \"core_kegbotsite\";'"]
    cmd = " ".join(args)
    logger.info("command: {}".format(cmd))
    # check_output() returns bytes on Python 3; decode before the substring
    # test, otherwise `str in bytes` raises TypeError.
    output = subprocess.check_output(cmd, shell=True).decode("utf-8")
    logger.info("result: {}".format(output))
    return "core_kegbotsite" in output
def dump(output_fd):
    """Write a SQL dump of the Kegbot database to `output_fd`.

    Uses a single transaction for a consistent snapshot, and omits the dump
    date so identical databases produce identical dumps.
    """
    # Reuse the module-level DEFAULT_ARGS (user/password/host/port flags, in
    # that order) instead of rebuilding the same flags locally, keeping all
    # commands in this module consistent.
    args = ["mysqldump", "--skip-dump-date", "--single-transaction"] + DEFAULT_ARGS + [PARAMS["db"]]
    cmd = " ".join(args)
    logger.info(cmd)
    return subprocess.check_call(cmd, stdout=output_fd, shell=True)
def restore(input_fd):
    """Feed a SQL dump from `input_fd` into the configured database."""
    cmd = " ".join(["mysql"] + DEFAULT_ARGS + [PARAMS["db"]])
    logger.info(cmd)
    return subprocess.check_call(cmd, stdin=input_fd, shell=True)
def erase():
    """Drop every table belonging to the installed Django models.

    Foreign key checks are disabled for the duration of the drops so tables
    can be removed in any order, then re-enabled.
    """
    # Reuse the module-level DEFAULT_ARGS instead of rebuilding the same
    # user/password/host/port flags locally (consistent with restore()).
    args = ["mysql"] + DEFAULT_ARGS + [PARAMS["db"]]

    # Build the sql command: one DROP per model table, wrapped in FK-check toggles.
    tables = [str(model._meta.db_table) for model in apps.get_models()]
    query = ["DROP TABLE IF EXISTS {};".format(t) for t in tables]
    query = ["SET FOREIGN_KEY_CHECKS=0;"] + query + ["SET FOREIGN_KEY_CHECKS=1;"]
    query = " ".join(query)

    cmd = " ".join(args + ["-e", "'{}'".format(query)])
    logger.info(cmd)
    subprocess.check_call(cmd, shell=True)
| StarcoderdataPython |
3376690 | <reponame>pasmuss/cmssw
import FWCore.ParameterSet.Config as cms
# Validation analyzer for HCAL digis: compares unpacked digis ("hcalDigis")
# against simulated hits and trigger primitives (emulated vs. simulated).
hcaldigisAnalyzer = cms.EDAnalyzer("HcalDigisValidation",
    outputFile = cms.untracked.string(''),  # '' -> no standalone output file configured here
    digiTag = cms.InputTag("hcalDigis"),
    QIE10digiTag= cms.InputTag("hcalDigis"),
    QIE11digiTag= cms.InputTag("hcalDigis"),
    mode = cms.untracked.string('multi'),
    hcalselector= cms.untracked.string('all'),  # presumably selects the HCAL subdetector(s) to validate — confirm in the plugin
    mc = cms.untracked.string('yes'),  # running on simulation (SimHits available)
    simHits = cms.untracked.InputTag("g4SimHits","HcalHits"),
    emulTPs = cms.InputTag("emulDigis"),
    dataTPs = cms.InputTag("simHcalTriggerPrimitiveDigis"),
    TestNumber = cms.bool(False),  # overridden to True for 2017+ (see era modifiers below)
    hep17 = cms.bool(False)        # overridden to True for the HE Plan 1 (HEP17) era
)
# Era customisations: each Modifier adjusts the analyzer when its era is active.

from Configuration.Eras.Modifier_fastSim_cff import fastSim
# FastSim stores its simhits under a different producer label. Applied with
# toModify rather than the deprecated `if fastSim.isChosen():` pattern, for
# consistency with the other era customisations below.
fastSim.toModify(hcaldigisAnalyzer,
    simHits = cms.untracked.InputTag("famosSimHits", "HcalHits")
)

from Configuration.Eras.Modifier_run2_HCAL_2017_cff import run2_HCAL_2017
run2_HCAL_2017.toModify(hcaldigisAnalyzer,
    TestNumber = cms.bool(True)
)

from Configuration.Eras.Modifier_run2_HEPlan1_2017_cff import run2_HEPlan1_2017
run2_HEPlan1_2017.toModify(hcaldigisAnalyzer,
    hep17 = cms.bool(True)
)

from Configuration.Eras.Modifier_phase2_hcal_cff import phase2_hcal
# Phase-2: digis come from the simulation step with separate QIE10/QIE11
# collections, and no data trigger primitives are available.
phase2_hcal.toModify(hcaldigisAnalyzer,
    dataTPs = cms.InputTag(""),
    digiTag = cms.InputTag("simHcalDigis"),
    QIE10digiTag = cms.InputTag("simHcalDigis", "HFQIE10DigiCollection"),
    QIE11digiTag = cms.InputTag("simHcalDigis", "HBHEQIE11DigiCollection"),
)
| StarcoderdataPython |
1723555 | <gh_stars>1-10
from fastapi import APIRouter, Depends, status, HTTPException
from fastapi.encoders import jsonable_encoder
from database import get_db
from models.pokemon_type import PokemonType as pokemon_type_model
from schemas.pokemon_type import ShowPokemonType as show_pokemon_type_schema
from schemas.pokemon_type import PokemonType as pokemon_type_schema
from sqlalchemy.orm import Session
def create(pokemon_type, db):
    """Persist a new pokemon type and return the stored row."""
    record = pokemon_type_model(name=pokemon_type.name)
    db.add(record)
    db.commit()
    db.refresh(record)
    return record
def delete(type_id, db):
    """Remove the pokemon type with `type_id`; raise 404 if it does not exist."""
    query = db.query(pokemon_type_model).filter(pokemon_type_model.id == type_id)
    if query.first() is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f'The Pokemon Type with the id {type_id} was not found')
    query.delete(synchronize_session=False)
    db.commit()
def update(type_id, pokemon_type, db):
    """Overwrite the pokemon type `type_id` with the payload; raise 404 if missing."""
    query = db.query(pokemon_type_model).filter(pokemon_type_model.id == type_id)
    if query.first() is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f'The Pokemon Type with the id {type_id} was not found')
    query.update(jsonable_encoder(pokemon_type))
    db.commit()
def get(pokemon_type_id, db):
    """Fetch one pokemon type by id; raise 404 if missing."""
    record = db.query(pokemon_type_model).filter(pokemon_type_model.id == pokemon_type_id).first()
    if record is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f'The Pokemon Type with the id {pokemon_type_id} was not found!')
    return record
165435 | <filename>src/FeatureVolumeCacheSequence.py
#!/usr/bin/env python3
# Developed by <NAME> and <NAME>
# This file is covered by the LICENSE file in the root of this project.
# A keras generator which generates batches out of cached feature volumes
import os
import numpy as np
from keras.utils import Sequence
class FeatureVolumeCacheSequence(Sequence):
    """A keras Sequence which caches map feature volumes in CPU memory.

    Serves batches of (query, map-candidate) feature-volume pairs for
    inference only -- no ground truth is produced. Map feature volumes are
    held in a fixed-size FIFO cache to avoid repeated disk reads.
    """

    def __init__(self, config, feature_volume_size, cache_size):
        """Initialize the dataset. It is assumed that all feature volumes are
        present. No ground truth is used; this class is only for inference.

        Args:
          config: struct with configuration. Used attributes:
                  batch_size: size of a batch;
                  'data_root_folder', 'infer_seqs_map', 'infer_seqs_query':
                  for the paths to the feature volumes.
          feature_volume_size: tuple (height, width, channels) of one volume.
          cache_size: number of feature volumes kept in CPU memory.
        """
        self.datasetpath_map = config['data_root_folder'] + '/' + config['infer_seqs_map']
        self.datasetpath_query = config['data_root_folder'] + '/' + config['infer_seqs_query']
        self.batch_size = config['batch_size']
        self.feature_volume_size = feature_volume_size
        self.cache_size = cache_size

        # The cache: feature volumes as a numpy array n x h x w x channels.
        self.cache = np.zeros((cache_size, feature_volume_size[0], feature_volume_size[1],
                               feature_volume_size[2]))

        # Lookup table for the cache: filename -> slot index.
        self.cache_entries = {}
        # Vice versa: the key stored in every slot ('' == slot unused).
        self.key_for_cache_entries = ["" for _ in range(cache_size)]
        # Next slot to overwrite (simple FIFO replacement).
        self.nextfreeidx = 0

        # Statistics
        self.no_queries = 0
        self.cache_hit = 0

    def coord2filename(self, coord):
        """Convert a 1x2 array of X/Y coordinates into the feature-volume base
        filename (no extension), e.g. '+000012.34_-000005.60'."""
        new_x = str('{:+.2f}'.format(coord[0])).zfill(10)
        new_y = str('{:+.2f}'.format(coord[1])).zfill(10)
        return new_x + '_' + new_y

    def coord_or_idx2filename(self, coord_or_idx):
        """Return the base filename for either an X/Y coordinate array or an
        integer scan index."""
        if isinstance(coord_or_idx, np.ndarray):
            # Coordinate case: same formatting as coord2filename; delegate to
            # keep the two code paths consistent.
            return self.coord2filename(coord_or_idx)
        return str(coord_or_idx).zfill(6)

    def new_task(self, coord_current_frame, coordinates_nearby_grid):
        """Prepare a new matching task: one query frame against a grid of
        nearby map candidates."""
        # Number of pairs to infer.
        self.n = len(coordinates_nearby_grid)

        # Filenames of all map candidates and of the current query frame.
        self.map_filenames = [self.coord2filename(c) for c in coordinates_nearby_grid]
        self.current_filename = self.coord_or_idx2filename(coord_current_frame)

        # First network input: the query feature volume repeated batch_size times.
        fcurrent = self.load_feature_volume(self.current_filename, use_query_seq=True)
        self.input1 = np.tile(fcurrent, (self.batch_size, 1, 1, 1,))

    def get_feature_volume(self, batchi):
        """Return the feature volume for map candidate `batchi`, loading it
        into the FIFO cache on a miss."""
        self.no_queries += 1
        key = self.map_filenames[batchi]
        if key in self.cache_entries:
            self.cache_hit += 1
        else:
            # Evict whatever currently occupies the next FIFO slot.
            if len(self.key_for_cache_entries[self.nextfreeidx]) > 0:
                del self.cache_entries[self.key_for_cache_entries[self.nextfreeidx]]
                self.key_for_cache_entries[self.nextfreeidx] = ''

            self.cache[self.nextfreeidx, :, :, :] = self.load_feature_volume(key)
            self.cache_entries[key] = self.nextfreeidx
            self.key_for_cache_entries[self.nextfreeidx] = key

            self.nextfreeidx += 1
            if self.nextfreeidx == self.cache_size:
                self.nextfreeidx = 0

        return self.cache[self.cache_entries[key], :, :, :]

    def load_feature_volume(self, filename, use_query_seq=False):
        """Load one feature volume ('<filename>.npz') from the query or map
        sequence directory.

        Returns an all-zero volume of the configured size if the file is
        missing (an error is printed).
        """
        base = self.datasetpath_query if use_query_seq else self.datasetpath_map
        complete_path = base + '/feature_volumes/' + filename + '.npz'
        if not os.path.exists(complete_path):
            print('ERROR: feature volume %s doest not exist!!!!' % complete_path)
            # Previously a hard-coded (1, 360, 128) array was returned here,
            # which broke configurations with a different feature volume size.
            return np.zeros(self.feature_volume_size)
        return np.load(complete_path)['arr_0']

    # implemented interface of Sequence base class
    def __len__(self):
        """Number of batches needed to cover all candidate pairs."""
        return int(np.ceil(self.n / float(self.batch_size)))

    # implemented interface of Sequence base class
    def __getitem__(self, idx):
        """Assemble batch `idx`: ([repeated query volumes, map candidates], dummy label).

        The final batch may be smaller than batch_size.
        """
        maxidx = (idx + 1) * self.batch_size
        cb_size = self.batch_size
        input1 = self.input1
        if maxidx > self.n:
            maxidx = self.n
            cb_size = maxidx - idx * self.batch_size
            input1 = self.input1[0:cb_size, :, :, :]

        input2 = np.zeros((cb_size, self.feature_volume_size[0], self.feature_volume_size[1],
                           self.feature_volume_size[2]))
        d = idx * self.batch_size
        for batchi in range(d, maxidx):
            input2[batchi - d, :, :, :] = self.get_feature_volume(batchi)

        # Label is a dummy 0: this sequence is used for inference only.
        return ([input1, input2], 0)

    def print_statistics(self):
        """Print the cache hit rate accumulated since construction."""
        print('Feature volume cache hit rate: %5.1f %%' %
              (100.0 * self.cache_hit / self.no_queries))
| StarcoderdataPython |
3309912 | from requirements_diffing import diff_files
def test_diff_files():
    """Placeholder test for diff_files.

    TODO: exercise diff_files() against two fixture requirement files and
    assert on the reported differences.
    """
    pass
| StarcoderdataPython |
4808126 | <gh_stars>1-10
from .types import Number, Boolean, NegNumber
from .operations import Add, Sub, Div, Mul, FloorDiv, Modulo, Exponent
from .comparisons import Lesser, Greater, Equal, NotEqual, LessThanEqual, GreaterThanEqual
from .functions import Log, Abs | StarcoderdataPython |
545 | import json
from cisco_sdwan_policy.BaseObject import BaseObject
class Application(BaseObject):
    """A vManage application (or application-family) policy list."""

    def __init__(self, name, app_list, is_app_family, id=None, reference=None, **kwargs):
        """Create an application list.

        Args:
            name: Policy list name.
            app_list: List of app (or app-family) names.
            is_app_family: True if entries are app families rather than apps.
            id: vManage list id, if known.
            reference: References payload from the API, if any.
        """
        self.type = "appList"
        self.id = id
        self.name = name
        self.references = reference
        self.app_family = is_app_family
        self._entries = app_list
        self.url = "template/policy/list/app"
        super().__init__(**kwargs)
        self.modified = False

    def get_entries(self):
        """Return the list of app (or app-family) names."""
        return self._entries

    def set_entries(self, entries):
        """Replace the entry list and mark this object as modified."""
        self.modified = True
        self._entries = entries

    @classmethod
    def from_json(cls, jsonfile, **kwargs):
        """Build an Application from a vManage API JSON payload.

        Returns None when the payload is not an app/appFamily list. An empty
        `entries` list previously raised IndexError (the else-branch indexed
        entries[0]); it now also yields None.
        """
        id = jsonfile["listId"]
        name = jsonfile["name"]
        references = jsonfile.get("references")
        entries_json = jsonfile["entries"]
        if not entries_json:
            return None
        if entries_json[0].get("app"):
            appFamily = False
            entries = [i["app"] for i in entries_json]
        else:
            if not entries_json[0].get("appFamily"):
                return None
            appFamily = True
            entries = [i["appFamily"] for i in entries_json]
        return cls(name, entries, appFamily, id, references, **kwargs)

    def to_json(self):
        """Serialize to the vManage policy-list payload format."""
        return {
            "name": self.name,
            "description": "Desc Not Required",
            "type": "app",
            "entries": [
                {"appFamily" if self.app_family else "app": i} for i in self._entries]
        }
| StarcoderdataPython |
3276010 | from virtualenv import Virtualenv # noqa
from virtualenv import __version__ # noqa
| StarcoderdataPython |
3394968 | <reponame>sdlm/ch-id-card-api<filename>src/predict/coords.py
import numpy as np
import torch
from PIL import Image
from torch import nn
from torchvision import models
from torchvision.transforms import transforms
MODEL_PATH = "/weights/resnet50_regression_v2.2.3.pt"
def load_model(weights_path: str = None):
    """Construct a ResNet-50 with an 8-output regression head and load trained weights.

    Args:
        weights_path: Optional path to a state dict; falls back to MODEL_PATH.
    Returns:
        The network in eval mode, mapped to CPU.
    """
    net = models.resnet50()
    # Replace the classifier with a regression head: 8 outputs.
    net.fc = nn.Linear(net.fc.in_features, 8)
    checkpoint = weights_path or MODEL_PATH
    net.load_state_dict(torch.load(checkpoint, map_location="cpu"))
    net.eval()
    return net
def predict_coords(model, tensor):
    """Run the regression model on `tensor` and return 4 (x, y) points.

    Args:
        model: Callable producing a (1, 8) output for the input batch.
        tensor: Input batch tensor (e.g. shape (1, C, H, W)).
    Returns:
        numpy float32 array of shape (4, 2): four (x, y) coordinate pairs in
        the model's output order.
    """
    # Disable autograd during inference: avoids building a graph and allows a
    # clean tensor -> numpy conversion (grad-carrying tensors cannot be
    # converted implicitly).
    with torch.no_grad():
        output = model(tensor)
    # Row-major reshape of the 8 outputs into 4 (x, y) pairs — same ordering
    # as the previous torch.split + np.resize combination.
    coords = output.detach().cpu().numpy().reshape(4, 2)
    return coords.astype("float32")
def get_card_coords(img: Image) -> np.array:
    """Predict the four corner coordinates of an ID card in `img`."""
    tensor = transforms.ToTensor()(img).unsqueeze(0)
    return predict_coords(load_model(), tensor)
| StarcoderdataPython |
30586 | <filename>_0697_div3/D_Cleaning_the_Phone.py
def solve(n, m, A, B):
    """Minimum convenience points lost to free at least `m` memory.

    Apps have memory sizes A[i] and convenience cost B[i] (1 or 2). Greedy:
    repeatedly compare removing the best pair (or single) of 1-point apps
    against the best 2-point app, preferring whichever frees more memory per
    step; finish early if a single 1-point app covers the remainder.

    Returns the minimum total convenience points, or -1 if freeing `m` is
    impossible.

    Note: `range` replaces the Python-2-only `xrange` used previously; the
    iteration is identical and the function now also runs under Python 3.
    """
    ones = sorted(A[i] for i in range(n) if B[i] == 1)
    twos = sorted(A[i] for i in range(n) if B[i] != 1)

    cost = 0
    remaining = m
    i = len(ones) - 1
    j = len(twos) - 1

    while remaining > 0 and (i >= 0 or j >= 0):
        # A single 1-point app covering everything left is always optimal.
        if i >= 0 and ones[i] >= remaining:
            remaining -= ones[i]
            cost += 1
            break

        # Best 2 convenience points' worth of 1-point apps (or the lone one left).
        best_ones = float('-inf')
        if i == 0:
            best_ones = ones[0]
        elif i > 0:
            best_ones = ones[i] + ones[i - 1]

        # Best single 2-point app.
        best_two = twos[j] if j >= 0 else float('-inf')

        if best_ones >= best_two:
            # Take one 1-point app now; its partner is reconsidered next round.
            remaining -= ones[i]
            i -= 1
            cost += 1
        else:
            remaining -= best_two
            j -= 1
            cost += 2

    return -1 if remaining > 0 else cost
# Driver (Python 2): read the number of test cases; for each case read
# n (apps) and m (memory to free), the memory sizes A and convenience
# points B, and print the minimum convenience points lost (or -1).
for _ in xrange(int(raw_input())):
    n, m = map(int, raw_input().split())
    A = map(int, raw_input().split())
    B = map(int, raw_input().split())
    print solve(n, m, A, B)
| StarcoderdataPython |
129140 | <filename>tests/test_inheritance.py
import pytest
from umongo import Document, fields, exceptions
from .common import BaseTest
class TestInheritance(BaseTest):
    """Exercises umongo document inheritance: the `cls` discriminator field,
    collection sharing between parent and child, abstract documents, and
    mixing in non-Document base classes."""

    def test_cls_field(self):
        """A child document gains a `cls` discriminator field; the parent does not."""

        @self.instance.register
        class Parent(Document):
            last_name = fields.StrField()

            class Meta:
                allow_inheritance = True

        @self.instance.register
        class Child(Parent):
            first_name = fields.StrField()

        assert 'cls' in Child.schema.fields
        Child.schema.fields['cls']
        assert not hasattr(Parent(), 'cls')
        assert Child().cls == 'Child'

        # `_cls` stored in mongo drives the discriminator when use_cls=True.
        loaded = Parent.build_from_mongo(
            {'_cls': 'Child', 'first_name': 'John', 'last_name': 'Doe'}, use_cls=True)
        assert loaded.cls == 'Child'

    def test_simple(self):
        """A child inherits the parent's collection; inheritance is single-level
        unless explicitly re-allowed."""

        @self.instance.register
        class Parent(Document):
            last_name = fields.StrField()

            class Meta:
                allow_inheritance = True
                collection_name = 'parent_col'

        assert Parent.opts.abstract is False
        assert Parent.opts.allow_inheritance is True

        @self.instance.register
        class Child(Parent):
            first_name = fields.StrField()

        assert Child.opts.abstract is False
        assert Child.opts.allow_inheritance is False
        assert Child.opts.collection_name == 'parent_col'
        assert Child.collection.name == 'parent_col'
        Child(first_name='John', last_name='Doe')

    def test_abstract(self):
        """Abstract documents: no collection_name allowed, cannot be
        instantiated, always allow inheritance; defaults propagate."""

        # Cannot define a collection_name for an abstract doc !
        with pytest.raises(exceptions.DocumentDefinitionError):
            @self.instance.register
            class BadAbstractDoc(Document):
                class Meta:
                    abstract = True
                    collection_name = 'my_col'

        @self.instance.register
        class AbstractDoc(Document):
            abs_field = fields.StrField(missing='from abstract')

            class Meta:
                abstract = True

        assert AbstractDoc.opts.abstract is True
        assert AbstractDoc.opts.allow_inheritance is True
        # Cannot instanciate also an abstract document
        with pytest.raises(exceptions.AbstractDocumentError):
            AbstractDoc()

        @self.instance.register
        class StillAbstractDoc(AbstractDoc):
            class Meta:
                abstract = True

        assert StillAbstractDoc.opts.abstract is True
        assert StillAbstractDoc.opts.allow_inheritance is True

        @self.instance.register
        class ConcreteDoc(AbstractDoc):
            pass

        assert ConcreteDoc.opts.abstract is False
        assert ConcreteDoc.opts.allow_inheritance is False
        # Field defaults declared on the abstract parent are inherited.
        assert ConcreteDoc().abs_field == 'from abstract'

    def test_non_document_inheritance(self):
        """Plain (non-Document) base classes can be mixed in on either side of
        Document; their methods remain usable on classes and instances."""

        class NotDoc1:
            @staticmethod
            def my_func1():
                return 24

        class NotDoc2:
            @staticmethod
            def my_func2():
                return 42

        @self.instance.register
        class Doc(NotDoc1, Document, NotDoc2):
            a = fields.StrField()

        assert issubclass(Doc, NotDoc1)
        assert issubclass(Doc, NotDoc2)
        assert isinstance(Doc(), NotDoc1)
        assert isinstance(Doc(), NotDoc2)
        assert Doc.my_func1() == 24
        assert Doc.my_func2() == 42
        doc = Doc(a='test')
        assert doc.my_func1() == 24
        assert doc.my_func2() == 42
        assert doc.a == 'test'
| StarcoderdataPython |
43532 | <gh_stars>1-10
import os
from datetime import timedelta
import numpy
from esdl.cube_provider import NetCDFCubeSourceProvider
from dateutil.relativedelta import relativedelta
from netCDF4 import num2date
# Static metadata describing every FLUXCOM variable this provider can serve.
# All descriptors share the dtype, fill value, citation, project and URL; only
# the per-variable fields differ, so each one is assembled by a small helper.
_FLUXCOM_REFERENCE = ('Tramontana, Gianluca, et al. "Predicting carbon dioxide and energy fluxes '
                      'across global FLUXNET sites with regression algorithms." (2016).')


def _descriptor(source_name, units, long_name, standard_name, comment):
    """Build one CF-style variable descriptor for a FLUXCOM source variable."""
    return {
        'source_name': source_name,
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': units,
        'long_name': long_name,
        'references': _FLUXCOM_REFERENCE,
        'standard_name': standard_name,
        'url': 'http://www.fluxcom.org/',
        'project_name': 'FLUXCOM',
        'comment': comment,
    }


all_vars_descr = {
    'GPPall': {
        'gross_primary_productivity': _descriptor(
            'GPPall', 'gC m-2 day-1', 'Gross Primary Productivity',
            'gross_primary_productivity_of_carbon',
            'Gross Carbon uptake of of the ecosystem through photosynthesis')},
    'TERall': {
        'terrestrial_ecosystem_respiration': _descriptor(
            'TERall', 'gC m-2 day-1', 'Terrestrial Ecosystem Respiration',
            'ecosystem_respiration_carbon_flux',
            'Total carbon release of the ecosystem through respiration.')},
    'NEE': {
        'net_ecosystem_exchange': _descriptor(
            'NEE', 'gC m-2 day-1', 'Net Ecosystem Exchange',
            'net_primary_productivity_of_carbon',
            'Net carbon exchange between the ecosystem and the atmopshere.')},
    'LE': {
        'latent_energy': _descriptor(
            'LE', 'W m-2', 'Latent Energy',
            'surface_upward_latent_heat_flux',
            'Latent heat flux from the surface.')},
    'H': {
        'sensible_heat': _descriptor(
            'H', 'W m-2', 'Sensible Heat',
            'surface_upward_sensible_heat_flux',
            'Sensible heat flux from the surface')},
    'Rn': {
        'net_radiation': _descriptor(
            'Rn', 'W m-2', 'Net Radiation',
            'surface_net_radiation_flux',
            'Net radiation to the surface')},
}
class FluxcomProvider(NetCDFCubeSourceProvider):
    """Cube source provider for FLUXCOM netCDF files for a single variable.

    Expects the source directory to contain files named like
    ``<VAR>.<YYYY>.nc`` (the variable name is taken from the first
    dot-separated token and the year from the last one before ``.nc``).
    """

    def __init__(self, cube_config, name='Fluxcom', dir=None, resampling_order=None, var=None):
        super(FluxcomProvider, self).__init__(cube_config, name, dir, resampling_order)
        # Name of the single FLUXCOM variable this provider serves
        # (must be a key of all_vars_descr).
        self.var_name = var
        self.old_indices = None

    @property
    def variable_descriptors(self):
        """Metadata descriptors for the configured variable only."""
        return all_vars_descr[self.var_name]

    def compute_source_time_ranges(self):
        """Return sorted (start, end, file, index) tuples for all time steps.

        Scans the source directory for files of the configured variable whose
        year falls inside the cube's configured time span, and derives one
        8-day period per time step (clipped at year end).
        """
        source_time_ranges = []
        file_names = os.listdir(self.dir_path)
        for file_name in file_names:
            var_name = file_name.split('.')[0];
            if ('.nc' in file_name) and (var_name==self.var_name):
                # NOTE(review): debug print left in; consider logging instead.
                print(file_name)
                source_year = int(file_name.replace('.nc', '').split('.')[-1])
                if self.cube_config.start_time.year <= source_year <= self.cube_config.end_time.year:
                    file = os.path.join(self.dir_path, file_name).replace("\\", "/")
                    dataset = self.dataset_cache.get_dataset(file)
                    times = dataset.variables['time']
                    dates = num2date(times[:], 'days since 1582-10-15 00:00:0.0', calendar='gregorian')
                    self.dataset_cache.close_dataset(file)
                    for i in range(len(dates)):
                        # the following checks if the end period overlaps with the next year. If so, change the
                        # timedelta so that the period stops at the last day of the year
                        days_increment = 8 if (dates[i] + timedelta(days=8)).year == source_year else \
                            (dates[i] + timedelta(days=8) - relativedelta(years=1)).day
                        source_time_ranges.append((dates[i], dates[i] + timedelta(days=days_increment), file, i))
        return sorted(source_time_ranges, key=lambda item: item[0])
| StarcoderdataPython |
11190 | import socket
s = socket.socket()
s.bind(("localhost", 9999))
s.listen(1)
sc, addr = s.accept()
while True:
recibido = sc.recv(1024)
if recibido == "quit":
break
print "Recibido:", recibido
sc.send(recibido)
print "adios"
sc.close()
s.close()
| StarcoderdataPython |
4817453 | '''
Test Cases for AnalysisEngine Class for WordCloud Project
<NAME>
Computer-Based Honors Program
The University of Alabama
9.27.2013
'''
import unittest
import os, os.path
from src.core.python.AnalysisEngine import AnalysisEngine
from src.core.python.SupremeCourtOpinion import SupremeCourtOpinion
from src.core.python.SupremeCourtOpinionMetadata import SupremeCourtOpinionMetadata
'''
Yep. Holes.
'''
# Prose fixtures used as document bodies by build_subsets() below. The
# trailing backslashes join each block into one long string literal; keep
# the line breaks as-is so the fixture contents stay byte-identical.
TEXT1 = \
"""\
There is no lake at Camp Green Lake. There once was a very large lake here, the \
largest lake in Texas. That was over a hundred years ago. Now it is just a dry, \
flat wasteland. \
There used to be a town of Green Lake as well. The town shriveled and dried up \
along with the lake, and the people who lived there. \
During the summer the daytime temperature hovers around ninety-five degrees in \
the shade -- if you can find any shade. There's not much shade in a big dry \
lake. \
The only trees are two old oaks on the eastern edge of the "lake." A hammock is \
stretched between two trees, and a log cabin stands behind that. \
The campers are forbidden to lie in the hammock. It belongs to the Warden. The \
Warden owns the shade. \
Out on the lake, rattlesnakes and scorpions find shade under rocks and in the \
holes dug by the campers. \
Here's a good rule to remember about rattlesnakes and scorpions: If you don't \
bother them, they won't bother you. Usually. \
Being bitten by a scorpion or even a rattlesnake is not the worst thing that \
can happen to you. You won't die. Usually. \
Sometimes a camper will try to be bitten by a scorpion, or even a small \
rattlesnake. Then he will get to spend a day or two recovering in his tent, \
instead of having to dig a hole out on the lake. \
But you don't want to be bitten by a yellow-spotted lizard. That's the worst \
thing that can happen to you. You will die a slow and painful death. Always. \
If you get bitten by a yellow-spotted lizard, you might as well go into the \
shade of the oak trees and lie in the hammock. \
There is nothing anyone can do to you anymore.\
"""
# Fixture text 2 (single-document subset in build_subsets).
TEXT2 = \
"""\
The reader is probably asking: Why would anyone go to Camp Green Lake? \
Most campers weren't given a choice. Camp Green Lake is a camp for bad boys. \
If you take a bad boy and make him dig a hole every day in the hot sun, it \
will turn him into a good boy. That was what some people thought. \
<NAME> was given a choice. The judge said, "You may go to jail, or \
you may go to Camp Green Lake." \
Stanley was from a poor family. He had never been to camp before.\
"""
# Fixture texts 3-5 together form the three-document subset.
TEXT3 = \
"""\
<NAME> was the only passenger on the bus, not counting the driver \
or the guard. The guard sat next to the driver with his seat turned around \
facing Stanley. A rifle lay across his lap. \
Stanley was sitting about ten rows back, handcuffed to his armrest. His \
backpack lay on the seat next to him. It contained his toothbrush, toothpaste \
and a box of stationery his mother had given him. He'd promised to write her \
at least once a week. \
He looked out the window, although there wasn't much to see -- mostly fields \
of hay and cotton. He was on a long bus ride to nowhere. The bus wasn't air-\
conditioned, and the hot, heavy air was almost as stifling as the handcuffs. \
Stanley and his parents had tried to pretend that he was just going away to \
camp for a while, just like rich kids do. When Stanley was younger he used to \
play with stuffed animal, and pretend the animals were at camp. Camp Fun and \
Games he called it. Sometimes he'd have them play soccer with a marble. Other \
times they'd run an obstacle course, or go bungee jumping off a table, tied \
to broken rubber bands. Now Stanley tried to pretend he was going to Camp Fun \
and Games. Maybe he'd make some friends, he thought. At least he'd get to \
swim in the lake.
"""
TEXT4 = \
"""\
He didn't have any friends at home. He was overweight and the kids at his \
middle school often teased him about his size. Even his teachers sometimes \
made cruel comments without realizing it. On his last day of school, his math \
teacher, <NAME>, taught ratios. As an example, she chose the heaviest kid \
in the class and the lightest kid in the class, and had them weigh themselves. \
Stanley weighed three times as much as the other boy. <NAME> wrote the \
ratio on the board, 3:1, unaware of how much embarrassment she had caused \
both of them. \
Stanley was arrested later that day. \
He looked at the guard who sat slumped in his seat and wondered if he had \
fallen asleep. The guard was wearing sunglasses, so Stanley couldn't see his \
eyes. \
Stanley was not a bad kid. He was innocent of the crimes for which he was \
convicted. He'd just been in the wrong place at the wrong time. It was all \
because of his no-good-dirty-rotten-pig-stealing-greate-great-grandfather! \
He smiled. It was a family joke. Whenever anything went wrong, they always \
blamed Stanley's no-good-dirty-rotten-pig-stealing-great-great-grandfather. \\
"""
TEXT5 = \
"""\
Supposedly, he had a great-great-grandfather who had stolen a pig from a one-\
legged Gypsy, and she put a curse on him and all his descendants. Stanley and \
his parents didn't believe in curses, of course, but whenever anything went \
wrong, it felt good to be able to blame someone. \
Things went wrong a lot. They always seemed to be in the wrong place at the \
wrong time. \
He looked out the window at the vast emptiness. He watched the rise and fall \
of a telephone wire. In his mind he could hear his father's gruff voice \
softly sing to him. \
"If only, if only," the woodpecker sighs, "The bark on the tree was just a \
little bit softer." While the wolf waits below, hungry and lonely, He cries \
to the moon, "If only, if only." \
If was a song his father used to sing to him. The melody was sweet and sad, \
but Stanley's favorite part was when his father would howl the world "moon." \
The bus hit a small bump and the guard sat up, instantly alert.\
"""
# Directory the test was launched from; fixture output paths are built here.
CURRENT_PATH = os.path.abspath(os.curdir)
def build_subsets():
    """Build the three document subsets used by the tests.

    One SupremeCourtOpinion is created per fixture text TEXT1..TEXT5, each
    writing to CURRENT_PATH/doc<N>_output, all sharing one blank metadata
    object (same behavior as before).

    Returns:
        list: [[doc1], [doc2], [doc3, doc4, doc5]]
    """
    blank_metadata = SupremeCourtOpinionMetadata()
    docs = []
    # One construction per fixture text, replacing five copy-pasted stanzas.
    for number, text in enumerate((TEXT1, TEXT2, TEXT3, TEXT4, TEXT5), 1):
        output_path = os.path.join(CURRENT_PATH, "doc%d_output" % number)
        docs.append(SupremeCourtOpinion(blank_metadata, text, output_path))
    return [docs[0:1], docs[1:2], docs[2:5]]
class AnalysisEngineTest(unittest.TestCase):
    """Unit tests for AnalysisEngine (work in progress: every test still
    ends with self.fail, so the prints after those calls never execute)."""

    def setUp(self):
        # Fresh fixture subsets and an engine seeded with the first subset.
        self.subsets = build_subsets()
        self.test_engine = AnalysisEngine(self.subsets[0], self.subsets[0])

    def tearDown(self):
        del self.subsets
        del self.test_engine

    def testAnalyzeWithSingleSubset(self):
        """Exercise analyze_docs() on the three-document subset (unfinished)."""
        print "Testing AnalysisEngine.analyze_docs() with single subset..."
        self.test_engine.set_subset(self.subsets[2])
        self.test_engine.set_corpus(self.subsets[2])
        print self.test_engine.subset
        ##
        self.test_engine.analyze_docs()
        ##
        # Placeholder: assertions on the analysis results still to be written.
        self.fail("haven't written this test yet")
        print "Finished testing AnalysisEngine.analyze_docs()..."

    def testAnalyzeWithZeroSubsets(self):
        """set_subset([]) should raise; result assertions are unfinished."""
        print "Testing AnalysisEngine.analyze_docs() with zero subsets..."
        self.assertRaises(Exception, self.test_engine.set_subset, [])
        print self.test_engine.subset
        ##
        self.fail("haven't written this test yet")
        print "Finished testing AnalysisEngine.analyze_docs()..."

    def testAnalysisWithInvalidInput(self):
        """set_subset with non-document entries should raise (unfinished)."""
        print "Testing AnalysisEngine.analyze_docs() with invalid input..."
        self.subsets.append(["not a doc", "nor is this"])
        self.assertRaises(Exception, self.test_engine.set_subset, self.subsets)
        print self.test_engine.subset
        self.fail("haven't written this test yet")
        print "Finished testing AnalysisEngine.analyze_docs()..."
if __name__ == "__main__":
    # Run all tests in this module when executed directly.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
1740991 | <reponame>maxpit/human-pose-estimation<gh_stars>0
"""
Sets default args
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from absl import flags
import os.path as osp
curr_path = osp.dirname(osp.abspath(__file__))
# Path to the SMPL face topology bundled with the project sources.
SMPL_FACE_PATH = osp.join(curr_path, '../src/tf_smpl', 'smpl_faces.npy')

# Root data directory and per-dataset locations used as flag defaults below.
file_dir = osp.join(curr_path, 'data')
lsp_dir = osp.join(file_dir, 'lsp')
lsp_e_dir = osp.join(file_dir, 'lspet_dataset/')
mpii_dir = osp.join(file_dir, 'upi-s1h/data/mpii/')

# Dataset location flags: images, segmentations and poses per dataset.
flags.DEFINE_string('lsp_dir', lsp_dir, 'path to lsp dataset')
flags.DEFINE_string('lsp_e_dir', lsp_e_dir, 'path to lsp extended dataset')
flags.DEFINE_string('lsp_im', osp.join(lsp_dir, 'images/'), 'path to lsp images')
flags.DEFINE_string('lsp_e_im', osp.join(lsp_e_dir, 'images/'), 'path to lsp_ext images')
flags.DEFINE_string('lsp_seg', osp.join(file_dir, 'upi-s1h/data/lsp/'), 'path to lsp segmentations')
flags.DEFINE_string('lsp_e_seg', osp.join(file_dir, 'upi-s1h/data/lsp_extended/'), 'path to lsp_ext segmentations')
flags.DEFINE_string('mpii_dir', mpii_dir, 'path to mpiii dataset')
flags.DEFINE_string('mpii_poses_dir', osp.join(mpii_dir, 'poses.npz'), 'path to mpii dir')

# Dataset-creation toggles, all off by default.
flags.DEFINE_bool('create_lsp', False, 'True if dataset should be created')
flags.DEFINE_bool('create_lsp_val', False, 'True if dataset should be created')
flags.DEFINE_bool('create_lsp_ext', False, 'True if dataset should be created')
flags.DEFINE_bool('create_mpii', False, 'True if dataset should be created')

# Output directory for generated datasets.
dataset_dir = osp.join(curr_path, 'datasets')
| StarcoderdataPython |
3354328 | '''
Dictionary to DataFrame (2)
100xp
The Python code that solves the previous exercise is included on the right.
Have you noticed that the row labels (i.e. the labels for the different observations)
were automatically set to integers from 0 up to 6?
To solve this a list row_labels has been created. You can use it to specify the row
labels of the cars DataFrame. You do this by setting the index attribute of cars,
that you can access as cars.index.
Instructions
-Hit Submit Answer to see that, indeed, the row labels are not correctly set.
-Specify the row labels by setting cars.index equal to row_labels.
-Print out cars again and check if the row labels are correct this time.
'''
import pandas as pd
# Build cars DataFrame
names = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt']
dr = [True, False, False, False, True, True, True]
cpc = [809, 731, 588, 18, 200, 70, 45]
# BUG FIX: the original named this variable `dict`, shadowing the built-in
# dict() for the rest of the module.
data = {'country': names, 'drives_right': dr, 'cars_per_cap': cpc}
cars = pd.DataFrame(data)
print(cars)

# Definition of row_labels (one country code per observation).
row_labels = ['US', 'AUS', 'JAP', 'IN', 'RU', 'MOR', 'EG']

# Specify row labels of cars.
cars.index = row_labels

# Print cars again, now with meaningful row labels.
print(cars)
| StarcoderdataPython |
4810571 | <reponame>felipesch92/cfbCursos
import re #RegEx
# Interactive demo: find all matches of a user-supplied regular expression
# inside a fixed sample sentence and report them.
txt = 'Olá <NAME>, seja bem vindo ao curso de RegEx'
p = input('Digite a expressão que deseja pesquisar: ')

# Every non-overlapping match of the pattern within the sample text.
res = re.findall(p, txt)
qtd_elems = len(res)

# Show the whole match list, then the count, then one match per line.
print(res)
print(f'Quantidade encontrada: {qtd_elems}')
for ocorrencia in res:
    print(ocorrencia)
| StarcoderdataPython |
88009 | <gh_stars>10-100
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
'''
Copyright 2014-2015 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ..base import TraitFactory
class TraitObject:
    """Base wrapper that pairs an inspected object with the shared trait factory."""

    # Factory shared class-wide by TraitObject and all its subclasses.
    FACTORY = TraitFactory()

    def __init__(self, object):
        self._object = object
        # A fresh compiler instance obtained from the shared factory.
        self._compiler = self.FACTORY["Compiler"]()

    @classmethod
    def __str__(cls):
        """Return the lowercase class name with the 'object' suffix stripped."""
        return cls.__name__.lower().replace('object', '')

    @classmethod
    def hook_into(cls, inspector):
        """Register this trait class with *inspector* when its TYPE is supported.

        NOTE(review): cls.INSPECTORS is not defined on TraitObject itself;
        presumably subclasses declare it — confirm before relying on this.
        """
        if inspector.TYPE in cls.INSPECTORS:
            inspector.add_hook(cls.__str__(), cls)

    @property
    def object(self):
        """The wrapped object."""
        return self._object

    @property
    def qualname(self):
        """Qualified name of the wrapped object, falling back to its type name."""
        try:
            return self._object.__qualname__
        except AttributeError:
            return type(self._object).__name__
| StarcoderdataPython |
130303 | <reponame>YoruCathy/GarbageNet<gh_stars>1-10
from GarbageClassification import GarbageClassification
# Build a classifier with a MobileNet backbone pinned to GPU "1", then
# prepare its pipeline and train on it.
gc = GarbageClassification(backbone="MobileNet",gpu="1",logname="realcosinelr")
gc.set_environment()
pipeline = gc.prepare_pipeline()
gc.train(pipeline)
1714100 | <filename>Basics/E03_Text/testing/StringFlows.py
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T E X A M P L E S
#
# www.pagebot.io
# Licensed under MIT conditions
#
# -----------------------------------------------------------------------------
#
# 05_StringFlows.py
#
# If a Text as self.nextElement defined as name for another text box on the
# same page, then overflow of self will go into the other text box.
from pagebot.constants import LEFT, BOTTOM, TOP
from pagebot.conditions import *
from pagebot.elements import *
from pagebot.document import Document
from pagebot.toolbox.color import color, blackColor, noColor, whiteColor
# Tweakable globals, also exposed as UI controls in the __main__ block below.
DoTextFlow = True   # When True, e1 gets a fixed height so text overflows into e2.
BoxWidth = 500      # Width of the text boxes.
PagePadding = 30    # Padding applied to each page.
PageSize = 500      # Page width and height (square pages).
# Export in _export folder that does not commit in Git. Force to export PDF.
EXPORT_PATH = '_export/UseTextFlows.pdf'
def makeDocument():
    """Make a two-page document demonstrating text flow between linked boxes.

    Overflow from ElasticText1 flows into ElasticText2 on page 1, and from
    there into ElasticText3 on page 2.

    Returns:
        Document: The composed document, ready for export.
    """
    W = H = PageSize

    # Create a new document, default to the defined page size.
    doc = Document(w=W, h=H, title='Text Flow', autoPages=2)

    view = doc.view
    c = view.context
    view.padding = 0  # Avoid showing of crop marks, etc.
    view.showCropMarks = True
    view.showRegistrationMarks = True
    view.showFrame = True
    view.showPadding = True
    view.showOrigin = True
    view.showDimensions = False

    # Get page on pageNumber, first in row (there is only one now).
    page1 = doc[1]
    page1.name = 'Page 1'
    page1.padding = PagePadding

    # Build the demo text: ten numbered lines of filler.
    bs = c.newString('')
    for n in range(10):
        bs += c.newString('(Line %d) ' % (n+1), style=dict(font='Verdana-Bold', fontSize=9, leading=10, textFill=blackColor))
        bs += c.newString('Volume of text defines the box height. Volume of text defines the box height. \n',
                          style=dict(font='Verdana', fontSize=9, leading=10, textFill=blackColor))

    if DoTextFlow:
        h1 = 120  # Fix on a given height, to show the text flowing to the e2 element.
    else:
        h1 = None

    newText(bs,
        name='ElasticText1',
        nextElement='ElasticText2',  # Overflow goes here.
        # BUG FIX: the original passed parent=page0, which is undefined and
        # raised a NameError at runtime; the element belongs on page1.
        parent=page1, padding=4, x=100, w=BoxWidth, font='Verdana', h=h1,
        mb=20, mr=10,  # Conditions make the element move to top-left of the page.
        # And the condition that there should be no overflow, otherwise the text box
        # will try to solve it.
        conditions=[Left2Left(), Float2Top(), Overflow2Next()],
        # Position of the origin of the element. Just to show where it is.
        # Has no effect on the position conditions.
        yAlign=BOTTOM, xAlign=LEFT,
        strokeWidth=0.5, fill=color(0.9), stroke=noColor,
    )
    newText('',  # Empty box, will get the overflow from e1, if there is any.
        name='ElasticText2',  # Flow reference by element.name
        nextElementName='ElasticText3', nextPageName='Page 2',
        parent=page1, padding=4, x=100, w=BoxWidth, h=200,
        conditions=[Right2Right(), Float2Top(), Fit2Bottom(), Overflow2Next()], yAlign=TOP, fill=whiteColor, stroke=noColor,
    )

    # Get next page, to show flow running over page breaks.
    page2 = page1.next
    page2.name = 'Page 2'
    page2.padding = PagePadding

    newText('',  # Empty box, will get the overflow from e2, if there is any.
        name='ElasticText3',  # Flow reference by element.name
        # BUG FIX: ElasticText2 directs its overflow to 'Page 2', so this box
        # must live on page2; the original parented it to page1 and left the
        # freshly configured page2 empty.
        parent=page2, padding=4, w=BoxWidth,
        conditions=[Right2Right(), Float2Top(), Fit2Bottom()],
        yAlign=TOP, fill=whiteColor, stroke=noColor)

    score = doc.solve()  # Try to solve all pages.
    if score.fails:
        print(score.fails)

    return doc  # Answer the doc for further doing.
if __name__ == '__main__':
    d = makeDocument()
    # Expose the tweakable globals as checkboxes/sliders in the context UI.
    d.context.Variable([
        #dict(name='ElementOrigin', ui='CheckBox', args=dict(value=False)),
        dict(name='DoTextFlow', ui='CheckBox', args=dict(value=True)),
        dict(name='BoxWidth', ui='Slider', args=dict(minValue=100, value=500, maxValue=PageSize)),
        dict(name='PagePadding', ui='Slider', args=dict(minValue=0, value=30, maxValue=100)),
        dict(name='PageSize', ui='Slider', args=dict(minValue=200, value=500, maxValue=PageSize)),
    ], globals())
    d.export(EXPORT_PATH)
| StarcoderdataPython |
1626956 | <filename>Misc. Practice Code Junk/to_prime_or_not_to_prime.py<gh_stars>0
def to_prime_or_not_to_prime(number):
    """Print whether *number* is prime, using naive trial division.

    #naive python function to check whether a given number is prime or not...
    #"seldomly asked in interviews"-as stated by Jose.

    Args:
        number: Integer to test.

    Returns:
        bool: True if *number* is prime, False otherwise. (The original
        only printed; the return value is a backward-compatible addition.)
    """
    # BUG FIX: for 0 and 1 the loop body never runs, so the original
    # for/else wrongly printed "is prime" for them.
    if number < 2:
        print(number, "is not prime.")
        return False
    for num in range(2, number):
        if number % num == 0:
            print(number, "is not prime.")
            return False
    print(number, "is prime.")
    return True
# Demo calls for the naive checker.
to_prime_or_not_to_prime(69)
to_prime_or_not_to_prime(73)
to_prime_or_not_to_prime(37)
to_prime_or_not_to_prime(12)
print("but am I a dork ?")
import math
def better_to_prime_or_not_to_prime(randomumber):
    """Return True if *randomumber* is prime, via trial division up to sqrt(n).

    Even numbers are rejected up front (except 2), then only odd candidate
    divisors from 3 to sqrt(n) are tried.

    Args:
        randomumber: Integer to test.

    Returns:
        bool: True if prime, False otherwise.
    """
    # BUG FIX: 0, 1 and negatives are not prime (the original returned True).
    if randomumber < 2:
        return False
    if randomumber % 2 == 0:
        return randomumber == 2
    for numb in range(3, int(math.sqrt(randomumber)) + 1, 2):
        # BUG FIX: the original tested `randomumber % 2` here, so every odd
        # composite (9, 15, 21, ...) was reported as prime.
        if randomumber % numb == 0:
            return False
    return True
# Demo calls for the improved checker (return values are discarded).
better_to_prime_or_not_to_prime(73)
better_to_prime_or_not_to_prime(30)
'''
This function is an improvement over the previous function.
It uses a different approach towards finding a prime number.
Further step-by-step explanation of each code line in comments.
starting from 3 beacuse 2 is a prime number by being an even number as well.Then we set the range from 3 to the square root of the number to be tested to lessen down the factors and add one to it and go ahead in jumps of 2 to get only odd numbers''' | StarcoderdataPython |
100203 | from bs4 import BeautifulSoup as bs
import requests
from csv import DictWriter
print("Import sukses")
def scrapeWeb():
halaman = int(input("Berapa halaman? "))
jenis_halam = input("Scrape halaman mana? ").lower()
all_news = []
for x in range(1,halaman + 1):
cap = requests.get(f"https://mojok.co/{jenis_halam}/page/{x}")
soup = bs(cap.text, "html.parser")
contain = soup.find(class_="jeg_postblock")
posts = contain.find_all(class_="jeg_post")
for post in posts:
uhuys = post.find(class_="jeg_post_title")
author = post.find(class_="jeg_meta_author")
all_news.append({
"Jungdul": uhuys.find("a").get_text(),
"Link": uhuys.find("a")["href"],
"Author": author.find("a").get_text()
})
# btn = soup.find(class_="next")
# print(str(len(all_news)) + " artikel ditemukan")
return all_news
news = scrapeWeb()

# Write the scraped articles to news.csv. BUG FIX: newline="" stops the csv
# module from emitting a blank row after every record on Windows, and the
# explicit UTF-8 encoding keeps non-ASCII titles from crashing on platforms
# whose locale default encoding cannot represent them.
with open("news.csv", "w", newline="", encoding="utf-8") as file:
    headers = ["Jungdul", "Link", "Author"]
    csv_writer = DictWriter(file, fieldnames=headers)
    csv_writer.writeheader()
    for new in news:
        csv_writer.writerow(new)

print("scrape success")
| StarcoderdataPython |
63158 | {
'targets': [
{
'target_name': 'example1',
'sources': ['manifest.c'],
'libraries': [
'../../target/release/libnapi_example1.a',
],
'include_dirs': [
'../napi/include'
]
}
]
}
| StarcoderdataPython |
176571 | <filename>gpa/scripts/gpa_stats.py
# coding=utf-8
# Copyright 2018 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import tensorflow as tf
from tensor2tensor.utils import usr_dir
from tensor2tensor import problems
import warnings
warnings.filterwarnings('ignore')
from gpa.scripts.decoding_utils import load_model, build_model, stats
# Absolute directory containing this script; used below as the base of the
# default --t2t_usr_dir argument.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def main(argv):
    """Compute G2P statistics for a word/pronunciation CSV.

    Reads --data (lines of "word<sep>pronunciation", sep is ',' or ';'),
    restores the model from --model_dir and writes stats.csv and gpProg.csv
    into --output_dir.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, required=True)
    parser.add_argument('--model_dir', type=str, required=True)
    parser.add_argument('--output_dir', type=str, required=True)
    parser.add_argument('--data_dir', type=str, required=True)
    parser.add_argument('--problem_name', type=str, default="grapheme_to_phoneme")
    parser.add_argument('--model_name', type=str, default="transformer")
    parser.add_argument('--hparams_set', type=str, default="g2p")
    parser.add_argument('--t2t_usr_dir', type=str, default=os.path.join(__location__, "../submodule"))
    # BUG FIX: type=list split a command-line value into single characters
    # (e.g. "50" -> ['5', '0']); nargs='+' with type=int parses
    # "--weights 50 30 20" into [50, 30, 20] and keeps the same default.
    parser.add_argument('--weights', type=int, nargs='+', default=[50, 30, 20])
    args = parser.parse_args()

    # Read the word/pronunciation pairs; the separator (',' or ';') is
    # detected per line.
    wordList = []
    phon = []
    with open(args.data, 'r') as f:
        for l in f.readlines():
            if ',' in l:
                csv_sep = ','
            elif ';' in l:
                csv_sep = ';'
            else:
                raise ValueError("no ',' or ';' separator found in line: %r" % l)
            source, target = l.strip().split(csv_sep)
            wordList.append(source)
            phon.append(target)

    usr_dir.import_usr_dir(args.t2t_usr_dir)

    # Build the inference graph once (beam size 1).
    input_tensor, input_phon_tensor, output_phon_tensor, encdec_att_mats = build_model(
        args.hparams_set, args.model_name,
        args.data_dir, args.problem_name,
        beam_size=1)

    problem = problems.problem(args.problem_name)
    encoder = problem.feature_encoders(args.data_dir)

    sess = tf.Session()
    # BUG FIX: was a bare assert, which is stripped under `python -O`;
    # raise explicitly so a failed restore is always reported.
    if not load_model(args.model_dir, sess):
        raise RuntimeError("failed to load model from %s" % args.model_dir)

    rstats, gpProg = stats(sess, wordList, phon, input_tensor, input_phon_tensor, output_phon_tensor, encdec_att_mats,
                           encoder, args.weights)
    rstats.to_csv(os.path.join(args.output_dir, "stats.csv"))
    gpProg.to_csv(os.path.join(args.output_dir, "gpProg.csv"), index=False)
if __name__ == "__main__":
tf.app.run()
| StarcoderdataPython |
180075 | <filename>arl_and_re/arl/gen_run.py
import argparse
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--output", default='job.sh', type=str,
                    help="output sh file name")
# BUG FIX: the help text was copy-pasted from --output.
parser.add_argument("--gpuid", default=0, type=int,
                    help="id of the GPU the generated jobs run on")
args = parser.parse_args()

output_name = args.output
gpu = args.gpuid

# Emit one training command per hyper-parameter combination. Using `with`
# guarantees the file is flushed and closed even if writing fails (the
# original used a bare open()/close() pair and also defined an unused
# `source_language` variable).
with open(output_name, 'w') as save:
    save.write('#!/bin/bash' + '\n')
    for rndseed in [0]:
        for lr in [3e-5, 5e-5, 7e-5, 9e-5]:
            for batchsize in [32]:
                for max_epoch in [3, 5, 7]:
                    for warmup in [0.4]:
                        script = "python main_bibert.py --exp_name gpu%d --gpuid %d --batchsize %d --warmup_proportion %.5f" \
                                 " --learning_rate %.6f --max_epoch %d" % \
                                 (gpu, gpu, batchsize, warmup, lr, max_epoch)
                        save.write(script + '\n')
3372169 | <filename>TRIANGULO.py
# Read three side lengths and classify the triangle they form (if any).
L1 = float(input('Digite o primeiro L:'))
L2 = float(input('Digite o segundo L:'))
L3 = float(input('Digite o terceiro L:'))
# BUG FIX: the original checked "L1 < L2 + L3" twice and never constrained
# L2, so e.g. sides 1, 10, 2 were accepted as a triangle. Each side must be
# smaller than the sum of the other two.
if L1 < L2 + L3 and L2 < L1 + L3 and L3 < L1 + L2:
    print('Show, formou um TRIÂNGULO', end=' ')
    if L1 == L2 == L3:
        # All three sides equal: equilateral.
        print('EQUILÁTERO: {},{},{}'.format(L1,L2,L3))
    elif L1 != L2 !=L3 !=L1:
        # All three sides different: scalene.
        print("ESCALENO: {},{},{} ".format(L1,L2,L3))
    else:
        # Exactly two sides equal: isosceles.
        print('ISÓCELES: {}, {}, {}'.format(L1,L2,L3))
else:
    print("Infelizmente não forma TRIÂNGULO")
| StarcoderdataPython |
4811242 | text = "X-DSPAM-Confidence: 0.8475";
s = text.find(' ')
newText = text[s:].strip()
n = float(newText)
print(n)
| StarcoderdataPython |
3221217 | <reponame>jdswalker/Advent-of-Code-2015<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Advent of Code 2015 from http://adventofcode.com/2015/day/17
Author: <NAME>
Copyright: MIT license
--- Day 17: No Such Thing as Too Much ---
The elves bought too much eggnog again - 150 liters this time. To fit it all
into your refrigerator, you'll need to move it into smaller containers. You
take an inventory of the capacities of the available containers.
For example, suppose you have containers of size 20, 15, 10, 5, and 5 liters.
If you need to store 25 liters, there are four ways to do it:
15 and 10
20 and 5 (the first 5)
20 and 5 (the second 5)
15, 5, and 5
Filling all containers entirely, how many different combinations of
containers can exactly fit all 150 liters of eggnog?
Answer: 1304
--- Day 17: Part Two ---
While playing with all the containers in the kitchen, another load of eggnog
arrives! The shipping and receiving department is requesting as many
containers as you can spare.
Find the minimum number of containers that can exactly fit all 150 liters of
eggnog. How many different ways can you fill that number of containers and
still hold exactly 150 litres?
In the example above, the minimum number of containers was two. There were
three ways to use that many containers, and so the answer there would be 3.
Answer: 18
"""
# Standard Library Imports
from itertools import combinations
# Application-specific Imports
from advent_of_code.solvers import solver
class Solver(solver.AdventOfCodeSolver):
    """Advent of Code 2015 Day 17: No Such Thing as Too Much

    Attributes:
        puzzle_input (list): A list of instructions for solving the puzzle
        puzzle_title (str): Name of the Advent of Code puzzle
        solved_output (str): A template string for solution output
    """

    def __init__(self, *args):
        solver.AdventOfCodeSolver.__init__(self, *args)
        self._solved_output = '\n'.join((
            'The number of 150 litre container combinations is {0}.',
            'The number of 150 litre fewest container combinations is {1}.',
        ))

    @staticmethod
    def _get_150_litre_combos(cups, min_length_combos=False):
        """Return container combinations whose capacities sum to 150 litres.

        Args:
            cups (list): Container capacities in litres.
            min_length_combos (bool): If True, stop after the smallest
                combination size that yields at least one match.

        Returns:
            list: Tuples of capacities summing to exactly 150.
        """
        cup_combos = []
        for length in range(1, len(cups) + 1):
            # itertools.combinations already yields tuples; no re-wrapping needed.
            cup_combos.extend(
                combo for combo in combinations(cups, length)
                if sum(combo) == 150
            )
            if min_length_combos and cup_combos:
                break
        return cup_combos

    def _solve_puzzle_parts(self):
        """Solves each part of a Advent of Code 2015 puzzle

        Returns:
            tuple: Pair of solutions for the two parts of the puzzle
        """
        cups = [int(cup) for cup in self.puzzle_input.splitlines()]
        # Enumerate all valid combinations once and derive both answers from
        # the same list, instead of re-running the combinatorial search for
        # part two as the original did.
        all_combos = self._get_150_litre_combos(cups)
        if all_combos:
            min_length = min(len(combo) for combo in all_combos)
            count_min_length = sum(
                1 for combo in all_combos if len(combo) == min_length)
        else:
            count_min_length = 0
        return (len(all_combos), count_min_length)

    def run_test_cases(self):
        """Runs a series of inputs and compares against expected outputs

        Returns: None
        """
        test_input = '\n'.join(('120', '90', '60', '30', '30'))
        self._run_test_case(solver.TestCase(test_input, 4, 3))
| StarcoderdataPython |
1789998 | import functools
import numpy
from pydrake.common.eigen_geometry import AngleAxis, Quaternion
from pydrake.math import ComputeBasisFromAxis
from pydrake.math import RotationMatrix
from pydrake.multibody.tree import Joint_, RevoluteJoint_
from pydrake.systems.framework import BasicVector_, LeafSystem
from visualization_msgs.msg import (InteractiveMarker,
InteractiveMarkerControl,
InteractiveMarkerFeedback)
class MovableJoints(LeafSystem):
    """LeafSystem driving joint positions from RViz interactive markers.

    One rotate-axis interactive marker is inserted on *server* per revolute
    joint. Marker feedback is integrated into a per-joint angle (clamped to
    the joint limits), and all angles are emitted on the 'joint_states'
    vector output port in the order the joints were supplied.
    """

    def __init__(self, server, tf_buffer, joints):
        super().__init__()
        # TODO(sloretz) do I need to store allllll of this in the context?
        # if so, how tf do I get the context in callbacks?
        self._tf_buffer = tf_buffer
        # System will publish transforms for these joints only
        self._joints = tuple(joints)
        if 0 == len(self._joints):
            raise ValueError('Need at least one joint')
        for joint in self._joints:
            if not Joint_.is_subclass_of_instantiation(type(joint)):
                raise TypeError('joints must be an iterable of Joint_[T]')
        self._server = server
        # Current angle (radians) per joint, indexed like self._joints.
        self._joint_states = [0.0] * len(self._joints)
        # Map joint names to indexes in joint_states
        self._joint_indexes = {j.name(): i for j, i in zip(self._joints, range(len(self._joints)))}
        # Last marker orientation seen per joint; feedback deltas against
        # this are accumulated into the joint angle in _revolute_feedback.
        self._joint_prev_orientation = {j.name(): Quaternion() for j in self._joints}
        self._joint_axis_in_child = {}
        for joint in self._joints:
            if RevoluteJoint_.is_subclass_of_instantiation(type(joint)):
                server.insert(
                    self._make_revolute_marker(joint),
                    feedback_callback=functools.partial(self._revolute_feedback, joint))
            else:
                # TODO(sloretz) support more than revolute joints
                raise TypeError('joints must be an iterable of RevoluteJoint_[T]')
        server.applyChanges()
        self.DeclareVectorOutputPort(
            'joint_states',
            BasicVector_[float](len(self._joint_states)),
            self._do_get_joint_states)

    def _revolute_feedback(self, joint, feedback):
        """Integrate an interactive-marker pose update into *joint*'s angle."""
        if feedback.event_type != InteractiveMarkerFeedback.POSE_UPDATE:
            return
        # Feedback must be expressed in the joint's child body frame.
        expected_frame = joint.child_body().body_frame().name()
        if expected_frame != feedback.header.frame_id:
            # TODO(sloretz) fix tf2_geometry_msgs_py :(
            # transformed_point = self._tf_buffer.transform(point_stamped, self._frame_id)
            print("TODO accept feedback in different frame")
            return
        qw = feedback.pose.orientation.w
        qx = feedback.pose.orientation.x
        qy = feedback.pose.orientation.y
        qz = feedback.pose.orientation.z
        new_orientation = Quaternion(qw, qx, qy, qz)
        # Rotation since the last feedback, as an angle-axis.
        prev_orientation = self._joint_prev_orientation[joint.name()]
        orientation_diff = prev_orientation.inverse().multiply(new_orientation)
        diff_aa = AngleAxis(orientation_diff)
        joint_axis = self._joint_axis_in_child[joint.name()]
        # Only accumulate the delta when its axis is (anti)parallel to the
        # joint axis; any other marker motion is ignored.
        dot = numpy.dot(joint_axis, diff_aa.axis())
        if dot > 0.999:
            angle_inc = diff_aa.angle()
        elif dot < -0.999:
            angle_inc = -1 * diff_aa.angle()
        else:
            angle_inc = 0.
        # Clamp the accumulated angle to the joint's position limits.
        angle = self._joint_states[self._joint_indexes[joint.name()]] + angle_inc
        if angle > joint.position_upper_limit():
            angle = joint.position_upper_limit()
        elif angle < joint.position_lower_limit():
            angle = joint.position_lower_limit()
        self._joint_states[self._joint_indexes[joint.name()]] = angle
        self._joint_prev_orientation[joint.name()] = new_orientation

    def _do_get_joint_states(self, context, data):
        """Output-port calc callback: copy the current angles into *data*."""
        data.SetFromVector(self._joint_states)

    def _make_revolute_marker(self, revolute_joint: RevoluteJoint_):
        """Build a rotate-axis interactive marker for *revolute_joint*,
        framed in the joint's child body frame."""
        int_marker = InteractiveMarker()
        int_marker.header.frame_id = revolute_joint.child_body().body_frame().name()
        int_marker.name = revolute_joint.name()
        int_marker.scale = 0.3

        int_marker.pose.position.x = 0.
        int_marker.pose.position.y = 0.
        int_marker.pose.position.z = 0.
        int_marker.pose.orientation.w = 1.
        int_marker.pose.orientation.x = 0.
        int_marker.pose.orientation.y = 0.
        int_marker.pose.orientation.z = 0.

        # Drake revolute axis is in frame F on parent
        axis_hat = revolute_joint.revolute_axis()
        # NOTE(review): the axis is stored as "axis in child" although
        # revolute_axis() is expressed in the parent-side frame F; this
        # assumes the two frames are aligned at zero configuration — confirm.
        self._joint_axis_in_child[revolute_joint.name()] = axis_hat

        # What rotation would get the parent X axis to align with the joint axis?
        rotation_matrix = ComputeBasisFromAxis(0, axis_hat)
        pydrake_quat = RotationMatrix(rotation_matrix).ToQuaternion()

        # ROTATE_AXIS controls rotate about their local X axis, so orient the
        # control with the quaternion computed above.
        joint_control = InteractiveMarkerControl()
        joint_control.orientation.w = pydrake_quat.w()
        joint_control.orientation.x = pydrake_quat.x()
        joint_control.orientation.y = pydrake_quat.y()
        joint_control.orientation.z = pydrake_quat.z()
        joint_control.always_visible = True
        joint_control.name = f'rotate_axis_{revolute_joint.name()}'
        joint_control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS
        int_marker.controls.append(joint_control)
        return int_marker
| StarcoderdataPython |
3277441 | <filename>Example-Project/src/plots.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import *
import seaborn as sns
import statsmodels.api as sm
# Module-wide seaborn theme applied at import time: light grid on a grey
# background with a framed legend; every plot helper below inherits it.
sns.set_style("whitegrid", {'axes.edgecolor': '.6',
                            'axes.facecolor': '0.9',
                            'grid.color': '.82',
                            'legend.frameon': True,
                            'axes.labelsize': 'small'})
def get_lagged_series(series, order=1, log=False):
    """
    Return the order-th difference of a series, with NaNs dropped.

    Args:    series (pd.Series)--time series to difference
             order (int)---------the order of the difference
             log (boolean)-------whether to log transform before differencing
                                 (i.e. compute log returns)
    Returns: (pd.Series) differenced (optionally log-transformed) series
    """
    # The only branch-dependent step is the optional log transform; the
    # differencing itself is identical (the original duplicated it).
    base = np.log(series) if log else series
    return base.diff(order).dropna()
def plot_stocks(index, stocks, labels, positions=None, label_annually=True):
    """
    Plots up to 5 stocks

    Args:
        index (DateTimeIndex): date range
        stocks (list): list of stocks to plot
        labels (list): labels to use for plotting
        positions (list of dicts): optional positions to overplot; each dict
            holds integer locations into *index* under 'open' and 'close'
        label_annually (boolean): plots x label monthly if false, annually otherwise
    Returns (None): Will output plot inline
    """
    colors = ['firebrick', 'steelblue', 'orange', 'mediumorchid', 'mediumseagreen']
    fig, ax = plt.subplots(figsize=(20, 8))
    if label_annually:
        span = YearLocator()
        my_format = DateFormatter('%Y')
    else:
        span = MonthLocator()
        my_format = DateFormatter('%b %Y')
    ax.xaxis.set_major_locator(span)
    ax.xaxis.set_major_formatter(my_format)
    ax.autoscale_view()
    plt.title('Adjusted Close Prices', fontsize=20)
    plt.ylabel('Adj. Close', fontsize=15)
    plt.xlabel('Time', fontsize=15)
    # zip() stops at the shortest input, so only the first five stocks are
    # drawn when more are supplied (matches the 5 available colors).
    for stock, label, color in zip(stocks, labels, colors):
        ax.plot_date(index, stock, color=color, linestyle='-', marker=None, label=label)
    plt.xticks(rotation=45)
    if positions:
        for position in positions:
            # BUG FIX: the original referenced an undefined global ``df``
            # (NameError whenever positions were supplied); the dates come
            # from the ``index`` argument.
            ax.axvline(index[position['open']], color='lime', linestyle='-')
            ax.axvline(index[position['close']], color='red', linestyle='-')
    plt.legend(loc=2, prop={'size': 15}, frameon=True)
def plot_lagged_series(series, order=1, log=False):
    """
    Plots lagged series

    Args:    series (ndarray)---time series
             order (int)--------the order of the difference
             log (boolean)------whether to log transform before difference
    Returns: (None) plots inline
    """
    plt.figure(figsize=(12, 4))
    plt.xlabel('Time Index')
    plt.ylabel('Difference')
    # Only the title depends on ``log``; the differencing call was duplicated
    # verbatim in both branches of the original and is hoisted out here.
    if log:
        plt.title('Log Series Lagged by ' + str(order))
    else:
        plt.title('Series Lagged by ' + str(order))
    lagged = get_lagged_series(series, order, log)
    plt.plot(np.arange(0, lagged.size, 1), lagged)
def plot_correlograms(series, limit=20):
    """
    Plots autocorrelation (ACF) and partial autocorrelation (PACF) correlograms
    in two stacked panels.

    Args:    series (ndarray)---the time series to view plots for
             limit (int)--------the number of lags to see
    Returns: (None) plots inplace
    """
    fig = plt.figure(figsize=(15,8));
    fig.subplots_adjust(hspace=.5)
    ax1 = fig.add_subplot(211);
    # statsmodels draws onto ax1 and returns the owning figure; ``fig`` is
    # rebound to the same object, so the later add_subplot still works.
    fig = sm.graphics.tsa.plot_acf(series, lags=limit, ax=ax1);
    plt.title('Correlogram');
    plt.xticks(np.arange(0,limit+1,1))
    plt.xlim([-1,limit])
    plt.xlabel('Lag')
    plt.ylabel('Autocorrelation')
    ax2 = fig.add_subplot(212);
    fig = sm.graphics.tsa.plot_pacf(series, lags=limit, ax=ax2);
    plt.title('Partial Correlogram');
    plt.xticks(np.arange(0,limit+1,1))
    plt.xlim([-1,limit])
    plt.xlabel('Lag')
    plt.ylabel('Partial Autocorrelation')
def plot_pair(series1, series2, names):
    """
    Plot a pair of stock series on one set of axes, labeled with *names*.

    Args:    series1 (pd.Series)---first stock of the pair (its index is
                                   used as the shared x axis)
             series2 (pd.Series)---second stock of the pair
             names (list)----------list of names for the stocks
    Returns: (None) plots inline
    """
    fig, ax = plt.subplots(figsize=(20,8));
    # Tick the x axis once per year, formatted as a 4-digit year.
    years = YearLocator();
    yearsFmt = DateFormatter('%Y');
    ax.xaxis.set_major_locator(years);
    ax.xaxis.set_major_formatter(yearsFmt);
    ax.autoscale_view();
    # NOTE(review): assumes both series share series1's index — TODO confirm.
    index = series1.index
    plt.title(names[0] + ' and ' +
              names[1] +' (Adj. Close)', fontsize=20);
    plt.ylabel('Adj. Close', fontsize=15);
    plt.xlabel('Time', fontsize=15);
    ax.plot_date(index, series1, 'indianred', label=names[0]);
    ax.plot_date(index, series2, 'steelblue', label=names[1]);
    plt.legend(loc=2, prop={'size':15}, frameon=True);
def plot_ratio(ratio, name, deviations=[1], positions=[]):
    """
    Plots the ratio of the stocks with its mean and +/- k-sigma bands.

    Args:    ratio (pd.Series)--the ratio of the stocks in question
             name (string)------the name of the ratio
             deviations (list)--the standard deviations to plot as bands
             positions (list)---the positions to plot; dicts with an 'open'
                                list and/or a 'close' scalar of integer
                                (positional) indices into *ratio*
    Returns: (None) plots inplace
    """
    # NOTE: the mutable defaults are kept for interface compatibility; both
    # are only read, never mutated.
    fig = plt.subplots(figsize=(20, 8))
    plt.title('Ratio ' + name + ' Adjusted Close', fontsize=20)
    plt.ylabel('Ratio', fontsize=15)
    plt.xlabel('Time Index', fontsize=15)
    plt.xlim([0, ratio.size])
    plt.xticks(np.arange(0, ratio.size, 500))
    plt.plot(np.arange(ratio.size), ratio, 'black', label='$Ratio$', alpha=0.5)
    plt.plot([0, ratio.size], [ratio.mean(), ratio.mean()], 'steelblue', lw=2, label=r'$\hat{\mu}$')
    # One dashed band pair per requested deviation (at most four colors).
    for color, std in zip(['y', 'orange', 'salmon', 'red'], deviations):
        latex_prep = '$' + str(std) + '$'
        plt.plot([0, ratio.size], [ratio.mean()-std*ratio.std(), ratio.mean()-std*ratio.std()],
                 '--', lw=2, label=r'$\hat{\mu} \pm$' + latex_prep + r'$\hat{\sigma}$', color=color)
        plt.plot([0, ratio.size], [ratio.mean()+std*ratio.std(), ratio.mean()+std*ratio.std()],
                 '--', lw=2, color=color)
    if positions:
        opening_days, closing_days = [], []
        opening_ratios, closing_ratios = [], []
        for position in positions:
            if 'open' in position.keys():
                for day in position['open']:
                    opening_days.append(day)
                    # BUG FIX: ``Series.ix`` was removed in pandas 1.0; the
                    # day values are positional (the x axis is np.arange),
                    # so .iloc is the correct replacement.
                    opening_ratios.append(ratio.iloc[day])
            if 'close' in position.keys():
                closing_days.append(position['close'])
                closing_ratios.append(ratio.iloc[position['close']])
        plt.scatter(x=opening_days, y=opening_ratios, s=125, color='lime', edgecolor='black', label='$Open$ '+'$Position$')
        plt.scatter(x=closing_days, y=closing_ratios, s=125, color='red', edgecolor='black', label='$Close$ '+'$Position$')
    plt.legend(loc='best', prop={'size': 15}, frameon=True)
| StarcoderdataPython |
106984 | """
Copyright 2017 Platform9 Systems Inc.(http://www.platform9.com)
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import logging
from credsmgrclient.common.constants import provider_values
from credsmgrclient.encrypt import ENCRYPTOR
# Module-level logger; all client calls below log request/response at DEBUG.
LOG = logging.getLogger(__name__)
def _get_encrypted_values(provider):
    """Return the field names stored encrypted for *provider*.

    Raises a generic ``Exception`` when the provider (or its
    ``encrypted_values`` entry) is missing from ``provider_values``.
    """
    try:
        provider_info = provider_values[provider]
        return provider_info['encrypted_values']
    except KeyError:
        raise Exception("Provider %s is not valid" % provider)
def _decrypt_creds(creds, encrypted_values):
    """Decrypt, in place, every value of *creds* whose key is marked encrypted."""
    for key in creds:
        if key in encrypted_values:
            creds[key] = ENCRYPTOR.decrypt(creds[key])
class CredentialManager(object):
    """Thin wrapper around the credsmgr REST API.

    Each method issues one HTTP call through the injected client, logs the
    raw response at DEBUG level, and (for reads) decrypts any fields the
    provider marks as encrypted before returning them.
    """

    def __init__(self, http_client):
        # http_client is expected to expose get/post/put/delete returning
        # (response, body) tuples.
        self.client = http_client

    def credentials_get(self, provider, tenant_id):
        """Get the information about Credentials.

        :param provider: Name of Omni provider
        :type: str
        :param tenant_id: tenant id to look up
        :type: str
        :rtype: dict
        """
        resp, body = self.client.get("/%s" % provider,
                                     data={"tenant_id": tenant_id})
        LOG.debug("Get Credentials response: {0}, body: {1}".format(
            resp, body))
        # Decrypt in place only when the lookup returned something.
        if body:
            encrypted_values = _get_encrypted_values(provider)
            _decrypt_creds(body, encrypted_values)
        return resp, body

    def credentials_list(self, provider):
        """Get the information about Credentials for all tenants.

        :param provider: Name of Omni provider
        :type: str
        :rtype: dict
        """
        resp, body = self.client.get("/%s/list" % provider)
        LOG.debug("Get Credentials list response: {0}, body: {1}".format(
            resp, body))
        if body:
            encrypted_values = _get_encrypted_values(provider)
            # body maps tenant -> creds dict; decrypt each entry in place.
            for creds in body.values():
                _decrypt_creds(creds, encrypted_values)
        return resp, body

    def credentials_create(self, provider, **kwargs):
        """Create a credential.

        :param provider: Name of Omni provider
        :type: str
        :param body: Credentials for Omni provider
        :type: dict
        :rtype: dict
        """
        resp, body = self.client.post("/%s" % provider,
                                      data=kwargs.get('body'))
        LOG.debug("Post Credentials response: {0}, body: {1}".format(resp,
                                                                     body))
        return resp, body

    def credentials_delete(self, provider, credential_id):
        """Delete a credential.

        :param provider: Name of Omni provider
        :type: str
        :param credential_id: ID for credential
        :type: str
        """
        # NOTE: intentionally returns None; the response is only logged.
        resp, body = self.client.delete("/%s/%s" % (provider, credential_id))
        LOG.debug("Delete Credentials response: {0}, body: {1}".format(
            resp, body))

    def credentials_update(self, provider, credential_id, **kwargs):
        """Update credential.

        :param provider: Name of Omni provider
        :type: str
        :param credential_id: ID for credential
        :type: str
        """
        resp, body = self.client.put("/%s/%s" % (provider, credential_id),
                                     data=kwargs.get('body'))
        LOG.debug("Update Credentials response: {0}, body: {1}".format(
            resp, body))
        return resp, body

    def credentials_association_create(self, provider, credential_id,
                                       **kwargs):
        """Associate a credential with a tenant (payload in kwargs['body'])."""
        resp, body = self.client.post(
            "/%s/%s/association" % (provider, credential_id),
            data=kwargs.get('body'))
        LOG.debug("Create Association response: {0}, body: {1}".format(
            resp, body))

    def credentials_association_delete(self, provider, credential_id,
                                       tenant_id):
        """Remove the association between a credential and *tenant_id*."""
        resp, body = self.client.delete(
            "/%s/%s/association/%s" % (provider, credential_id, tenant_id))
        LOG.debug("Delete Association response: {0}, body: {1}".format(
            resp, body))

    def credentials_association_list(self, provider):
        """List all credential/tenant associations for *provider*."""
        resp, body = self.client.get("/%s/associations" % provider)
        LOG.debug("List associations response: {0}, body: {1}".format(
            resp, body))
        return resp, body
| StarcoderdataPython |
126161 | <filename>pyexlatex/models/format/text/color/deftypes/hex.py<gh_stars>1-10
from typing import Optional
from pyexlatex.models.format.text.color.deftypes.base import ColorDefinition
class Hex(ColorDefinition):
    """Color defined by an HTML-style hex code, e.g. ``#21ad2a``.

    The leading ``#`` (if any) is stripped before the code is handed to the
    base :class:`ColorDefinition`.
    """
    definition_type = 'HTML'

    def __init__(self, hex_code: str, color_name: Optional[str] = None):
        normalized = hex_code.strip('#')
        self.hex_code = normalized
        super().__init__(color_content=normalized, color_name=color_name)
| StarcoderdataPython |
1648995 | # user settings, included in settings.py
import os
# NOTE(review): this is a per-deployment settings template; SECRET_KEY and the
# Twitter credentials below must be filled in and kept out of version control.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Never run with DEBUG = True in production.
DEBUG = True

# SECURITY WARNING: Make this unique, and don't share it with anybody.
SECRET_KEY = ''

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': os.path.join(BASE_DIR, 'openkamer.sqlite'), # Or path to database file if using sqlite3.
    }
}

LANGUAGE_CODE = 'nl-NL'

TIME_ZONE = "Europe/Amsterdam"

# '*' accepts any Host header; restrict this in production deployments.
ALLOWED_HOSTS = ['*']

#STATIC_ROOT = '/home/username/webapps/openkamerstatic/'
STATIC_ROOT = ''

# URL prefix for static files.
#STATIC_URL = '//www.openkamer.org/static/'
STATIC_URL = '/static/'

#MEDIA_ROOT = '/home/<username>/webapps/<projectstatic>/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'website/static/media/')

#MEDIA_URL = '//www.<your-domain>.com/static/media/'
MEDIA_URL = '/static/media/'

# DBBACKUP
DBBACKUP_STORAGE = 'django.core.files.storage.FileSystemStorage'
DBBACKUP_STORAGE_OPTIONS = {'location': os.path.join(BASE_DIR, STATIC_ROOT, 'backup/')}

# DJANGO-CRON
CRON_LOCK_DIR = '/tmp'
# Jobs are enabled/disabled per deployment by (un)commenting entries.
CRON_CLASSES = [
    # 'website.cron.TestJob',
    'website.cron.BackupDaily',
    'website.cron.CleanUnusedPersons',
    'website.cron.MergeDuplicatePersons',
    # 'website.cron.UpdateSubmitters'
    # 'website.cron.UpdateParliamentAndGovernment',
    # 'website.cron.UpdateActiveDossiers',
    # 'website.cron.UpdateInactiveDossiers',
    # 'website.cron.UpdateVerslagenAlgemeenOverleg',
    # 'website.cron.UpdateKamervragenRecent',
    # 'website.cron.UpdateKamervragenAll',
    # 'oktwitter.cron.UpdateTwitterLists',
    # 'website.cron.UpdateStatsData',
    # 'website.cron.UpdateGifts',
    # 'website.cron.UpdateTravels',
    # 'website.cron.UpdateSearchIndex',
    # 'website.cron.CreateCSVExports'
]

# OPENKAMER
CONTACT_EMAIL = '<EMAIL>'
OK_TMP_DIR = os.path.join(BASE_DIR, 'data/tmp/')
CSV_EXPORT_PATH = os.path.join(BASE_DIR, STATIC_ROOT, 'csv/')

# DOCUMENT
NUMBER_OF_LATEST_DOSSIERS = 6
AGENDAS_PER_PAGE = 50
DOSSIERS_PER_PAGE = 20
VOTINGS_PER_PAGE = 20

# PIWIK
PIWIK_URL = '' # optional, without trailing slash
PIWIK_SITE_ID = 0

# TWEEDEKAMER DATA REPO
GIT_AUTHOR_NAME = ''
GIT_AUTHOR_EMAIL = ''
DATA_REPO_DIR = '<path-to-repo>/ok-tk-data/'

# TWITTER
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
TWITTER_ACCESS_TOKEN_KEY = ''
TWITTER_ACCESS_TOKEN_SECRET = ''
| StarcoderdataPython |
136834 | <reponame>gldnspud/kafka-influxdb
try:
import ujson as json
except ImportError:
import json
import logging
try:
# Test for mypy support (requires Python 3)
from typing import List, Text
except:
pass
class Encoder(object):
    """Encoder for the collectd JSON format (https://collectd.org/wiki/index.php/JSON).

    Each input line is a JSON array of sample objects carrying ``values``,
    ``dsnames``, ``dstypes``, ``time`` and identifying fields such as
    ``host``, ``plugin``, ``plugin_instance``, ``type`` and
    ``type_instance``.  Samples with multiple values (parallel ``values`` /
    ``dsnames`` lists) are supported and become multiple InfluxDB fields in
    one record.
    """

    def encode(self, msg):
        # type: (bytes) -> List[Text]
        """Convert a batch of newline-separated collectd JSON lines into
        InfluxDB line-protocol strings; malformed input is logged and skipped."""
        encoded = []
        for raw_line in msg.decode().split("\n"):
            try:
                samples = self.parse_line(raw_line)
            except ValueError as exc:
                logging.debug("Error in encoder: %s", exc)
                continue
            for sample in samples:
                try:
                    # Pass different field lists here to customize which
                    # sample attributes form the measurement name / tags.
                    measurement = Encoder.format_measurement_name(
                        sample, ['plugin', 'plugin_instance', 'type'])
                    tags = Encoder.format_tags(
                        sample, ['host', 'type_instance'])
                    fields = Encoder.format_value(sample)
                    timestamp = Encoder.format_time(sample)
                    encoded.append(Encoder.compose_data(
                        measurement, tags, fields, timestamp))
                except Exception as exc:
                    # A sample missing required keys must not abort the batch.
                    logging.debug("Error in input data: %s. Skipping.", exc)
                    continue
        return encoded

    @staticmethod
    def parse_line(line):
        """Parse one JSON line into a list of sample dicts."""
        # for influxdb version > 0.9, timestamp is an integer
        return json.loads(line)

    @staticmethod
    def compose_data(measurement, tags, value, time):
        """Assemble one InfluxDB line-protocol record."""
        return "{0!s},{1!s} {2!s} {3!s}".format(measurement, tags, value, time)

    @staticmethod
    def format_measurement_name(entry, args):
        """Join the non-empty values of *args* with '_' as the measurement name."""
        parts = [entry[arg] for arg in args if arg in entry and entry[arg] != '']
        return '_'.join(parts)

    @staticmethod
    def format_tags(entry, args):
        """Render the non-empty values of *args* as comma-separated key=value tags."""
        pairs = ["{0!s}={1!s}".format(arg, entry[arg])
                 for arg in args if arg in entry and entry[arg] != '']
        return ','.join(pairs)

    @staticmethod
    def format_time(entry):
        """Return the sample timestamp as whole seconds."""
        return int(float(entry['time']))

    @staticmethod
    def format_value(entry):
        """Render the sample's values as InfluxDB fields.

        A single value becomes ``value=<v>``; multiple values are zipped with
        their dsnames into a comma-separated field list.
        """
        values = entry['values']
        if len(values) == 1:
            return "value={0!s}".format(entry['values'][0])
        field_pairs = ["{0!s}={1!s}".format(name, value)
                       for name, value in zip(entry['dsnames'], values)]
        return ','.join(field_pairs)
| StarcoderdataPython |
3265048 | import requests
from urllib.parse import quote
import datetime
import json
import pandas as pd
import locale
import time
from IPython.display import clear_output
import numpy as np
from requests.exceptions import ProxyError
from IPython.display import clear_output
class Code():
    """Batch geocoder that resolves DataFrame address rows via the Bing Maps
    (virtualearth) REST Locations API and stores the raw JSON per row.
    """

    def __init__(self):
        # German country names (as found in the source data's TXT_KURZ
        # column) mapped to the two-letter codes the REST API expects.
        self.country_dict = {"Deutschland": "DE",
                             "Belgien": "BE",
                             "China (Volksrepublik)": "CN",
                             "Finnland": "FI",
                             "Frankreich": "FR",
                             "United Kingdom": "UK",
                             "Grossbritannien": "UK",
                             "Italien": "IT",
                             'Luxemburg': "LU",
                             "Schweiz": "CH",
                             "Österreich": "AT",
                             "Mexiko": "MX",
                             "Niederlande": "NL",
                             "Schweden": "SE",
                             "Tschechische Republik": "CZ",
                             "Spanien": "ES",
                             "Ungarn": "HU",
                             "Singapur": "SG",
                             "Portugal": "PT",
                             "Vereinigte Staaten von Amerika": "US"}
        self.url_adress = 'http://dev.virtualearth.net/REST/v1/Locations'
        # Browser-like User-Agent to avoid being rejected as a bot.
        self.example_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}

    def geocode_to_json(self, def_id, df, plz = 'TXT_PLZ', city = 'TXT_ORT', country = 'TXT_LAND', street ='TXT_STRASSE', id='ID', save_evry = 0, output='output.csv', encoding='utf-8', debug = False, status = False):
        '''
        Geocode every row of *df* whose STATUS is 0, writing the raw API JSON
        into the frame and periodically persisting it to CSV.

        NOTE(review): the plz/city/country/street column-name parameters are
        immediately shadowed inside the loop by hard-coded column accesses
        (TXT_PLZ, TXT_ORT, TXT_KURZ, TXT_STRASSE, TXT_HAUSNR), so passing
        different column names has no effect — TODO confirm and fix.

        :param def_id: Bing Maps API key appended to each request
        :param df: DataFrame with the address columns listed above
        :param plz: postal-code column name (currently unused, see note)
        :param city: city column name (currently unused, see note)
        :param country: country column name (currently unused, see note)
        :param street: street column name (currently unused, see note)
        :param id: name of the identifier column copied into the output
        :param save_evry: persist the frame every N successful lookups (0 = only at the end)
        :param output: CSV path used by self.safe()
        :param encoding: CSV encoding
        :param debug: verbose per-request logging via self.debug()
        :param status: forwarded to self.status() for progress reporting
        :return: DataFrame with ID/JSON/STATUS/EDIT_DATE columns
        '''
        # First run: bootstrap the bookkeeping columns.
        if(len(set(['JSON','STATUS','EDIT_DATE']) - set(df.columns.values)) > 0):
            df['JSON'] = '{}'
            df['STATUS'] = 0
            df['EDIT_DATE'] = datetime.datetime.now()
        counter = 0
        start_time = datetime.datetime.now()
        df_out = pd.DataFrame()
        df_out['ID'] = df[id]
        df_out['JSON'] = df['JSON']
        df_out['STATUS'] = df['STATUS']
        df_out['EDIT_DATE'] = df['EDIT_DATE']
        # Only rows not yet processed (STATUS == 0).
        for adr, row in df.query('STATUS == 0').iterrows():
            # '/' breaks the URL path; '.0' strips the float artifact from
            # numeric postal codes read as floats.
            plz = str(df.loc[adr].TXT_PLZ).replace('/', '').replace('.0', '')
            city = str(df.loc[adr].TXT_ORT).replace('/', ' ')
            country = self.country_dict[str(df.loc[adr].TXT_KURZ)]
            street = str(df.loc[adr].TXT_STRASSE).replace('/', ' ')
            street_number = str(df.loc[adr].TXT_HAUSNR).replace('/', ' ')
            search_string = street + ' ' + street_number + ', ' + plz + ' ' + city + ' ' + country
            # Retry forever until query_REST reports a non-zero STATUS
            # (success or definitive failure); proxy errors keep looping.
            while(True):
                time.sleep(1)
                self.debug('',False,True) #clear output
                self.debug('search_string: ' + search_string, debug)
                adress_coded = quote(street + ' ' + street_number, safe='')
                city_coded = quote(city,safe='')
                search_query = '' + adress_coded + ', ' + plz + ' ' + city_coded + ' ' + country
                self.debug('search_query: ' + search_query, debug)
                server_status = ''
                json_data = '{}'
                request_url = '{}?CountryRegion={}&postalCode={}&locality={}&addressLine={}&key={}'.format(self.url_adress, country, plz, city_coded, adress_coded, def_id)
                self.debug('request_url: ' + request_url, debug)
                web_dict = self.query_REST(request_url, debug)
                if(web_dict['STATUS'] != 0):
                    df.at[adr, 'JSON'] = web_dict['JSON']
                    df.at[adr, 'STATUS'] = web_dict['STATUS']
                    df.at[adr, 'EDIT_DATE'] = datetime.datetime.now()
                    counter +=1
                    self.safe(df,save_evry,counter,output,encoding)
                    break
            self.status(df,counter,start_time,status)
        # Rebuild the output frame from the now-updated columns.
        df_out = pd.DataFrame()
        df_out['ID'] = df[id]
        df_out['JSON'] = df['JSON']
        df_out['STATUS'] = df['STATUS']
        df_out['EDIT_DATE'] = df['EDIT_DATE']
        # Force a final save regardless of save_evry.
        self.safe(df, 1, counter, output, encoding)
        return df_out

    def status(self, df, counter, start_time, status=True):
        '''
        Print progress: completed share, remaining workload, and an ETA
        extrapolated from the average time per processed row.

        NOTE(review): the *status* flag is accepted but never checked, so
        output is printed unconditionally — TODO confirm intent.

        :param df: the DataFrame being processed
        :param counter: rows processed so far in this run
        :param start_time: datetime when processing started
        :param status: unused (see note)
        :return: None
        '''
        print('Done: ' + str(df.query('STATUS > 0').shape[0]) + ' ' + str(int(df.query('STATUS > 0').shape[0] / df.shape[0] * 100 * 1000) / 1000) + '%')
        print('Workload: ' + str(df.query('STATUS == 0').shape[0]) + ' to do')
        time_gone = datetime.datetime.now()
        td = time_gone - start_time
        # Linear extrapolation: average seconds per row * rows remaining.
        time_togo = (td.total_seconds() / counter) * (df.query('STATUS == 0').shape[0])
        days = int(time_togo / (24 * 60 * 60))
        hours = ((time_togo / (24 * 60 * 60)) - days) * 24
        fin_date = datetime.datetime.now() + datetime.timedelta(days=days, hours=hours)
        print('ETA: ' + str(days) + ' Days and ' + str(hours) + ' Hours ')
        print('Due date: ' + fin_date.strftime('%d.%m.%Y %H:%M'))

    def safe(self, df, save_evry, counter, output, encoding):
        '''
        Persist *df* to CSV every *save_evry* processed rows.

        :param df: the DataFrame to write
        :param save_evry: save interval in rows; 0 disables saving
        :param counter: rows processed so far
        :param output: CSV file path
        :param encoding: CSV encoding
        :return: None
        '''
        if (save_evry > 0 and counter % save_evry == 0):
            df.to_csv(output, sep=';', encoding = encoding, index=False)

    def query_REST(self, request_url, debug=False):
        '''
        Issue one GET against the Locations API and classify the result.

        STATUS meanings in the returned dict: 0 = retry (request failed or
        was swallowed), 1 = geocoded JSON in 'JSON', 3 = definitive failure
        (raw response text in 'JSON').

        NOTE(review): the bare ``except`` hides all request errors, and the
        message says "1 Min" while the sleep is 60 * 5 seconds — TODO confirm.

        :param request_url: fully composed request URL including the API key
        :param debug: verbose logging via self.debug()
        :return: dict with 'JSON' and 'STATUS' keys
        '''
        web_data = '{}'
        web_dict = {'JSON':'{}', 'STATUS':0}
        try:
            with requests.get(request_url, headers=self.example_headers) as url:
                if (url.content.decode().find("coordinates") > 0):
                    web_dict['JSON'] = json.loads(url.content.decode())
                    web_dict['STATUS'] = 1
                    if (web_dict['JSON']['statusDescription'] == 'OK'):
                        self.debug('LATITUDE: ' + str(web_dict['JSON']['resourceSets'][0]['resources'][0]['geocodePoints'][0]['coordinates'][0]) + ' LONGITUDE: ' + str(
                            web_dict['JSON']['resourceSets'][0]['resources'][0]['geocodePoints'][0]['coordinates'][1]), debug)
                    else:
                        self.debug(url.content.decode(), debug)
                else:
                    if (url.content.decode().find("The resource cannot be found.") > 0):
                        self.debug('Adress not found ' + url.content.decode(), debug)
                        web_dict['STATUS'] = 3
                        web_dict['JSON'] = url.content.decode()
                    self.debug('retry because: \n' + url.content.decode(), debug)
                    web_dict['STATUS'] = 3
                    web_dict['JSON'] = url.content.decode()
        except:
            self.debug('Waiting 1 Min for Proxy', debug)
            time.sleep(60 * 5)
        return web_dict

    def debug(self, message, debug=True, cleanup=False):
        '''
        Conditionally print a debug message and/or clear the notebook output.

        :param message: text to print when *debug* is truthy
        :param debug: gate for printing
        :param cleanup: clear the IPython cell output first
        :return: None
        '''
        if debug:
            print(message)
        if cleanup:
            clear_output()
| StarcoderdataPython |
1784340 | <gh_stars>0
#!/usr/bin/python3
import yaml
from optparse import OptionParser
import os.path
# Module-level accumulators filled by process(): source files grouped by
# directory, include directories in first-seen order, and -D definitions.
known_dirs = {}
includes = []
opts = set()
def process(options):
    """Read a compile_commands.json (YAML-parsable) and print a CMakeLists.txt.

    Walks every compilation entry, grouping source files by directory and
    collecting -I/-D flags, then emits add_executable/add_library plus the
    matching target_sources / target_include_directories /
    target_compile_definitions stanzas to stdout.
    """
    yaml_fname = options.yaml_filename
    prefix = os.path.realpath(options.top_directory)
    with open(yaml_fname, 'r') as f:
        # JSON is a YAML subset, so the (faster) C loader handles it.
        y = yaml.load(f, Loader=yaml.CLoader)
    for el in y:
        dir = el['directory']
        if dir not in known_dirs:
            known_dirs[dir] = [el['file']]
        else:
            known_dirs[dir].append(el['file'])
        for arg in el['arguments']:
            if arg.startswith("-I"):
                fname = arg[2:]
                # Relative include paths are resolved against the entry's
                # compile directory.
                if fname[0] != '/':
                    fname = os.path.realpath(dir + '/' + fname)
                # Rebase paths under the top directory onto
                # ${CMAKE_CURRENT_SOURCE_DIR} (slice start is 0 when the
                # prefix does not match, leaving the path untouched).
                fname = "${CMAKE_CURRENT_SOURCE_DIR}" + fname[fname.startswith(prefix) and len(prefix):]
                # Preserve first-seen order while deduplicating.
                fname not in includes and includes.append(fname)
            if arg.startswith("-D"):
                opts.add(arg[2:])
    # Target kind is inferred from the project-name suffix.
    if options.project_name.endswith(".a"):
        project_name = options.project_name[:-2]
        library_type = "STATIC"
        project_type = "library"
    elif options.project_name.endswith(".so"):
        project_name = options.project_name[:-3]
        library_type = "SHARED"
        project_type = "library"
    else:
        project_name = options.project_name
        library_type = ""
        project_type = "executable"
    print("""cmake_minimum_required(VERSION 3.3)
set(PROJECT_NAME %s)
project(${PROJECT_NAME})
add_%s(${PROJECT_NAME} %s)""" % (project_name, project_type, library_type))
    if len(known_dirs):
        print("target_sources(${PROJECT_NAME} PRIVATE")
        for dir in known_dirs:
            for file in known_dirs[dir]:
                # Rebase the directory the same way as includes; on later
                # iterations startswith() is False so the slice is a no-op.
                dir = dir[dir.startswith(prefix) and len(prefix):]
                print(" ${CMAKE_CURRENT_SOURCE_DIR}" + dir + '/' + file)
        print(")")
    if len(includes):
        print("target_include_directories(${PROJECT_NAME} PRIVATE")
        for incl in includes:
            print(" " + incl)
        print(")")
    if len(opts):
        print("target_compile_definitions(${PROJECT_NAME} PRIVATE")
        for opt in opts:
            print(" " + opt)
        print(")")
usage = """
%prog [options]
Create CMakeLists.txt based on compile_commands.json (e.g. produced by _bear_)
"""

# Command-line interface; the generated CMakeLists.txt goes to stdout.
parser = OptionParser(usage=usage)
parser.add_option("--yaml", "--yaml-file", "--yaml-filename", dest="yaml_filename", default="compile_commands.json",
                  help="input file produced by _bear_")
parser.add_option("--topdir", "--top-dir", dest="top_directory", default=".")
parser.add_option("--project", "--project-name", dest="project_name", default="a_cmake_project",
                  help="filename with suffix (e.g. '.so')")
# parser.add_option("--per-dir", "--per-directory", dest="per_directory_mode", default=True,
#                   help="CMakeLists.txt per directory")

(options, args) = parser.parse_args()
process(options)
3305666 | <filename>partd/buffer.py
import heapq
import itertools
from bisect import bisect
from collections import defaultdict
from operator import add, itemgetter
from queue import Queue, Empty
from threading import Lock

from toolz import merge_with, topk, accumulate, pluck

from .core import Interface
def zero():
    # Module-level default factory for Buffer.lengths; a named function (not
    # a lambda) so the defaultdict stays picklable for __getstate__.
    return 0
class Buffer(Interface):
    """Two-tier partd store: writes land in *fast* and spill to *slow*.

    Per-key byte counts are tracked in ``lengths``; once the total exceeds
    ``available_memory`` the largest keys are flushed from fast to slow.
    A single reentrancy-avoiding Lock guards all mutation; the ``lock=``
    flags let internal callers skip re-acquisition.
    """

    def __init__(self, fast, slow, available_memory=1e9):
        self.lock = Lock()
        self.fast = fast
        self.slow = slow
        self.available_memory = available_memory
        # bytes appended per key while resident in the fast store
        self.lengths = defaultdict(zero)
        self.memory_usage = 0
        Interface.__init__(self)

    def __getstate__(self):
        # The Lock is deliberately excluded: locks cannot be pickled.
        return {'fast': self.fast,
                'slow': self.slow,
                'memory_usage': self.memory_usage,
                'lengths': self.lengths,
                'available_memory': self.available_memory}

    def __setstate__(self, state):
        Interface.__setstate__(self, state)
        # Recreate the lock dropped by __getstate__.
        self.lock = Lock()
        self.__dict__.update(state)

    def append(self, data, lock=True, **kwargs):
        """Append {key: bytes} into the fast tier, spilling if over budget."""
        if lock: self.lock.acquire()
        try:
            for k, v in data.items():
                self.lengths[k] += len(v)
                self.memory_usage += len(v)
            self.fast.append(data, lock=False, **kwargs)
            # Evict the heaviest keys until we are back under budget.
            while self.memory_usage > self.available_memory:
                keys = keys_to_flush(self.lengths, 0.1, maxcount=20)
                self.flush(keys)
        finally:
            if lock: self.lock.release()

    def _get(self, keys, lock=True, **kwargs):
        # A key's bytes may be split across tiers; concatenate fast + slow.
        if lock: self.lock.acquire()
        try:
            result = list(map(add, self.fast.get(keys, lock=False),
                                   self.slow.get(keys, lock=False)))
        finally:
            if lock: self.lock.release()
        return result

    def _iset(self, key, value, lock=True):
        """ Idempotent set """
        if lock: self.lock.acquire()
        try:
            self.fast.iset(key, value, lock=False)
        finally:
            if lock: self.lock.release()

    def _delete(self, keys, lock=True):
        # Remove from both tiers; memory accounting is not adjusted here
        # (Interface.delete drives this path).
        if lock: self.lock.acquire()
        try:
            self.fast.delete(keys, lock=False)
            self.slow.delete(keys, lock=False)
        finally:
            if lock: self.lock.release()

    def drop(self):
        """Discard all data in both tiers."""
        self._iset_seen.clear()
        self.fast.drop()
        self.slow.drop()

    def __exit__(self, *args):
        self.drop()

    def flush(self, keys=None, block=None):
        """ Flush keys to disk

        Parameters
        ----------
        keys: list or None
            list of keys to flush
        block: bool (defaults to None)
            Whether or not to block until all writing is complete

        If no keys are given then flush all keys
        """
        if keys is None:
            keys = list(self.lengths)

        # Move the bytes to the slow tier, then drop them from fast and
        # from the in-memory accounting.
        self.slow.append(dict(zip(keys, self.fast.get(keys))))
        self.fast.delete(keys)
        for key in keys:
            self.memory_usage -= self.lengths[key]
            del self.lengths[key]
def keys_to_flush(lengths, fraction=0.1, maxcount=100000):
    """ Which keys to remove

    Picks from the heaviest half of the keys the smallest prefix whose
    combined size reaches ``fraction`` of the total (at least one key,
    at most ``maxcount``).

    >>> lengths = {'a': 20, 'b': 10, 'c': 15, 'd': 15,
    ...            'e': 10, 'f': 25, 'g': 5}
    >>> keys_to_flush(lengths, 0.5)
    ['f', 'a']
    """
    # Heaviest half of the keys, biggest first.  heapq.nlargest is exactly
    # what toolz.topk used internally, so the ordering is unchanged while
    # dropping the third-party dependency.
    top = heapq.nlargest(max(len(lengths) // 2, 1),
                         lengths.items(),
                         key=itemgetter(1))
    total = sum(lengths.values())
    # Running totals of the candidate sizes; bisect finds how many of the
    # heaviest keys are needed to cover fraction * total bytes.
    running = list(itertools.accumulate(size for _, size in top))
    cutoff = min(maxcount, max(1, bisect(running, total * fraction)))
    result = [k for k, v in top[:cutoff]]
    assert result
    return result
| StarcoderdataPython |
1625517 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Module to define CONSTANTS used across the project
"""
import os
from webargs import fields
from marshmallow import Schema, INCLUDE
from official.utils.logs import logger as official_logger
# identify basedir for the package
BASE_DIR = os.path.dirname(os.path.normpath(os.path.dirname(__file__)))

# default location for input and output data, e.g. directories 'data' and 'models',
# is either set relative to the application path or via environment setting
IN_OUT_BASE_DIR = BASE_DIR
if 'APP_INPUT_OUTPUT_BASE_DIR' in os.environ:
    env_in_out_base_dir = os.environ['APP_INPUT_OUTPUT_BASE_DIR']
    if os.path.isdir(env_in_out_base_dir):
        IN_OUT_BASE_DIR = env_in_out_base_dir
    else:
        # Invalid override: warn and silently fall back to BASE_DIR.
        msg = "[WARNING] \"APP_INPUT_OUTPUT_BASE_DIR=" + \
        "{}\" is not a valid directory! ".format(env_in_out_base_dir) + \
        "Using \"BASE_DIR={}\" instead.".format(BASE_DIR)
        print(msg)

DATA_DIR = os.path.join(IN_OUT_BASE_DIR, 'data')
MODELS_DIR = os.path.join(IN_OUT_BASE_DIR, 'models')

# Download URLs for the supported benchmark datasets.
CIFAR10_REMOTE_URL="https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
IMAGENET_MINI_REMOTE_URL="https://nc.deep-hybrid-datacloud.eu/s/aZr8Hi5Jk7GMSe4/download?path=%2F&files=imagenet_mini.tar"

# FLAAT needs a list of trusted OIDC Providers.
# we select following three providers:
Flaat_trusted_OP_list = [
'https://aai.egi.eu/oidc/',
'https://iam.deep-hybrid-datacloud.eu/',
'https://iam.extreme-datacloud.eu/',
]

# Define CONSTANTS for this benchmark flavors:
# BENCHMARK_TYPE
#   benchmark: flavor['synthetic', 'dataset', 'accuracy'?]
#   pro: 'pro'
# Use BENCHMARK_GROUP instead of BENCHMARK_TYPE ??
BENCHMARK_TYPE = os.getenv('BENCHMARK_TYPE', 'benchmark')
# Anything other than the two known types falls back to 'benchmark'.
if BENCHMARK_TYPE not in ['benchmark', 'pro']:
    BENCHMARK_TYPE = 'benchmark'

DOCKER_BASE_IMAGE = os.getenv('DOCKER_BASE_IMAGE', '')

# Use the timeformat of tf-benchmark, smth. '%Y-%m-%dT%H:%M:%S.%fZ'
TIME_FORMAT = official_logger._DATE_TIME_FORMAT_PATTERN

## BENCHMARK version ['synthetic', 'dataset', 'accuracy']
## needs following to be defined.
# experimentally found:
# GTX1070, 8GB:
# ============
# googlenet: 192
# imagenet : 48
# resnet50 : 48
# vgg16    : 32 (48 - reboot)
# => score = ca. 600
#
# also:
# mobilenet: 1024
# resnet152: 24
# vgg19    : 32 (48 - reboot)
##
# batch_size(s) are for 4GB memory:
MODELS = {'googlenet' : 96,
          'inception3' : 24,
          'resnet50' : 24,
          'vgg16': 16
         }
BATCH_SIZE_CPU = 16
# Fractional epochs are allowed (e.g. 0.1 of one pass over the data).
NUM_EPOCHS = float(os.getenv('BENCHMARK_NUM_EPOCHS', '0.'))
OPTIMIZER = 'sgd' # to consider: [sgd','momentum','rmsprop','adam']
USE_FP16 = False
EVALUATION = False
IF_CLEANUP = True
##
## DEBUG Flags
DEBUG_MODEL = True

# Training and predict(deepaas>=0.5.0) arguments as a dict of dicts
def get_train_args_schema():
    """Instantiate and return the train-args schema matching BENCHMARK_TYPE."""
    schema_cls = TrainArgsSchemaPro if BENCHMARK_TYPE == 'pro' else TrainArgsSchemaBench
    return schema_cls()
# class / place to describe arguments for train()
class TrainArgsSchemaBench(Schema):
    """Training arguments for the plain 'benchmark' flavor (see BENCHMARK_TYPE)."""

    class Meta:
        unknown = INCLUDE # supports extra parameters

    flavor = fields.Str(missing='synthetic',
                        enum=['synthetic','dataset'],
                        description='Benchmark Flavor to use.',
                        required=False
                        )

    num_gpus = fields.Integer(missing=1,
                              description='Number of GPUs to train on \
                              (one node only). If set to zero, CPU is used.',
                              required=False
                              )
# 'pro' version of class TranArgsSchema
class TrainArgsSchemaPro(Schema):
    """Training arguments for the 'pro' flavor: full control over model,
    dataset, batch size, precision and evaluation options."""

    class Meta:
        unknown = INCLUDE # supports extra parameters

    batch_size_per_device = fields.Integer(missing=64,
                                           description='Batch size for each GPU.',
                                           required=False
                                           )

    dataset = fields.Str(missing='synthetic_data',
                         enum=['synthetic_data',
                               'imagenet',
                               'imagenet_mini',
                               'cifar10'],
                         description='Dataset to perform training on. \
                         synthetic_data: randomly generated ImageNet-like \
                         images; imagenet_mini: 3% of the real ImageNet \
                         dataset',
                         required=False
                         )

    # Each model name carries the data set(s) it supports in brackets.
    model = fields.Str(missing='resnet50 (ImageNet)',
                       enum = ['googlenet (ImageNet)',
                               'inception3 (ImageNet)',
                               'mobilenet (ImageNet)',
                               'overfeat (ImageNet)',
                               'resnet50 (ImageNet)',
                               'resnet152 (ImageNet)',
                               'vgg16 (ImageNet)',
                               'vgg19 (ImageNet)',
                               'resnet56 (Cifar10)',
                               'resnet110 (Cifar10)',
                               'alexnet (ImageNet, Cifar10)'],
                       description='CNN model for training. N.B. Models only \
                       support specific data sets, given in brackets. \
                       synthetic_data can only be processed by ImageNet models.',
                       required=False
                       )

    num_gpus = fields.Integer(missing=1,
                              description='Number of GPUs to train on \
                              (one node only). If set to zero, CPU is used.',
                              required=False
                              )

    num_epochs = fields.Float(missing=NUM_EPOCHS,
                              description='Number of epochs to \
                              train on (float value, < 1.0 allowed).',
                              required=False
                              )

    optimizer = fields.Str(missing='sgd',
                           enum=['sgd','momentum','rmsprop','adam'],
                           description='Optimizer to use.',
                           required=False
                           )

    use_fp16 = fields.Boolean(missing=False,
                              enum = [False, True],
                              description='Use 16-bit floats for certain \
                              tensors instead of 32-bit floats. ',
                              required=False
                              )

    weight_decay = fields.Float(missing=4.0e-5,
                                description='Weight decay factor for training',
                                required=False
                                )

    evaluation = fields.Boolean(missing=True,
                                enum = [False, True],
                                description='Perform evaluation after the \
                                benchmark in order to get accuracy results \
                                (only meaningful on real data sets!).',
                                required=False
                                )

    if_cleanup = fields.Boolean(missing=False,
                                enum = [False, True],
                                description='If to delete training and \
                                evaluation directories.',
                                required=False
                                )
# class / place to describe arguments for predict()
class PredictArgsSchema(Schema):
    """Arguments accepted by predict(); prediction is not implemented for this module."""
    class Meta:
        unknown = INCLUDE  # accept and keep unknown fields
    files = fields.Field(required=False,
                         missing=None,
                         type="file",
                         data_key="data",
                         location="form",
                         description="NOT implemented!."
                         )
| StarcoderdataPython |
1710973 | <filename>Trees/Find_Subtree.py
#Given 2 binary trees t and s, find if s has an equal subtree in t, where the structure and the values are the same.
#Return True if it exists, otherwise return False
class Node:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

    def __repr__(self):
        # Recursive textual form; children render via their own __repr__.
        return "(Value: {} Left: {} Right: {})".format(
            self.value, self.left, self.right)
def find_subtree(s, t):
    """Return True if tree `t` contains a subtree identical to tree `s`.

    Identity means the same structure AND the same values. An empty
    pattern `s` matches any tree; an empty `t` contains nothing.
    """
    if not s:
        return True
    if not t:
        return False
    # Either `s` matches starting at this node, or somewhere below it.
    if helper(s, t):
        return True
    return find_subtree(s, t.left) or find_subtree(s, t.right)

def helper(root, subroot):
    """Return True iff the trees rooted at `root` and `subroot` are identical."""
    if not root and not subroot:
        return True
    if root and subroot and root.value == subroot.value:
        return helper(root.left, subroot.left) and helper(root.right, subroot.right)
    # Fix: the original fell off the end here and implicitly returned None;
    # return an explicit False so callers always get a bool.
    return False
t3 = Node(4, Node(3), Node(2))
t2 = Node(5, Node(4), Node(-1))
t = Node(1, t2, t3)
s = Node(4, Node(3), Node(2))
"""
Tree t:
1
/ \
4 5
/ \ / \
3 2 4 -1
Tree s:
4
/ \
3 2
"""
#print(find_subtree(s, t))
# True
| StarcoderdataPython |
1700435 | <gh_stars>1-10
from django.dispatch import receiver
from django.db.models.signals import post_save
from .models import Hotels, Room
#signal to send email on successful property register
def property_created(sender, instance, created, *args, **kwargs):
    """post_save handler for Hotels: send the registration email the first
    time a Hotels row for this hotel comes into existence.

    Fixes two defects in the original:
    * ``get_or_create`` returns an ``(object, created)`` tuple; the method
      call must go to the unpacked object, not the tuple.
    * ``post_save.connect`` referenced the undefined name
      ``hotel_is_created`` (NameError at import); the handler itself must
      be connected.
    """
    if created:
        # get_or_create returns (object, created) -- unpack it.
        hotel_obj, was_created = Hotels.objects.get_or_create(hotel=instance)
        if was_created:
            # create hash and send email
            hotel_obj.property_register_email()

post_save.connect(property_created, sender=Hotels)
##### file path
# input: raw user-behavior log from the Tianchi Fresh competition
path_df_D = "../../data/raw/tianchi_fresh_comp_train_user.csv"
# output: three time-window slices plus their (user, item, category) tables
path_df_part_1 = "raw/df_part_1.csv"
path_df_part_2 = "raw/df_part_2.csv"
path_df_part_3 = "raw/df_part_3.csv"
path_df_part_1_tar = "raw/df_part_1_tar.csv"
path_df_part_2_tar = "raw/df_part_2_tar.csv"
path_df_part_1_uic_label = "raw/df_part_1_uic_label.csv"
path_df_part_2_uic_label = "raw/df_part_2_uic_label.csv"
path_df_part_3_uic = "raw/df_part_3_uic.csv"
########################################################################
'''Step 1: divide the data set into 3 parts
    part 1 - train: 11.22~11.27 > 11.28;
    part 2 - train: 11.29~12.04 > 12.05;
    part 3 - test:  12.13~12.18 (> 12.19);
    here we omit the geo info
'''
# data set operations are based on pandas
import pandas as pd
# batch = 0
# dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d %H')
# for df in pd.read_csv('tianchi_fresh_comp_train_user.csv',
# parse_dates=['time'],
# index_col=['time'],
# date_parser=dateparse,
# chunksize=100000): # operation on chunk as the data file is too large
# try:
# df_part_1 = df['2014-11-22':'2014-11-27']
# df_part_1_tar = df['2014-11-28']
# df_part_2 = df['2014-11-29':'2014-12-04']
# df_part_2_tar = df['2014-12-05']
# df_part_3 = df['2014-12-13':'2014-12-18']
#
# df_part_1.to_csv(path_df_part_1,
# columns=['user_id', 'item_id', 'behavior_type', 'item_category'],
# header=False, mode='a')
# df_part_1_tar.to_csv(path_df_part_1_tar,
# columns=['user_id', 'item_id', 'behavior_type', 'item_category'],
# header=False, mode='a')
# df_part_2.to_csv(path_df_part_2,
# columns=['user_id', 'item_id', 'behavior_type', 'item_category'],
# header=False, mode='a')
# df_part_2_tar.to_csv(path_df_part_2_tar,
# columns=['user_id', 'item_id', 'behavior_type', 'item_category'],
# header=False, mode='a')
# df_part_3.to_csv(path_df_part_3,
# columns=['user_id', 'item_id', 'behavior_type', 'item_category'],
# header=False, mode='a')
#
# batch += 1
# print('chunk %d done.' % batch)
#
# except StopIteration:
# print("divide the data set finish.")
# break
########################################################################
'''Step 2 construct U-I-C_label of df_part 1 & 2
U-I-C of df_part 3
'''
##### part_1 #####
# uic: distinct (user, item, category) triples seen in the part-1 window.
data_file = path_df_part_1
try:
    # The intermediate files were written with header=False (see the writer
    # above), so they must be read headerless: with the default
    # header='infer' the first record would silently be consumed as a
    # header row and lost.
    df_part_1 = pd.read_csv(data_file, index_col=False, header=None)
    df_part_1.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
except Exception as e:
    # NOTE(review): best-effort -- on failure df_part_1 stays undefined and
    # the next statement raises NameError anyway.
    print(e)
df_part_1_uic = df_part_1.drop_duplicates(['user_id', 'item_id', 'item_category'])[['user_id', 'item_id', 'item_category']]
data_file = path_df_part_1_tar
try:
    df_part_1_tar = pd.read_csv(data_file, index_col=False, header=None, parse_dates=[0])
    df_part_1_tar.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
except Exception as e:
    print(e)
# uic + label: label = 1 iff the pair was bought (behavior_type == 4) on the target day.
df_part_1_uic_label_1 = df_part_1_tar[df_part_1_tar['behavior_type'] == 4][['user_id', 'item_id', 'item_category']]
df_part_1_uic_label_1.drop_duplicates(['user_id', 'item_id'], 'last', inplace=True)
df_part_1_uic_label_1['label'] = 1
df_part_1_uic_label = pd.merge(df_part_1_uic,
                               df_part_1_uic_label_1,
                               on=['user_id', 'item_id', 'item_category'],
                               how='left').fillna(0).astype('int')
df_part_1_uic_label.to_csv(path_df_part_1_uic_label, index=False)
##### part_2 #####
# uic: distinct (user, item, category) triples seen in the part-2 window.
data_file = path_df_part_2
try:
    # header=None: the intermediate files were written with header=False,
    # so the first record must not be interpreted as a header row.
    df_part_2 = pd.read_csv(data_file, index_col=False, header=None)
    df_part_2.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
except Exception as e:
    print(e)
df_part_2_uic = df_part_2.drop_duplicates(['user_id', 'item_id', 'item_category'])[['user_id', 'item_id', 'item_category']]
data_file = path_df_part_2_tar
try:
    df_part_2_tar = pd.read_csv(data_file, index_col=False, header=None, parse_dates=[0])
    df_part_2_tar.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
except Exception as e:
    print(e)
# uic + label: label = 1 iff the pair was bought (behavior_type == 4) on the target day.
df_part_2_uic_label_1 = df_part_2_tar[df_part_2_tar['behavior_type'] == 4][['user_id', 'item_id', 'item_category']]
df_part_2_uic_label_1.drop_duplicates(['user_id', 'item_id'], 'last', inplace=True)
df_part_2_uic_label_1['label'] = 1
df_part_2_uic_label = pd.merge(df_part_2_uic,
                               df_part_2_uic_label_1,
                               on=['user_id', 'item_id', 'item_category'],
                               how='left').fillna(0).astype('int')
df_part_2_uic_label.to_csv(path_df_part_2_uic_label, index=False)
##### part_3 #####
# uic only (no labels): part 3 is the test window.
data_file = path_df_part_3
try:
    # header=None: the intermediate file was written with header=False.
    df_part_3 = pd.read_csv(data_file, index_col=False, header=None)
    df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
except Exception as e:
    print(e)
df_part_3_uic = df_part_3.drop_duplicates(['user_id', 'item_id', 'item_category'])[['user_id', 'item_id', 'item_category']]
df_part_3_uic.to_csv(path_df_part_3_uic, index=False)
1601544 | <filename>src/python/builtins.py
from basetype import SimpleTypeFunc, CustomTypeFunc, BuiltinTypeClass, LibClass, LibFunc, makeFuncSpec, makeFuncProto
import operator
import ast
import logging
import parser
class IntegerTimesFunc(LibFunc):
    """Built-in `n.times { |i| ... }`: runs the closure argument n times."""
    def __init__(self, cls):
        argtype = makeFuncSpec(ast.makePrimitiveType('void'), [cls.createType()])
        super(IntegerTimesFunc, self).__init__(cls, makeFuncProto('times', ast.makePrimitiveType('void'), [argtype]))
    def evaluateCall(self, visitor, callinfo):
        """Evaluate the call: visit the closure once per iteration with the
        loop index pushed as the implicit argument."""
        # visitor.logger.debug('IntegerTimesFunc.evaluateCall entry start', (self, callinfo))
        callinfo.args[0].evaluateParam(visitor)
        # The receiver integer gives the iteration count.
        callerval = callinfo.caller.object.visit(visitor)
        assert len(callinfo.args) == 1, (callinfo.args, callinfo.caller)
        # Push a fresh implicit-args frame; popped in the same call below.
        visitor.implicit_args_stack.insert(0, [])
        for i in range(callerval):
            visitor.pushScope()
            visitor.implicit_args_stack[0] = [i]
            # visitor.logger.debug('IntegerTimesFunc.evaluateCall item', i)
            callinfo.args[0].visit(visitor)
            visitor.popScope()
        # visitor.logger.debug('IntegerTimesFunc.evaluateCall entry end', (self, callinfo))
        del visitor.implicit_args_stack[0]
        return None
class IntegerParse(LibFunc):
    """Class function `parse(string, <T>) => <T>` registered on integer types."""
    def __init__(self, cls):
        super(IntegerParse, self).__init__(
            cls, 'parse(string, %s) => %s' % (cls.name, cls.name))
class IntegerToString(LibFunc):
    """Class function `toString() => string` registered on integer types."""
    def __init__(self, cls):
        super(IntegerToString, self).__init__(cls, 'toString() => string')
class IntegerClass(BuiltinTypeClass):
    """Built-in integer type: arithmetic/comparison operators plus
    parse/toString/times class functions."""
    def __init__(self, name):
        BuiltinTypeClass.__init__(self)
        self.name = name
        self.addDef(IntegerParse(self))
        self.addDef(IntegerToString(self))
        self.addDef(IntegerTimesFunc(self))
        # NOTE(review): operator.div exists only on Python 2 -- confirm the
        # interpreter's target Python version.
        self.ops = {'==' : operator.eq, '>':operator.gt, '!=':operator.ne,
            '<':operator.lt, '>=':operator.ge, '<=':operator.le,
            '+':operator.add,'-':operator.sub,'*':operator.mul,'/':operator.div,'%':operator.mod}
    def getType(self):
        return self
    def getTarget(self):
        return self
    def createType(self):
        # ast-level type node naming this class.
        return ast.makePrimitiveType(self.name)
    def evaluateNil(self, visitor):
        # Integers default to 0.
        return 0
    def evaluateBinaryOp(self, visitor, opstr, left, right):
        # Operands arrive unevaluated; evaluate both, then dispatch on ops.
        left = left.visit(visitor)
        right = right.visit(visitor)
        # visitor.logger.debug('IntegerClass.evaluateBinaryOp', opstr, left, right)
        op = self.ops[opstr]
        return op(left, right)
    def evaluateUnaryOp(self, visitor, opstr, left):
        # visitor.logger.debug('IntegerClass.evaluateUnaryOp', opstr, left)
        # Only unary minus is supported; `left` is already evaluated here.
        assert opstr == '-'
        return operator.neg(left)
    def eval_parse(self, caller, s, defval):
        # assert caller is None, (self, caller, s, defval)
        # print('eval_parse', caller, s, defval)
        # NOTE(review): `defval` is ignored -- int(s) raises on bad input
        # instead of returning the default.
        return int(s)
class FloatingClass(BuiltinTypeClass):
    """Built-in floating-point type with arithmetic/comparison operators."""
    def __init__(self, name):
        super(FloatingClass, self).__init__()
        self.name = name
        # NOTE(review): operator.div is Python 2 only.
        self.ops = {'==' : operator.eq, '>':operator.gt, '!=':operator.ne,
            '<':operator.lt, '>=':operator.ge, '<=':operator.le,
            '+':operator.add,'-':operator.sub,'*':operator.mul,'/':operator.div,'%':operator.mod}
    def evaluateNil(self, visitor):
        # Floats default to 0.0.
        return 0.0
    def evaluateBinaryOp(self, visitor, opstr, left, right):
        # Evaluate both operands, then dispatch on the operator table.
        left = left.visit(visitor)
        right = right.visit(visitor)
        # visitor.logger.debug('FloatingClass.evaluateBinaryOp', opstr, left, right)
        op = self.ops[opstr]
        return op(left, right)
    def evaluateUnaryOp(self, visitor, opstr, left):
        # visitor.logger.debug('FloatingClass.evaluateUnaryOp', opstr, left)
        # Only unary minus is supported; `left` is already evaluated here.
        assert opstr == '-'
        return operator.neg(left)
class SimpleTypeClass(BuiltinTypeClass):
    """Base class for primitive built-in types identified only by a name."""
    def __init__(self, name):
        super(SimpleTypeClass, self).__init__()
        self.name = name
class CharClass(SimpleTypeClass):
    """Built-in `char` type with case-conversion functions and comparisons."""
    def __init__(self):
        SimpleTypeClass.__init__(self, 'char')
        self.addDef(CustomTypeFunc(self, 'upper() => char'))
        self.addDef(CustomTypeFunc(self, 'lower() => char'))
        self.addDef(CustomTypeFunc(self, 'isUpper() => bool'))
        self.addDef(CustomTypeFunc(self, 'isLower() => bool'))
        # NOTE(review): chars evaluate to 1-char Python strings (see eval_*),
        # so '-', '/', '%' here would fail at runtime -- confirm they are
        # actually reachable for char operands.
        self.ops = {'==' : operator.eq, '>':operator.gt, '!=':operator.ne,
            '<':operator.lt, '>=':operator.ge, '<=':operator.le,
            '+':operator.add,'-':operator.sub,'*':operator.mul,'/':operator.div,'%':operator.mod}
    def evaluateBinaryOp(self, visitor, opstr, left, right):
        # visitor.logger.debug('CharClass.evaluateBinaryOp', opstr, left, right)
        left = left.visit(visitor)
        right = right.visit(visitor)
        op = self.ops[opstr]
        return op(left, right)
    def eval_upper(self, ch):
        return ch.upper()
    def eval_lower(self, ch):
        return ch.lower()
    def eval_isUpper(self, ch):
        return ch.isupper()
    def eval_isLower(self, ch):
        return ch.islower()
    def evaluateNil(self, visitor):
        # Nil char is the integer 0 (not a string).
        return 0
class BoolClass(SimpleTypeClass):
    """Built-in `bool` type with short-circuiting `and`/`or` and `not`."""
    def __init__(self):
        SimpleTypeClass.__init__(self, 'bool')
        self.ops = {'and':operator.and_, 'or':operator.or_}
    def evaluateUnaryOp(self, visitor, opstr, left):
        # visitor.logger.debug('BoolClass.evaluateUnaryOp', opstr, left)
        # `left` is already evaluated here.
        assert opstr == 'not'
        return not left
    def evaluateBinaryOp(self, visitor, opstr, left, right):
        """Short-circuit: `right` is only visited when `left` does not
        already decide the result."""
        # visitor.logger.debug('BoolClass.evaluateBinaryOp', opstr, left, right)
        assert opstr in ['and', 'or']
        left = left.visit(visitor)
        op = self.ops[opstr]
        if opstr == 'and':
            if not left:
                return False
            return op(left, right.visit(visitor))
        if left:
            return True
        return op(left, right.visit(visitor))
    def evaluateNil(self, visitor):
        return False
class VoidClass(SimpleTypeClass):
    """Built-in `void` type."""
    def __init__(self):
        # SimpleTypeClass.__init__ already sets self.name = 'void';
        # the redundant re-assignment was removed.
        SimpleTypeClass.__init__(self, 'void')
class NilClass(SimpleTypeClass):
    """Built-in `nil` type."""
    def __init__(self):
        # SimpleTypeClass.__init__ already sets self.name = 'nil';
        # the redundant re-assignment was removed.
        SimpleTypeClass.__init__(self, 'nil')
class ClassInfo(BuiltinTypeClass):
    """Built-in `Class` meta type exposing reflection helpers."""
    def __init__(self):
        super(ClassInfo, self).__init__()
        self.name = 'Class'
        self.addDef(CustomTypeFunc(self, 'getName() => string'))
class GenericDictEachFunc(LibFunc):
    """Built-in `dict.each { |key, value| ... }` iteration."""
    def __init__(self, cls):
        argtype = makeFuncSpec(ast.makePrimitiveType('void'), [cls.createKeyType(), cls.createValueType()])
        super(GenericDictEachFunc, self).__init__(cls, makeFuncProto('each', ast.makePrimitiveType('void'), [argtype]))
    def evaluateCall(self, visitor, callinfo):
        """Visit the closure once per (key, value) pair, passed as implicit args."""
        # visitor.logger.debug('DictEachFunc.evaluateCall entry start', self, callinfo)
        assert len(callinfo.args) == 1, (callinfo.caller.object, callinfo.args)
        assert isinstance(callinfo.args[0], ast.Closure), (callinfo, callinfo.getOwnerFunc())
        callinfo.args[0].evaluateParam(visitor)
        coll = callinfo.caller.object.visit(visitor)
        # visitor.logger.debug('DictEachFunc.evaluateCall coll', self, callinfo, coll, coll.keys())
        # Push a fresh implicit-args frame; popped after the loop.
        visitor.implicit_args_stack.insert(0, [])
        # NOTE(review): iteritems() is Python 2 only.
        for key, val in coll.iteritems():
            visitor.pushScope()
            visitor.implicit_args_stack[0] = [key, val]
            # visitor.logger.debug('DictEachFunc.evaluateCall key val', key, val, callinfo)
            callinfo.args[0].visit(visitor)
            visitor.popScope()
        # visitor.logger.debug('DictEachFunc.evaluateCall entry end', self, callinfo)
        del visitor.implicit_args_stack[0]
        return None
class GenericClass(BuiltinTypeClass):
    """Uninstantiated built-in generic type (e.g. List, Dict): caches and
    hands out concrete instantiations produced by `impl`."""
    def __init__(self, name, impl):
        super(GenericClass, self).__init__()
        self.name = name
        # `impl` is the GenericClassImpl subclass used to build instantiations.
        self.impl = impl
        self.genericParams = impl.genericParams
        self.instantiator = ast.GenericInstantiator(impl.genericParams)
        self.astFieldNames = ['genericParams']
    def instantiate(self, genericArgs, visitor):
        """Return a (possibly cached) concrete class for the given type args."""
        realGenericArgs = self.instantiator.getRealArgs(genericArgs)
        cls = self.instantiator.find(realGenericArgs)
        if cls:
            # visitor.logger.debug('GenericClass.instantiate existing', self.name, self, genericArgs, realTypeArgs, cls.genericArgs)
            return cls
        cls = self.impl(realGenericArgs)
        # visitor.logger.debug('GenericClass.instantiate new', self.name, self, realGenericArgs, cls.genericArgs)
        self.instantiator.cache(cls)
        visitor.setupNewItem(cls, self, True)
        return cls
class GenericClassImpl(BuiltinTypeClass):
    """Base class for concrete instantiations of built-in generic types."""
    def __init__(self, genericParams, genericArgs):
        BuiltinTypeClass.__init__(self)
        self.instantiation = ast.GenericInstantiation(genericParams, genericArgs)
        # self.astFieldNames = ['funcs']
    def cacheName(self, visitor):
        self.doVisitChildren(visitor)
    def findLocalSymbol(self, name):
        """Resolve `name` first against the generic instantiation (type
        parameters), then fall back to the regular class scope."""
        # print('GenericClassImpl.findLocalSymbol', name, self)
        ret = self.instantiation.findLocalSymbol(name)
        # print('GenericClassImpl.findLocalSymbol result', name, self, gt)
        return ret if ret else BuiltinTypeClass.findLocalSymbol(self, name)
class GenericDictClassImpl(GenericClassImpl):
    """Concrete Dict<KeyType, ValueType> instantiation backed by a Python dict."""
    genericParams = [ast.GenericTypeParam('KeyType'), ast.GenericTypeParam('ValueType')]
    def __init__(self, genericArgs):
        super(GenericDictClassImpl, self).__init__(GenericDictClassImpl.genericParams, genericArgs)
        self.addDef(SimpleTypeFunc(self, 'size() => int', evaluator=len))
        self.addDef(SimpleTypeFunc(self, 'clear()', evaluator=lambda d:d.clear()))
        self.addDef(CustomTypeFunc(self, makeFuncProto('get', self.createValueType(), [self.createKeyType(), self.createValueType()])))
        self.addDef(CustomTypeFunc(self, makeFuncProto('set', ast.makePrimitiveType('void'), [self.createKeyType(), self.createValueType()])))
        # NOTE(review): 'add' and 'remove' are declared but no eval_add /
        # eval_remove methods exist in this class -- verify the dispatch.
        self.addDef(CustomTypeFunc(self, makeFuncProto('add', ast.makePrimitiveType('void'), [self.createKeyType(), self.createValueType()])))
        self.addDef(CustomTypeFunc(self, makeFuncProto('remove', ast.makePrimitiveType('void'), [self.createKeyType()])))
        self.addDef(CustomTypeFunc(self, makeFuncProto('contains', ast.makePrimitiveType('bool'), [self.createKeyType()])))
        self.addDef(GenericDictEachFunc(self))
    def getItemType(self):
        # Item type of a dict is its value type.
        return self.instantiation.genericArgs[1].type.getRealType()
    def getKeyType(self):
        return self.instantiation.genericArgs[0].type.getRealType()
    def getValueType(self):
        return self.instantiation.genericArgs[1].type.getRealType()
    def createKeyType(self):
        return ast.UserType(['KeyType'])
    def createValueType(self):
        return ast.UserType(['ValueType'])
    def eval_get(self, coll, key, defval=None):
        # print('DictClass.eval_get', coll, key, defval)
        return coll.get(key, defval)
    def eval_set(self, coll, key, item):
        # assert False, (self, coll, item)
        # print('DictClass.eval_set', self, coll, key, item)
        assert key is not None, (self, coll, key, item)
        # assert isinstance(key, str), (self, coll, key, item)
        # assert False, (self, coll, key, item)
        coll[key] = item
        return None
    def evaluateNil(self, visitor):
        # A nil dict evaluates to an empty Python dict.
        return {}
    def eval_contains(self, coll, key):
        return key in coll
    def eval_not_contains(self, coll, key):
        return key not in coll
class GenericSetEachFunc(LibFunc):
    """Built-in `set.each { |item, index| ... }` iteration."""
    def __init__(self, cls):
        argtype = makeFuncSpec(ast.makePrimitiveType('void'), [cls.createElementType(), ast.makePrimitiveType('int')])
        super(GenericSetEachFunc, self).__init__(cls, makeFuncProto('each', ast.makePrimitiveType('void'), [argtype]))
    def evaluateCall(self, visitor, callinfo):
        """Visit the closure once per element, passing (item, index) as
        implicit args. Iteration order follows the underlying Python set."""
        # visitor.logger.debug('SetEachFunc.evaluateCall entry start', self, callinfo)
        assert len(callinfo.args) == 1, (callinfo.caller.object, callinfo.args)
        assert isinstance(callinfo.args[0], ast.Closure), (callinfo, callinfo.getOwnerFunc())
        callinfo.args[0].evaluateParam(visitor)
        coll = callinfo.caller.object.visit(visitor)
        # visitor.logger.debug('SetEachFunc.evaluateCall coll', self, callinfo, coll, coll.keys())
        visitor.implicit_args_stack.insert(0, [])
        i = 0
        for item in coll:
            visitor.pushScope()
            visitor.implicit_args_stack[0] = [item, i]
            # visitor.logger.debug('SetEachFunc.evaluateCall key val', key, val, callinfo)
            callinfo.args[0].visit(visitor)
            i += 1
            visitor.popScope()
        # visitor.logger.debug('SetEachFunc.evaluateCall entry end', self, callinfo)
        del visitor.implicit_args_stack[0]
        return None
class GenericSetClassImpl(GenericClassImpl):
    """Concrete Set<ElementType> instantiation backed by a Python set."""
    genericParams = [ast.GenericTypeParam('ElementType')]
    def __init__(self, genericArgs):
        super(GenericSetClassImpl, self).__init__(GenericSetClassImpl.genericParams, genericArgs)
        # NOTE(review): 'size'/'clear' are declared without an evaluator;
        # eval_size exists below but eval_clear does not -- verify dispatch.
        self.addDef(SimpleTypeFunc(self, 'size() => int'))
        self.addDef(SimpleTypeFunc(self, 'clear()'))
        self.addDef(CustomTypeFunc(self, makeFuncProto('add', ast.makePrimitiveType('void'), [self.createElementType()])))
        self.addDef(CustomTypeFunc(self, makeFuncProto('remove', ast.makePrimitiveType('void'), [self.createElementType()])))
        self.addDef(CustomTypeFunc(self, makeFuncProto('contains', ast.makePrimitiveType('bool'), [self.createElementType()])))
        self.addDef(GenericSetEachFunc(self))
    def createElementType(self):
        return ast.UserType(['ElementType'])
    def evaluateNil(self, visitor):
        # A nil set evaluates to an empty Python set.
        return set()
    def eval_add(self, coll, key):
        coll.add(key)
    def eval_remove(self, coll, key):
        # Raises KeyError when the element is absent (Python set semantics).
        coll.remove(key)
    def eval_size(self, coll):
        return len(coll)
    def eval_contains(self, coll, key):
        return key in coll
    def eval_not_contains(self, coll, key):
        return key not in coll
def del_list(a):
    """Empty the list `a` in place (keeps the same list object)."""
    a[:] = []
class GenericListEachFunc(LibFunc):
    """Built-in `list.each { |item, index| ... }` iteration."""
    def __init__(self, cls):
        argtype = makeFuncSpec(ast.makePrimitiveType('void'), [cls.createElementType(), ast.makePrimitiveType('int')])
        super(GenericListEachFunc, self).__init__(cls, makeFuncProto('each', ast.makePrimitiveType('void'), [argtype]))
        # print('GenericListEachFunc.init', self.getSpec(), self)
    def evaluateCall(self, visitor, callinfo):
        """Visit the closure once per element, passing (item, index) as
        implicit args."""
        # visitor.logger.debug('ListEachFunc.evaluateCall entry start', self, callinfo, callinfo.caller.object, callinfo.getOwnerFunc())
        coll = callinfo.caller.object.visit(visitor)
        assert isinstance(callinfo.args[0], ast.Closure), (callinfo, callinfo.getOwnerFunc())
        callinfo.args[0].evaluateParam(visitor)
        visitor.implicit_args_stack.insert(0, [])
        i = 0
        for item in coll:
            visitor.pushScope()
            assert len(callinfo.args) == 1, (coll, item, callinfo.args)
            # assert False, (item, coll)
            # itemval = item.visit(visitor)
            # Items are already evaluated values, not AST nodes.
            itemval = item
            visitor.implicit_args_stack[0] = [itemval, i]
            # visitor.logger.debug('ListEachFunc.evaluateCall item', item, itemval, coll)
            callinfo.args[0].visit(visitor)
            i += 1
            visitor.popScope()
        # visitor.logger.debug('ListEachFunc.evaluateCall entry end', (self, callinfo))
        del visitor.implicit_args_stack[0]
        return None
class GenericListClassImpl(GenericClassImpl):
    """Concrete List<ElementType> instantiation backed by a Python list."""
    genericParams = [ast.GenericTypeParam('ElementType')]
    def __init__(self, genericArgs):
        super(GenericListClassImpl, self).__init__(GenericListClassImpl.genericParams, genericArgs)
        self.addDef(SimpleTypeFunc(self, 'size() => int', evaluator=len))
        self.addDef(SimpleTypeFunc(self, 'empty() => bool', evaluator=lambda left:len(left)==0))
        self.addDef(SimpleTypeFunc(self, 'clear()', evaluator=del_list))
        self.addDef(CustomTypeFunc(self, makeFuncProto('set', ast.makePrimitiveType('void'), [ast.makePrimitiveType('int'), self.createElementType()])))
        # NOTE(review): 'swap' is declared but no eval_swap exists; 'flatten'
        # is declared returning ElementType but eval_flatten returns a list.
        self.addDef(CustomTypeFunc(self, makeFuncProto('flatten', self.createElementType(), [self.createCollectionType()])))
        self.addDef(CustomTypeFunc(self, makeFuncProto('swap', ast.makePrimitiveType('void'), [self.createCollectionType()])))
        self.addDef(CustomTypeFunc(self, makeFuncProto('append', ast.makePrimitiveType('void'), [self.createElementType()])))
        self.addDef(CustomTypeFunc(self, makeFuncProto('extend', ast.makePrimitiveType('void'), [self.createCollectionType()])))
        self.addDef(CustomTypeFunc(self, makeFuncProto('insert', ast.makePrimitiveType('void'), [ast.makePrimitiveType('int'), self.createElementType()])))
        self.addDef(CustomTypeFunc(self, makeFuncProto('remove', ast.makePrimitiveType('void'), [self.createElementType()])))
        self.addDef(CustomTypeFunc(self, 'removeAt(int)'))
        self.addDef(CustomTypeFunc(self, makeFuncProto('contains', ast.makePrimitiveType('bool'), [self.createElementType()])))
        self.addDef(GenericListEachFunc(self))
        self.ops = {'==' : operator.eq, '!=':operator.ne,
            '+':operator.add,'*':operator.mul}
    def getItemType(self):
        # print('GenericListClassImpl.getItemType', self.name, self.genericArgs)
        return self.instantiation.genericArgs[0].type.getRealType()
    def createElementType(self):
        return ast.UserType(['ElementType'])
    def createCollectionType(self):
        return ast.createListType(self.createElementType())
    def eval_append(self, coll, item):
        # assert False, (self, coll, item)
        coll.append(item)
        return None
    def evaluateNil(self, visitor):
        # print('GenericListClassImpl.evaluateNil', self, visitor)
        # A nil list evaluates to an empty Python list.
        return []
    def evaluateBinaryOp(self, visitor, opstr, left, right):
        # visitor.logger.debug('GenericListClassImpl.evaluateBinaryOp', opstr, left, right)
        left = left.visit(visitor)
        right = right.visit(visitor)
        op = self.ops[opstr]
        return op(left, right)
    def eval_contains(self, coll, item):
        return item in coll
    def eval_not_contains(self, coll, item):
        return item not in coll
    def eval_extend(self, coll, delta):
        coll.extend(delta)
    def eval_flatten(self, coll):
        """Concatenate the element lists of a list-of-lists into one list."""
        ret = []
        for item in coll:
            ret.extend(item)
        return ret
    def eval_removeAt(self, coll, val):
        del coll[val]
    def eval_remove(self, coll, val):
        # Removes the first occurrence; raises ValueError when absent.
        coll.remove(val)
    def eval_insert(self, coll, pos, val):
        coll.insert(pos, val)
    def eval_set(self, coll, pos, val):
        coll[pos] = val
class GenericArrayEachFunc(LibFunc):
    """Built-in `array.each { |item, index| ... }` iteration."""
    def __init__(self, cls):
        argtype = makeFuncSpec(ast.makePrimitiveType('void'), [cls.createElementType(), ast.makePrimitiveType('int')])
        super(GenericArrayEachFunc, self).__init__(cls, makeFuncProto('each', ast.makePrimitiveType('void'), [argtype]))
    def evaluateCall(self, visitor, callinfo):
        """Visit the closure once per element, passing (item, index) as
        implicit args."""
        # visitor.logger.debug('GenericArrayEachFunc.evaluateCall entry start', self, callinfo, callinfo.caller.object, callinfo.getOwnerFunc())
        coll = callinfo.caller.object.visit(visitor)
        assert isinstance(callinfo.args[0], ast.Closure), (callinfo, callinfo.getOwnerFunc())
        callinfo.args[0].evaluateParam(visitor)
        visitor.implicit_args_stack.insert(0, [])
        i = 0
        for item in coll:
            visitor.pushScope()
            assert len(callinfo.args) == 1, (coll, item, callinfo.args)
            # assert False, (item, coll)
            # itemval = item.visit(visitor)
            # Items are already evaluated values, not AST nodes.
            itemval = item
            visitor.implicit_args_stack[0] = [itemval, i]
            # visitor.logger.debug('GenericArrayEachFunc.evaluateCall item', item, itemval, coll)
            callinfo.args[0].visit(visitor)
            i += 1
            visitor.popScope()
        # visitor.logger.debug('GenericArrayEachFunc.evaluateCall entry end', (self, callinfo))
        del visitor.implicit_args_stack[0]
        return None
class GenericArrayClassImpl(GenericClassImpl):
    """Concrete fixed-size Array<ElementType, size> backed by a Python list."""
    genericParams = [ast.GenericTypeParam('ElementType'), ast.GenericLiteralParam(ast.UserType(['int']))]
    def __init__(self, genericArgs):
        super(GenericArrayClassImpl, self).__init__(GenericArrayClassImpl.genericParams, genericArgs)
        self.addDef(GenericArrayEachFunc(self))
        # The second generic argument must be a compile-time int literal.
        assert isinstance(genericArgs[1].literal, ast.IntLiteral), (genericArgs[1], self)
        self.size = genericArgs[1].literal.value
    def createElementType(self):
        return ast.UserType(['ElementType'])
    def evaluateNil(self, visitor):
        # A nil array is a list of `size` nil values (see eval_get/eval_set:
        # the runtime representation is a plain Python list).
        return [visitor.nilValue] * self.size
    def eval_get(self, coll, key, defval=None):
        """Return coll[key], or `defval` when the index is out of range.

        Fix: the original called dict-style coll.get(key, defval), but
        arrays are Python lists (see evaluateNil), which have no .get and
        would raise AttributeError.
        """
        if 0 <= key < len(coll):
            return coll[key]
        return defval
    def eval_set(self, coll, key, item):
        assert key is not None, (self, coll, key, item)
        coll[key] = item
        return None
def evalArg(arg, visitor):
    """Evaluate a call argument; a `this` reference yields the identity of
    the current receiver object."""
    if isinstance(arg, ast.This):
        return id(visitor.getThis())
    return arg.visit(visitor)
def replaceAt(s, index, ch):
    """Return a copy of `s` with the character at `index` replaced by the
    single character `ch` (negative indices count from the end)."""
    assert len(ch) == 1
    chars = list(s)
    chars[index] = ch
    return "".join(chars)
class StringClass(BuiltinTypeClass):
    """Built-in `string` type: comparisons, concatenation, %-formatting and
    a set of string-manipulation class functions."""
    def __init__(self):
        BuiltinTypeClass.__init__(self)
        self.name = 'string'
        self.addDef(CustomTypeFunc(self, 'split(string, int) => [string]'))
        # NOTE(review): 'bytes' and 'mul' are declared but no eval_bytes /
        # eval_mul methods exist in this class -- verify the dispatch.
        self.addDef(CustomTypeFunc(self, 'bytes() => [byte]'))
        # self.addDef(CustomTypeFunc(self, 'fromBytes(byte[], int) => string'))
        self.addDef(SimpleTypeFunc(self, 'size() => int', evaluator=len))
        self.addDef(SimpleTypeFunc(self, 'empty() => bool', evaluator=lambda s: len(s) == 0))
        self.addDef(CustomTypeFunc(self, 'format() => string'))
        self.addDef(CustomTypeFunc(self, 'startsWith(string) => bool'))
        self.addDef(CustomTypeFunc(self, 'endsWith(string) => bool'))
        self.addDef(CustomTypeFunc(self, 'upper() => string'))
        self.addDef(CustomTypeFunc(self, 'lower() => string'))
        self.addDef(CustomTypeFunc(self, 'join([string]) => string'))
        self.addDef(CustomTypeFunc(self, 'replaceAt(int, char) => string'))
        self.addDef(CustomTypeFunc(self, 'mul(int) => string'))
        # '%' maps to eval_format so `"fmt" % args` does printf-style formatting.
        self.ops = {
            '==':operator.eq, '>':operator.gt, '!=':operator.ne,
            '<':operator.lt, '>=':operator.ge, '<=':operator.le,
            '+':operator.add, '*':operator.mul, '%':self.eval_format
        }
    def getItemType(self):
        # Indexing a string yields a char.
        return ast.builtinCharType
    def eval_startsWith(self, s, tag):
        # print('eval_startsWith', s, tag)
        return s.startswith(tag)
    def eval_endsWith(self, s, tag):
        ret = s.endswith(tag)
        # print('eval_endsWith', s, tag, ret)
        return ret
    def eval_upper(self, s):
        return s.upper()
    def eval_lower(self, s):
        return s.lower()
    def eval_split(self, s, sep, maxsplit=None):
        # assert False, (self, coll, item)
        # assert False, (self, coll, key, item)
        # maxsplit is optional; only forward it when given.
        ret = s.split(sep, maxsplit) if maxsplit is not None else s.split(sep)
        # print('StringClass.eval_split', self, s, sep, maxsplit, ret)
        return ret
    def evaluateNil(self, visitor):
        # Nil string is the empty string.
        return ''
    def evaluateBinaryOp(self, visitor, opstr, left, right):
        # visitor.logger.debug('StringClassBase.evaluateBinaryOp', opstr, left, right)
        left = left.visit(visitor)
        right = right.visit(visitor)
        op = self.ops[opstr]
        # visitor.logger.debug('StringClassBase.evaluateBinaryOp op', opstr, left, right, op)
        if opstr == '%':
            # '%' takes a (possibly single) argument pack for eval_format.
            if not isinstance(right, list) and not isinstance(right, tuple):
                right = [right]
            return op(left, *right)
        return op(left, right)
    def eval_format(self, formatstr, *args):
        """printf-style formatting; AST-node arguments format as their id."""
        # print('eval_format start', formatstr, args)
        # if not isinstance(args, list) and not isinstance(args, tuple):
        #     args = [args]
        # print('eval_format start args', formatstr, *args)
        args = [id(arg) if isinstance(arg, ast.AstNode) else arg for arg in args]
        # print('eval_format', formatstr, args)
        return formatstr % tuple(args)
    def eval_join(self, sep, args):
        # print('eval_join start', sep, args)
        return sep.join(args)
    def eval_replaceAt(self, s, index, ch):
        return replaceAt(s, index, ch)
class AnyRefClass(BuiltinTypeClass):
    """Top type for reference values; nil evaluates to the interpreter's nil."""
    def __init__(self):
        super(AnyRefClass, self).__init__()
        self.name = 'AnyRef'
    def evaluateNil(self, visitor):
        return visitor.nilValue
class GenericTupleClassImpl(GenericClassImpl):
    """Concrete Tuple<...ElementTypes> instantiation (variadic type params)."""
    genericParams = [ast.GenericVariadicTypeParam('ElementTypes')]
    def __init__(self, genericArgs):
        # Variadic params arrive packed as a single generic argument.
        assert len(genericArgs) == 1, genericArgs
        super(GenericTupleClassImpl, self).__init__(GenericTupleClassImpl.genericParams, genericArgs)
        # print('GenericTupleClassImpl.init', genericArgs, genericArgs[0].types)
    def evaluateNil(self, visitor):
        return visitor.nilValue
def addLoggerVar(context, visitor):
    """Inject an internal `logger` variable definition into `context` (a
    CodeUnit or class) if it does not already have one, and make sure the
    owning unit imports the logging machinery."""
    if context.loggerVar is not None:
        return
    s = ''
    if isinstance(context, ast.CodeUnit):
        # Unit-level logger is named after the unit; class-level logger
        # resolves its name at runtime via getClassName().
        s = 'Logging.getLogger("%s")' % context.name
    else:
        s = 'Logging.getLogger(getClassName())'
    v = ast.SingleVarDef('logger', ast.createUserType('Logger'), parser.parseExpr(s))
    # NOTE(review): logged at error level with positional args -- looks like
    # a leftover debug trace; confirm the project logger accepts this form.
    visitor.logger.error('addLoggerVar', context, visitor, v)
    v.internal = True
    context.definitions.append(v)
    context.loggerVar = v
    visitor.setupNewItem(v, context, False)
    codeunit = context.getOwnerUnit()
    codeunit.addLoggerImports(visitor)
    return v
class LogFunction(LibFunc):
    """One logging builtin (trace/debug/info/...) bound to a fixed level."""
    def __init__(self, cls, name, level):
        proto = name + '([AnyRef])'
        LibFunc.__init__(self, cls, proto)
        self.level = level
    def evaluateCall(self, visitor, callinfo):
        """Evaluate all arguments and forward them to the in-scope `logger`."""
        loggerobj = visitor.getValue('logger')
        # visitor.logger.debug('LogFunction.evaluateCall', self, self.level, visitor, callinfo, callinfo.getOwner(), loggerobj)
        args = [arg.visit(visitor) for arg in callinfo.args]
        loggerobj.log(self.level, *args)
    def resolveCall(self, visitor, callinfo):
        """Ensure a `logger` variable exists at the call site: class-level if
        the call is inside a class, otherwise unit-level."""
        cls = callinfo.getOwnerClass()
        assert isinstance(cls, ast.ClassDef) or cls is None, (self, visitor, callinfo, cls)
        if cls is None or cls.loggerVar is None:
            # if there is no logger var in class context or there is no class context, add logger var to code unit
            # print('LogFunction.resolveCall', callinfo, callinfo.getOwnerFunc(), callinfo.getOwnerFunc().getOwner())
            codeunit = callinfo.getOwnerUnit()
            addLoggerVar(codeunit, visitor)
class LogClass(LibClass):
    """Built-in `Log` class exposing one function per log level.

    'event' aliases ERROR and 'fatal' aliases CRITICAL on purpose; 'trace'
    uses the custom numeric level 5 (below DEBUG).
    """
    def __init__(self):
        LibClass.__init__(self, 'Log')
        self.addDef(LogFunction(self, 'trace', 5))
        self.addDef(LogFunction(self, 'debug', logging.DEBUG))
        self.addDef(LogFunction(self, 'info', logging.INFO))
        self.addDef(LogFunction(self, 'warn', logging.WARNING))
        self.addDef(LogFunction(self, 'event', logging.ERROR))
        self.addDef(LogFunction(self, 'error', logging.ERROR))
        self.addDef(LogFunction(self, 'critical', logging.CRITICAL))
        self.addDef(LogFunction(self, 'fatal', logging.CRITICAL))
| StarcoderdataPython |
4691 | <filename>constellation_forms/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-15 00:56
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for constellation_forms: Form, FormSubmission, Validator.

    Auto-generated by Django 1.10.6. Do not edit the operations of a
    migration that has already been applied; create a new migration instead.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Versioned form definitions; (form_id, version) is unique (below).
        migrations.CreateModel(
            name='Form',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('form_id', models.IntegerField()),
                ('version', models.IntegerField()),
                ('name', models.TextField()),
                ('description', models.TextField(blank=True)),
                ('elements', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
            options={
                'ordering': ('-version',),
                'db_table': 'form',
            },
        ),
        # A user's submission of a form, with a small state machine.
        migrations.CreateModel(
            name='FormSubmission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('state', models.IntegerField(choices=[(0, 'draft'), (1, 'submitted'), (2, 'approved'), (3, 'denied')])),
                ('modified', models.DateField()),
                ('submission', django.contrib.postgres.fields.jsonb.JSONField()),
                ('form', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='constellation_forms.Form')),
                ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'form_submission',
            },
        ),
        # Named regexes used to validate form field input.
        migrations.CreateModel(
            name='Validator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
                ('regex', models.TextField()),
            ],
            options={
                'db_table': 'validators',
            },
        ),
        migrations.AlterUniqueTogether(
            name='form',
            unique_together=set([('form_id', 'version')]),
        ),
    ]
| StarcoderdataPython |
198569 | #!/usr/bin/python
"""
Helper script to split monthly input geojson files with all detections for example from E13d into separate entries per grouped by AOI and timestamp
Also creates a csv for additional indicator with `suffix` to allow grouped view of these data
Usage:
# update `list_of_dates_to_process` with dates in YYYYMM format and do:
docker run --rm -it -v $PWD:/working eurodatacube/jupyter-user:0.19.6 /opt/conda/envs/eurodatacube-0.19.6/bin/python3 /working/convert_monthly.py
"""
import json
import os
import csv
import datetime
from glob import glob
import time
import geopandas as gpd
import shapely
import pandas as pd
import numpy as np
new_suffix = 'a' # suffix of new indicator to create
indicator = 'E13d' # indicator to fetch jsons of
column_names_csv = ['AOI', 'Country', 'Region', 'City', 'Site Name', 'Description', 'Method', 'EO Sensor', 'Input Data', 'Indicator code', 'Time', 'Measurement Value', 'Reference Description', 'Reference time', 'Reference value', 'Rule', 'Indicator Value', 'Sub-AOI', 'Y axis', 'Indicator Name', 'Color code', 'Data Provider', 'AOI_ID', 'Update Frequency']
list_of_dates_to_process = ['201807', '201808', '201809', '201810', '201811', '201812', '201901', '201902', '201903', '201904', '201905', '201906', '201907', '201908', '201909', '201910', '201911', '201912', '202001', '202002', '202003', '202004', '202005', '202006', '202007', '202008', '202009'] # to be updated
def split_aoi(aoi):
    """Split an 'lat,lon' AOI string into a (lat, lon) pair of floats."""
    latitude, longitude = (float(part) for part in aoi.split(','))
    return latitude, longitude
def feature_collection(ftrs):
    """Wrap a list of GeoJSON features into a FeatureCollection mapping."""
    # Keep the key order (features first) so serialized output is stable.
    return dict(features=ftrs, type="FeatureCollection")
def try_parsing_date(text, formats=('%Y-%m-%dT%H:%M:%S',)):
    """Parse *text* with the first matching format in *formats*.

    Raises ValueError when none of the formats fits.
    """
    for candidate_format in formats:
        try:
            return datetime.datetime.strptime(text, candidate_format)
        except ValueError:
            continue
    raise ValueError('time not provided in valid format')
# Output locations: per-detection GeoJSON files and the aggregated CSV.
featuresPath = f'/working/eodash-data/features/{indicator}/'
output_csv_path = f'/working/eodash-data/data/{indicator}_detections.csv'
# Seed the CSV with a header once; later runs append rows to the same file.
if not os.path.exists(output_csv_path):
    with open(output_csv_path, 'w') as csvv:
        w = csv.writer(csvv)
        # writes header
        w.writerow(column_names_csv)
def convert(path2, indicator):
    """Split one monthly detections GeoJSON into per-AOI/per-timestamp files.

    For the month *path2* (YYYYMM) of *indicator*: every detection is matched
    to its nearest known POI, a row is appended to the shared detections CSV,
    and the features are regrouped into one GeoJSON per (AOI id, timestamp).
    """
    # initialize output
    new_features = {} # key=aoiId_time -fname, ftrs
    path = f'/working/eodash-data/features/{indicator}/{indicator}_{path2}.geojson'
    yyyy = path.split('_')[1][0:4]
    mm = path.split('_')[1][4:6]
    # load monthly geojson with date & geometry of detection as 1 feature
    gdf = gpd.read_file(path)
    # load individual geojson for first poi from glob (does not matter which is taken), as usually these data match
    poi_json_glob = f'/working/eodash-data/internal/*{indicator}*.json'
    poi_json_path = glob(poi_json_glob)[0]
    with open(poi_json_path) as poi_json:
        poiJson = json.load(poi_json)
    # extract matching entry based on time of monthly file - to later extract for example Input Data value etc.
    single_entry_time = [i for i in poiJson if i['time'] == f'{yyyy}-{mm}-01T00:00:00']
    internal_data_path = '/working/data/internal/pois_eodash.json'
    with open(internal_data_path) as inte:
        internalJson = json.load(inte)
    # filter only pois from selected indicator
    inter = [item for item in internalJson if item['indicator'] == indicator]
    # create geopandas dataframe to enable easy coordinate match
    df = pd.DataFrame.from_dict(inter)
    # extract coords to create geometry column
    df['lat'], df['lon'] = zip(*df['aoi'].map(split_aoi))
    gdf_internal = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon, df.lat))
    # create multipoint to be able to find nearest neighbour from list
    multipoint = gdf_internal.geometry.unary_union
    # go over each individual entry of the geojson, extract geometry and date
    for index, row in gdf.iterrows():
        data = {}
        # find nearest point (aoi_id)
        _, nearest_geom = shapely.ops.nearest_points(row.geometry, multipoint)
        # get relevant aoi_id
        found = gdf_internal.loc[(gdf_internal['lon'] == nearest_geom.x) & (gdf_internal['lat'] == nearest_geom.y)]
        # update AOI_ID, adding suffix in case poi without suffix was found
        aoiId = f"{found['aoiID'].iloc[0]}"
        if not aoiId.endswith(new_suffix):
            aoiId = f"{aoiId}{new_suffix}"
        data['AOI_ID'] = aoiId
        # update AOI, adding last digit to lon to avoid same digits as source
        data['AOI'] = f"{found['aoi'].iloc[0]}1"
        # add time
        data['Time'] = row['TIMESTAMP UTC']
        # add values for some columns from internal
        data['Region'] = found['region'].iloc[0]
        data['Country'] = 'all' # needs update to 'all' in case if we want to display map (showMap function)
        data['City'] = found['city'].iloc[0]
        data['Description'] = found['description'].iloc[0]
        data['Indicator code'] = f"{found['indicator'].iloc[0]}"
        data['Site Name'] = found['siteName'].iloc[0]
        data['Indicator Name'] = found['indicatorName'].iloc[0]
        # data['Color code'] = found['lastColorCode']
        data['Sub-AOI'] = found['subAoi'].iloc[0]
        # data['Update Frequency'] = found['updateFrequency']
        # data['Input Data'] = single_entry_time[0]['input_data']
        # data['EO Sensor'] = single_entry_time[0]['eo_sensor']
        data['Input Data'] = 'Sentinel 2 L2A'
        data['EO Sensor'] = 'Sentinel 2'
        # dirty and superslow way of merging columns of csv with actual limited data (merge all columns dataframe with sparser dataframe containing only some data)
        # todo, redo this for performance reasons
        # NOTE(review): the CSV is re-read and re-written once per detection
        # (O(n^2) I/O) — batching rows per month would be far cheaper.
        csv_read = pd.read_csv(output_csv_path, header=0)
        data_as_df = pd.DataFrame(data, index=[0])
        save = pd.concat([csv_read, data_as_df[csv_read.columns.intersection(data_as_df.columns)]]).replace(np.nan, '', regex=True).drop_duplicates()
        # save csv
        save.to_csv(output_csv_path, mode='w', index=False)
        time_for_filename = try_parsing_date(row['TIMESTAMP UTC']).strftime('%Y%m%dT%H%M%S')
        key_for_ftrs_dict = f'{aoiId}_{time_for_filename}'
        properties = row.to_dict()
        # remove geometry column
        del properties['geometry']
        # store individual geojson features to a collection for later write
        feature = {"type": "Feature", "geometry": shapely.geometry.mapping(row['geometry']), "properties": properties}
        if (key_for_ftrs_dict in new_features):
            new_features[key_for_ftrs_dict].append(feature)
        else:
            new_features[key_for_ftrs_dict] = [feature]
    # write individual geojsons, one file per (AOI id, timestamp) group
    for key in new_features:
        with open(f'{featuresPath}{indicator}_{key}.geojson', 'w') as output_geojson:
            json.dump(feature_collection(new_features[key]), output_geojson)
# track time
start = time.time()
# substitute with desired months or just single month (will append to existing csv if exists)
# Process each configured YYYYMM month sequentially, reporting per-step time.
for path in list_of_dates_to_process:
    convert(path, indicator)
    print(f'step {path} has taken {time.time() - start} seconds')
    start = time.time()
| StarcoderdataPython |
4811396 | import csv
from sklearn import datasets
from sklearn import metrics
from sklearn.svm import SVC
from pprint import pprint
import json
import random
import sys
import pickle
import preprocessing
import jpype as jp
import zemberek.normalizer
import length, ner_tagging, pos_tagging
import csv
import words
######################## Prepare for preprocessing
ZEMBEREK_PATH = 'zemberek/bin/zemberek-full.jar'
# The Zemberek Turkish-NLP jar must be on the JVM classpath before any
# zemberek.* helper is initialised.
jp.startJVM(jp.getDefaultJVMPath(), '-ea', '-Djava.class.path=%s' % (ZEMBEREK_PATH))
zemberek.normalizer.init()
zemberek.nertagger.init_libs()
######################## Load pretrained model
# Fix: use a context manager so the model file handle is closed
# (pickle.load(open(...)) previously leaked the open file).
with open("svm_model.sav", "rb") as _model_file:
    model = pickle.load(_model_file)
def extract_feature(tokens):
    """Build one feature row for *tokens*.

    The row is the concatenation of: sentence-length features, NER tag
    counts (LOCATION/ORGANIZATION/PERSON/O) and POS tag counts. The
    bag-of-words features are currently disabled (commented out).
    """
    pos_columns = pos_tagging.get_column_names()
    word_columns = words.get_column_names()
    # NOTE(review): `fieldnames` is assembled below but never used in this
    # function — presumably a leftover from CSV export; candidate for removal
    # (kept here in case get_column_names() is relied on for side effects).
    fieldnames = []
    fieldnames = fieldnames + ['length']
    fieldnames = fieldnames + ['LOCATION', 'ORGANIZATION', 'PERSON', 'O']
    fieldnames = fieldnames + pos_columns
    #fieldnames = fieldnames + word_columns
    the_row = []
    length_vector = length.extract_feature(tokens)
    the_row = the_row + length_vector
    ner_vector = ner_tagging.extract_feature(tokens)
    the_row = the_row + ner_vector
    pos_vector = pos_tagging.extract_feature(tokens)
    the_row = the_row + pos_vector
    #bow_vector = words.extract_feature(tokens)
    #the_row = the_row + bow_vector
    return the_row
# Interactive loop: read a sentence, preprocess and featurize it, then
# print the model's probability that the claim is check-worthy.
while True:
    test_string = input("Please enter some input:\n")
    preprocessed = preprocessing.perform_preprocessing(test_string)
    feature_vector = extract_feature(preprocessed)
    test_data = []
    test_data.append(feature_vector)
    predicted = model.predict_proba(test_data)
print("Prob(Check-Worthy) =", predicted[0][1]) | StarcoderdataPython |
3208652 | <gh_stars>0
Python 3.9.5 (tags/v3.9.5:0a7dcbd, May 3 2021, 17:27:52) [MSC v.1928 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> 3+2
5
>>> 3-2
1
>>> 2-3
-1
>>> 2*3
6
>>> 2/3
0.6666666666666666
>>> 3%2
1
>>> 3**2
9
>>> 2**4
16
>>> dist1 = 15
>>> dist2 = 15
>>> total_dist = dist1 + dist1
>>> total_dist = dist1 + dist2
>>> mph = 3
>>> time = total_dist / mph
>>> time
10.0
>>> float_time = 10.123456789
>>> round(float_time, 2)
10.12
>>> 6-5.7
0.2999999999999998
>>> round(6-5.7)
0
>>> | StarcoderdataPython |
4837624 | # import json
import yaml
import sys
import logging
import os
import os.path
from collections import namedtuple
# These can be overridden in the config file.
# They are just here some sensible defaults
# so the module shill functions
BUILT_IN_DEFAULTS = {
'meta':{
"version": "dev_build",
"app" : "unknown",
"config_directory": '.',
},
'logging': {
"logfile" : None,
"loglvl" : "debug",
"log_rotation": False,
"logfmt" : '%(asctime)s %(name)s %(levelname)s: %(message)s',
"datefmt" : '%d-%m-%y %I:%M:%S %p',
"debugging" : False,
}
}
# Instert default values for app config here
# instead of mixing them with BUILT_IN_DEFAULTS
# These can be use to override BUILT_IN_DEFAULTS as well
APP_DEFAULTS = {
}
BUILT_IN_DEFAULTS.update(APP_DEFAULTS)
def parseLogLevel(text, default=30):
    """Map a level name (case-insensitive) to its numeric logging value.

    Unknown names fall back to *default* (WARNING, 30, by default).
    """
    levelValues = {
        'critical': 50,
        'error': 40,
        'warning': 30,
        'info': 20,
        'debug': 10,
    }
    return levelValues.get(text.lower(), default)
def recursivelyUpdateDict(orig, new):
    """Return a copy of *orig* recursively overridden by *new*.

    Nested dicts are merged key-by-key; a non-dict value in *new* (even one
    replacing a dict in *orig*) wins outright. Keys present only in *new*
    are added. Neither input is mutated.
    """
    updated = orig.copy()
    updateFrom = new.copy()
    for key, value in updated.items():
        if key in updateFrom:
            override = updateFrom.pop(key)
            if isinstance(value, dict) and isinstance(override, dict):
                # Both sides are mappings: merge them recursively.
                updated[key] = recursivelyUpdateDict(value, override)
            else:
                # Bug fix: previously a non-dict override of a dict default
                # recursed anyway and crashed calling .copy() on a scalar.
                updated[key] = override
    # Keys that exist only in *new* are appended verbatim.
    updated.update(updateFrom)
    return updated
def createNamespace(mapping, name='config'):
    """Convert a (possibly nested) dict into nested namedtuples.

    Keys must be valid identifiers; nested dicts become namedtuples named
    after their key.
    """
    converted = {
        key: createNamespace(value, key) if isinstance(value, dict) else value
        for key, value in mapping.items()
    }
    nt = namedtuple(name, list(converted.keys()))
    return nt(**converted)
def loadYAML(path):
    """Parse the YAML file at *path* and return the resulting object."""
    with open(path) as configFile:
        # safe_load refuses arbitrary Python object construction; plain
        # yaml.load without an explicit Loader is unsafe and deprecated.
        return yaml.safe_load(configFile)
def loadImports(mapping, configDir = '.'):
    """Recursively expand string values that name existing .yaml files.

    Any string value whose file exists under *configDir* and ends in
    '.yaml' is replaced by that file's parsed (and itself expanded)
    contents; everything else is copied through unchanged.
    """
    if not isinstance(mapping, dict):
        # Non-dict leaves (e.g. a parsed scalar document) pass through.
        return mapping
    loaded = mapping.copy()
    parsed = {}
    for key, value in loaded.items():
        if isinstance(value, str):
            if os.path.exists(configDir + '/' + value) and value.split('.')[-1] == 'yaml':
                parsed[key] = loadImports(loadYAML(configDir + '/' + value), configDir)
            else:
                parsed[key] = value
        elif isinstance(value, dict):
            parsed[key] = loadImports(value, configDir)
        else:
            parsed[key] = value
    return parsed
def loadConfig(path = 'main.yaml'):
    """Load the layered configuration and return it as nested namedtuples.

    Precedence (lowest to highest): BUILT_IN_DEFAULTS, the YAML file at
    *path* (with .yaml imports expanded), then environment variables.
    The log level name is converted to its numeric value, and
    'debugging' is forced on when the level is DEBUG or lower.
    """
    configDir = os.path.dirname(path)
    loadedConfig = loadImports(loadYAML(path), configDir = configDir)
    config = recursivelyUpdateDict(BUILT_IN_DEFAULTS, loadedConfig)
    config = updateFromEnv(config)
    config['logging']['loglvl'] = parseLogLevel(config['logging']['loglvl']) # Parse the loglvl
    if config['logging']['loglvl'] <= 10:
        config['logging']['debugging'] = True
    return createNamespace(config) # Return the config for good measure
def loadFromEnv(key):
    """Return the value of environment variable *key*, or None when unset."""
    return os.environ.get(key)
def updateFromEnv(config, namespace=None):
    """Override leaf values of *config* from matching environment variables.

    A nested key path like ['logging']['logfile'] is looked up as
    LOGGING_LOGFILE. Values taken from the environment are strings.
    Returns a new dict (shallow copies per level); falsy/unset variables
    leave the existing value in place.
    """
    # Fix: avoid a mutable default argument; the list was never mutated,
    # but None-plus-initialisation is the safe idiom.
    if namespace is None:
        namespace = []
    newConfig = config.copy()
    for key, value in config.items():
        if not isinstance(value, dict):
            configVar = '_'.join(namespace + [key.upper()])
            env = loadFromEnv(configVar)
            if env:
                newConfig[key] = env
        else:
            newConfig[key] = updateFromEnv(value, namespace=namespace + [key.upper()])
    return newConfig
# Resolve the directory that holds config.yaml, in increasing priority:
# config_stub.yaml next to the process, then the META_CONFIG_DIRECTORY
# environment variable, finally the built-in default.
configPath = None
if os.path.exists('config_stub.yaml'):
    configPath = loadYAML('config_stub.yaml').get('config_directory', None)
if loadFromEnv('META_CONFIG_DIRECTORY'):
    configPath = loadFromEnv('META_CONFIG_DIRECTORY')
if not configPath:
    configPath = BUILT_IN_DEFAULTS['meta']['config_directory']
def getParentModule(name):
    """Return the already-imported module called *name*, or None."""
    return sys.modules.get(name)
def loadByName(name, root = None):
    """Resolve a dotted path like 'logging.loglvl' against *root*.

    *root* may be an object or the name of an already-imported module.
    Returns None when the final attribute is missing; raises KeyError
    when *root* (or an intermediate lookup result) is missing/falsy.
    """
    if isinstance(root, str):
        root = getParentModule(root)
    if root:
        sub = name.split('.')
        if len(sub) > 1:
            # Peel off the first path segment and recurse on the rest.
            return loadByName('.'.join(sub[1:]), root = getattr(root, sub[0], None))
        else:
            return getattr(root, name, None)
    else:
        raise KeyError('%s not found in config!'%name)
config = loadConfig(configPath + '/config.yaml') | StarcoderdataPython |
1624772 | <gh_stars>0
import io
import itertools
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
def plot_to_image(figure, file_name=None):
    """Converts the matplotlib plot specified by 'figure' to a PNG image and
    returns it. The supplied figure is closed and inaccessible after this call.

    Args:
        figure: the matplotlib Figure to rasterize.
        file_name: optional path; when given, the plot is also written there.

    Returns:
        A rank-4 tensor of shape (1, height, width, 4) — the RGBA PNG decode
        with a leading batch dimension.
    """
    # Save the plot to a file
    if file_name:
        plt.savefig(file_name)
    # Save the plot to a PNG in memory.
    buf = io.BytesIO()
    plt.savefig(buf, format="png")
    # Closing the figure prevents it from being displayed directly inside
    # the notebook.
    plt.close(figure)
    buf.seek(0)
    # Convert PNG buffer to TF image
    image = tf.image.decode_png(buf.getvalue(), channels=4)
    # Add the batch dimension
    image = tf.expand_dims(image, 0)
    return image
def plot_confusion_matrix(cm, class_names):
    """Returns a matplotlib figure containing the plotted confusion matrix.

    Args:
      cm (array, shape = [n, n]): a confusion matrix of integer classes
      class_names (array, shape = [n]): String names of the integer classes

    Returns:
      matplotlib.figure.Figure with the row-normalized matrix rendered as
      a heatmap plus per-cell text labels.
    """
    figure = plt.figure(figsize=(len(class_names), len(class_names)))
    plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues)
    plt.title("Confusion matrix")
    plt.colorbar()
    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names, rotation=45, ha="right")
    plt.yticks(tick_marks, class_names)
    # Normalize the confusion matrix (per true-class row) to 2 decimals.
    cm = np.around(cm.astype("float") / cm.sum(axis=1)[:, np.newaxis], decimals=2)
    # Use white text if squares are dark; otherwise black.
    threshold = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        color = "white" if cm[i, j] > threshold else "black"
        plt.text(j, i, cm[i, j], horizontalalignment="center", color=color)
    plt.tight_layout()
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    return figure
| StarcoderdataPython |
3239375 | <gh_stars>0
from django.apps import AppConfig
class LocalusersConfig(AppConfig):
    """AppConfig for the ``localusers`` Django application."""
    name = 'localusers'
| StarcoderdataPython |
107896 | # -*- coding: utf-8 -*-
import json
import os
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import pandas as pd
from sklearn.model_selection import train_test_split
import csv
import pickle
import src.data.unify_datasets as unify
@click.command()
@click.option('--dataset', help='Dataset which you like to preprocess')
def main(dataset):
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).
    """
    # CLI entry point: delegates validation and the actual load/split
    # work to trigger_load_dataset.
    trigger_load_dataset(dataset)
def trigger_load_dataset(dataset):
    """Validate *dataset* against the DATASETS env variable, then load it.

    Raises ValueError when the dataset is not configured.
    """
    logger = logging.getLogger(__name__)
    # Load dot environment file
    load_dotenv(find_dotenv())
    datasets = os.getenv("DATASETS")
    # NOTE(review): DATASETS is a single string, so `in` is a substring
    # test — e.g. 'wdc' would be accepted because 'wdc_ziqi' is listed.
    # Presumably a membership test on a delimited list was intended; verify.
    if dataset not in datasets:
        msg = 'Dataset {} is not defined!'.format(dataset)
        logger.error(msg)
        raise ValueError(msg)
    else:
        load_and_split_dataset(dataset)
def load_and_split_dataset(dataset):
    """Load *dataset* from DATA_DIR/data/raw, split it and persist the splits.

    Three cases: (1) 'wdc_ziqi' ships pre-split JSON-lines files; (2) a
    'split' directory already holds train/test CSVs; (3) otherwise the raw
    CSV is loaded and split here. Non-'wdc_ziqi' datasets additionally get
    a validate split carved out of train. Each split is pickled via
    persist_dataset.
    """
    logger = logging.getLogger(__name__)
    data_dir = os.environ['DATA_DIR']
    data_dir = Path(data_dir)
    path_to_raw_data = data_dir.joinpath('data/raw', dataset)
    dataset_collector = {}
    # Data is already split into train/validation/test + provided in json format
    if dataset == 'wdc_ziqi':
        dict_files = {'train': 'train.json', 'validate': 'validation.json', 'test': 'task2_testset_with_labels.json'}
        for value in dict_files:
            path_to_file = path_to_raw_data.joinpath(dict_files[value])
            df_data = load_line_json(path_to_file)
            dataset_collector[value] = unify.reduce_schema(df_data, dataset)
        logger.info('Dataset {} loaded!'.format(dataset))
    elif os.path.exists(path_to_raw_data.joinpath('split')):
        logger.info('Dataset {} is already split!'.format(dataset))
        for dataset_type in ['train', 'test']:
            train_dataset_name = 'split/{}_data_{}.csv'.format(dataset, dataset_type)
            path_to_raw_dataset = path_to_raw_data.joinpath(train_dataset_name)
            df_raw_data = pd.read_csv(path_to_raw_dataset.absolute(), sep=',', encoding='utf-8', quotechar='"',
                                      quoting=csv.QUOTE_ALL)
            logger.info('Dataset {} loaded!'.format(dataset))
            dataset_collector[dataset_type] = unify.reduce_schema(df_raw_data, dataset)
            logger.info('Dataset {} reduced to target schema!'.format(dataset))
    else:
        logger.info('Dataset {} is not split yet!'.format(dataset))
        dataset_file = '{}_data_raw.csv'.format(dataset)
        path_to_raw_dataset = path_to_raw_data.joinpath(dataset_file)
        df_raw_data = pd.read_csv(path_to_raw_dataset.absolute(), sep=',', encoding='utf-8', quotechar='"',
                                  quoting=csv.QUOTE_ALL)
        logger.info('Dataset {} loaded!'.format(dataset))
        df_data_reduced_schema = unify.reduce_schema(df_raw_data, dataset)
        logger.info('Dataset {} reduced to target schema!'.format(dataset))
        dataset_collector['train'], dataset_collector['test'] = split_dataset(df_data_reduced_schema)
    # wdc_ziqi already ships its own validation split.
    if dataset != 'wdc_ziqi':
        dataset_collector['train'], dataset_collector['validate'] = split_dataset(dataset_collector['train'])
        logger.info('Dataset {} split into train, validate, test!'.format(dataset))
    for split in dataset_collector:
        persist_dataset(dataset_collector[split], dataset, split)
def load_line_json(path):
    """Load a JSON-lines file into a single DataFrame (one row per line).

    Each line becomes a one-row frame (index 0, columns = the object's
    keys), matching the historical DataFrame.append behaviour; the frames
    are concatenated in one pass.

    :param path: path to the JSON-lines file
    :return: pandas.DataFrame (empty when the file has no lines)
    """
    logger = logging.getLogger(__name__)
    frames = []
    line_count = 0
    with open(path) as fp:
        for line in fp:
            line_count += 1
            json_object = json.loads(line)
            frames.append(
                pd.DataFrame.from_dict(json_object, orient='index').transpose())
    # Fix: previous implementation logged lines+1 (it counted the EOF read).
    logger.info('Read {} lines from {}!'.format(line_count, path))
    if not frames:
        return pd.DataFrame()
    # Fix: DataFrame.append was deprecated and removed in pandas 2.0, and
    # appending in a loop was O(n^2); concatenate once instead.
    return pd.concat(frames)
def split_dataset(df_dataset):
    """Split dataset into train, test and validation set

    Performs a stratified 80:20 split on 'path_list'. Classes with a
    single row cannot be stratified, so those rows (restricted to the
    general columns) are appended to the training set afterwards.
    Returns (train_df, test_df).
    """
    # split into training and testing set 80:20 (stratify)
    random = int(os.getenv("RANDOM_STATE"))
    # Intermediate Solution: if only one leaf element exists, these elements are added to the trainings set
    # To-Do: Find more sustainable solution
    # NOTE(review): value_counts() is re-evaluated inside the comprehension
    # for every key (O(n^2)); computing it once would suffice.
    only_one_root_element = [k for k, v in df_dataset['path_list'].value_counts().items()
                             if df_dataset['path_list'].value_counts()[k] < 2]
    df_one_element = df_dataset[df_dataset['path_list'].isin(only_one_root_element)]
    df_dataset = df_dataset[~df_dataset['path_list'].isin(only_one_root_element)]
    general_columns = ['title', 'description', 'brand', 'category', 'path_list']
    available_columns = [column for column in df_dataset.columns if column in general_columns]
    df_data_train, df_data_test, df_data_train_target, df_data_test_target = train_test_split(
        df_dataset, df_dataset[available_columns], test_size=0.2,
        random_state=random, shuffle=True, stratify=df_dataset[['path_list']])
    df_data_train = pd.concat([df_data_train, df_one_element[available_columns]])
    return df_data_train, df_data_test
def persist_dataset(df_dataset, dataset, split_name):
    """Pickle one split of *dataset* under DATA_DIR/data/processed/...

    Creates every missing directory on the way to
    data/processed/<dataset>/split/raw/<split_name>_data_<dataset>.pkl.

    :param df_dataset: the split payload to pickle
    :param dataset: dataset identifier used in path and file name
    :param split_name: e.g. 'train', 'validate' or 'test'
    """
    logger = logging.getLogger(__name__)
    data_dir = Path(os.environ['DATA_DIR'])
    relative_path = 'data/processed/{}/split/raw/{}_data_{}.pkl'.format(
        dataset, split_name, dataset)
    file_path = data_dir.joinpath(relative_path)
    # Create the directory chain if the target file does not exist yet.
    if not os.path.exists(file_path):
        logger.info('Path {} does not exist!'.format(file_path))
        file_path = data_dir
        # The last component is the file name, so it is excluded here.
        for part in relative_path.split('/')[:-1]:
            file_path = file_path.joinpath(part)
            if not os.path.exists(file_path):
                os.mkdir(file_path)
                logger.info('Path {} created!'.format(file_path))
        file_path = file_path.joinpath('{}_data_{}.pkl'.format(split_name, dataset))
    with open(file_path, 'wb') as f:
        pickle.dump(df_dataset, f)
    # Bug fix: the original format string had two placeholders but three
    # arguments, so the logged "path" was actually the dataset name.
    logger.info('{} split of dataset {} saved at {}!'.format(split_name, dataset, file_path))
if __name__ == '__main__':
    # Configure root logging before the Click command takes over.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    main()
| StarcoderdataPython |
152849 | <gh_stars>0
"""
General utility routines shared by various web related modules.
"""
import Cookie
import urllib
import time
from datetime import datetime
try:
from email.utils import parsedate
except ImportError: # Python < 2.5
from email.Utils import parsedate
from tiddlyweb.serializer import Serializer
from tiddlyweb.web.http import HTTP415, HTTP400
from tiddlyweb.util import sha
def get_route_value(environ, name):
"""
Retrieve and decode from UTF-8 data provided in WSGI route.
If name is not present in the route, allow KeyError to raise.
If the provided data is not URI escaped UTF-8, raise and HTTP400
"""
try:
value = environ['wsgiorg.routing_args'][1][name]
value = urllib.unquote(value).decode('utf-8')
except UnicodeDecodeError, exc:
raise HTTP400('incorrect encoding for %s, UTF-8 required: %s',
exc)
return value
def get_serialize_type(environ):
    """
    Look in the environ to determine which serializer
    we should use for this request.

    Walks the negotiated 'tiddlyweb.type' list in preference order;
    falls back to the configured default serializer for GET requests,
    and raises HTTP415 when an unknown extension was requested.
    """
    config = environ['tiddlyweb.config']
    accept = environ.get('tiddlyweb.type')[:]
    ext = environ.get('tiddlyweb.extension')
    serializers = config['serializers']
    serialize_type, mime_type = None, None

    # Normalize a single media type to a one-element list so the loop
    # below works uniformly. (Idiom fix: isinstance over type() ==.)
    if isinstance(accept, str):
        accept = [accept]

    while len(accept) and serialize_type is None:
        candidate_type = accept.pop(0)
        try:
            serialize_type, mime_type = serializers[candidate_type]
        except KeyError:
            pass
    if not serialize_type:
        if ext:
            raise HTTP415('%s type unsupported' % ext)
        # If we are a PUT and we haven't found a serializer, don't
        # state a default as that makes no sense.
        if environ['REQUEST_METHOD'] == 'GET':
            default_serializer = config['default_serializer']
            serialize_type, mime_type = serializers[default_serializer]
    return serialize_type, mime_type
def handle_extension(environ, resource_name):
    """
    Look for an extension on the provided resource_name and
    trim it off to give the "real" resource_name.

    Only extensions registered in config['extension_types'] are stripped;
    when stripped, 'tiddlyweb.extension' is removed from the environ so
    later handlers do not strip it again.
    """
    extension = environ.get('tiddlyweb.extension')
    extension_types = environ['tiddlyweb.config']['extension_types']
    if extension and extension in extension_types:
        try:
            resource_name = resource_name[0:resource_name.rindex('.'
                + extension)]
        except ValueError:
            # '.<extension>' was not actually part of the name; leave it.
            pass
        else:
            try:
                del(environ['tiddlyweb.extension'])
            except KeyError:
                pass

    return resource_name
def http_date_from_timestamp(timestamp):
    """
    Turn a modifier or created tiddler timestamp (YYYYMMDDHHMM or
    YYYYMMDDHHMMSS) into a proper formatted HTTP date. Unparseable
    input falls back to the current UTC time.
    """
    timestamp_datetime = None
    for pattern in ('%Y%m%d%H%M', '%Y%m%d%H%M%S'):
        try:
            timestamp_datetime = datetime(*time.strptime(timestamp, pattern)[0:6])
            break
        except ValueError:
            continue
    if timestamp_datetime is None:
        timestamp_datetime = datetime.utcnow()
    return timestamp_datetime.strftime('%a, %d %b %Y %H:%M:%S GMT')
def datetime_from_http_date(http_datestring):
    """
    Turn an HTTP formatted date into a datetime object, ignoring any
    trailing ';'-separated parameters.
    """
    date_part = http_datestring.split(';', 1)[0].strip()
    return datetime(*parsedate(date_part)[:6])
def make_cookie(name, value, mac_key=None, path=None,
        expires=None, httponly=True, domain=None):
    """
    Create a cookie string, optionally with a MAC, path and
    expires value. Expires is in seconds.

    When mac_key is given, the cookie value becomes
    '<value>:<sha(value + mac_key)>' so later requests can be verified.
    """
    cookie = Cookie.SimpleCookie()
    value = value.encode('utf-8')
    if mac_key:
        secret_string = sha('%s%s' % (value, mac_key)).hexdigest()
        cookie[name] = '%s:%s' % (value, secret_string)
    else:
        cookie[name] = value
    if path:
        cookie[name]['path'] = path
    if expires:
        cookie[name]['max-age'] = expires
    if domain:
        cookie[name]['domain'] = domain
    output = cookie.output(header='').lstrip().rstrip()
    # The httponly flag is appended manually rather than set through
    # SimpleCookie attributes.
    if httponly:
        output += '; httponly'
    return output
def server_base_url(environ):
    """
    Using information in tiddlyweb.config, construct
    the base URL of the server, sans the trailing /.
    """
    # scheme://host[:port] plus any mount-point prefix.
    return '%s%s' % (server_host_url(environ), _server_prefix(environ))
def server_host_url(environ):
    """
    Generate the scheme and host portion of our server url, omitting
    the port when it is a scheme default (80 or 443).
    """
    host_info = environ['tiddlyweb.config']['server_host']
    port = str(host_info['port'])
    port_part = '' if port in ('80', '443') else ':%s' % port
    return '%s://%s%s' % (host_info['scheme'], host_info['host'], port_part)
def _server_prefix(environ):
    """
    Get the server_prefix out of tiddlyweb.config, defaulting to ''
    when unset (also tolerates a missing config entirely).
    """
    config = environ.get('tiddlyweb.config', {})
    return config.get('server_prefix', '')
def encode_name(name):
    """
    Encode a unicode as utf-8 and then url encode that
    string. Use for entity titles in URLs.

    safe='' means '/' is escaped too, so names cannot inject path segments.
    """
    return urllib.quote(name.encode('utf-8'), safe='')
def html_encode(text):
    """
    Encode &, < and > entities in text that will
    be used in/as HTML.
    """
    # '&' must be replaced first so entity ampersands are not re-escaped.
    for raw, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')):
        text = text.replace(raw, entity)
    return text
def escape_attribute_value(text):
    """
    escape common character entities, including double quotes
    in attribute values

    This assumes values are enclosed in double quotes (key="value").
    None (or any non-string) is passed through unchanged.
    """
    try:
        return html_encode(text).replace('"', '&quot;')
    except AttributeError: # value might be None
        return text
def entity_etag(environ, entity):
    """
    Construct an etag from the JSON rep of an entity.

    The etag is a quoted SHA over the JSON serialization plus the
    negotiated mime type (sans parameters), so it varies per
    representation.
    """
    try:
        _, mime_type = get_serialize_type(environ)
        mime_type = mime_type.split(';', 1)[0].strip()
    except (AttributeError, TypeError):
        # No negotiated type available; hash without a mime component.
        mime_type = ''
    # Cache the JSON serializer in the environ so repeated etag
    # calculations within one request reuse it.
    if 'tiddlyweb.etag_serializer' in environ:
        serializer = environ['tiddlyweb.etag_serializer']
    else:
        serializer = Serializer('json', environ)
        environ['tiddlyweb.etag_serializer'] = serializer
    serializer.object = entity
    content = serializer.to_string()
    return '"%s"' % sha(content + mime_type).hexdigest()
def bag_etag(environ, bag):
    """
    Construct an etag for a bag (delegates to entity_etag).
    """
    return entity_etag(environ, bag)
def bag_url(environ, bag, full=True):
    """
    Construct a URL for a bag. With full=False a prefix-relative
    path is returned instead of an absolute URL.
    """
    bag_link = 'bags/%s' % encode_name(bag.name)

    if full:
        return '%s/%s' % (server_base_url(environ), bag_link)
    else:
        return '%s/%s' % (_server_prefix(environ), bag_link)
def tiddler_etag(environ, tiddler):
    """
    Construct an etag for a tiddler from the tiddler's attributes,
    but not its text.
    """
    # Temporarily blank the text so only metadata feeds the hash; the
    # original text is restored before returning.
    text = tiddler.text
    tiddler.text = ''
    bag_name = tiddler.bag or ''
    tiddler_id = '"%s/%s/%s:' % (encode_name(bag_name),
            encode_name(tiddler.title), tiddler.revision)
    etag = entity_etag(environ, tiddler)
    tiddler.text = text
    # Prefix the opaque hash with a readable bag/title/revision id.
    etag = etag.replace('"', tiddler_id, 1)
    return etag
def tiddler_url(environ, tiddler, container='bags', full=True):
    """
    Construct a URL for a tiddler. If the tiddler has a _canonical_uri
    field, use that instead.

    container selects 'bags' or 'recipes'; full=False yields a
    prefix-relative path instead of an absolute URL.
    """
    if '_canonical_uri' in tiddler.fields:
        return tiddler.fields['_canonical_uri']

    container_name = tiddler.recipe if container == 'recipes' else tiddler.bag
    tiddler_link = '%s/%s/tiddlers/%s' % (container,
            encode_name(container_name), encode_name(tiddler.title))

    if full:
        return '%s/%s' % (server_base_url(environ), tiddler_link)
    else:
        return '%s/%s' % (_server_prefix(environ), tiddler_link)
def recipe_etag(environ, recipe):
    """
    Construct an etag for a recipe (delegates to entity_etag).
    """
    return entity_etag(environ, recipe)
def recipe_url(environ, recipe, full=True):
    """
    Construct a URL for a recipe. With full=False a prefix-relative
    path is returned instead of an absolute URL.
    """
    recipe_link = 'recipes/%s' % encode_name(recipe.name)

    if full:
        return '%s/%s' % (server_base_url(environ), recipe_link)
    else:
        return '%s/%s' % (_server_prefix(environ), recipe_link)
return '%s/%s' % (_server_prefix(environ), recipe_link)
| StarcoderdataPython |
1739012 | <reponame>linuxonly801/awesome-DeepLearning
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import sys
import math
from collections import defaultdict
from motmetrics.math_util import quiet_divide
import numpy as np
import pandas as pd
import paddle
import paddle.nn.functional as F
from .metrics import Metric
import motmetrics as mm
import openpyxl
metrics = mm.metrics.motchallenge_metrics
mh = mm.metrics.create()
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = ['MCMOTEvaluator', 'MCMOTMetric']
METRICS_LIST = [
'num_frames', 'num_matches', 'num_switches', 'num_transfer', 'num_ascend',
'num_migrate', 'num_false_positives', 'num_misses', 'num_detections',
'num_objects', 'num_predictions', 'num_unique_objects', 'mostly_tracked',
'partially_tracked', 'mostly_lost', 'num_fragmentations', 'motp', 'mota',
'precision', 'recall', 'idfp', 'idfn', 'idtp', 'idp', 'idr', 'idf1'
]
NAME_MAP = {
'num_frames': 'num_frames',
'num_matches': 'num_matches',
'num_switches': 'IDs',
'num_transfer': 'IDt',
'num_ascend': 'IDa',
'num_migrate': 'IDm',
'num_false_positives': 'FP',
'num_misses': 'FN',
'num_detections': 'num_detections',
'num_objects': 'num_objects',
'num_predictions': 'num_predictions',
'num_unique_objects': 'GT',
'mostly_tracked': 'MT',
'partially_tracked': 'partially_tracked',
'mostly_lost': 'ML',
'num_fragmentations': 'FM',
'motp': 'MOTP',
'mota': 'MOTA',
'precision': 'Prcn',
'recall': 'Rcll',
'idfp': 'idfp',
'idfn': 'idfn',
'idtp': 'idtp',
'idp': 'IDP',
'idr': 'IDR',
'idf1': 'IDF1'
}
def parse_accs_metrics(seq_acc, index_name, verbose=False):
    """
    Parse the evaluation indicators of multiple MOTAccumulator

    Returns the motmetrics summary DataFrame; when verbose, also prints
    it rendered with the MOT-challenge column names (NAME_MAP).
    """
    mh = mm.metrics.create()
    summary = MCMOTEvaluator.get_summary(seq_acc, index_name, METRICS_LIST)
    # Recompute OVERALL MOTP as a detection-count weighted average of the
    # per-sequence values (an unweighted mean would bias short sequences).
    summary.loc['OVERALL', 'motp'] = (summary['motp'] * summary['num_detections']).sum() / \
                                     summary.loc['OVERALL', 'num_detections']
    if verbose:
        strsummary = mm.io.render_summary(
            summary, formatters=mh.formatters, namemap=NAME_MAP)
        print(strsummary)

    return summary
def seqs_overall_metrics(summary_df, verbose=False):
    """Aggregate per-sequence metric rows into one extra 'overall_calc' row.

    Counter-style columns are summed directly; ratio-style columns are
    re-derived from the summed counters via ``MCMOTMetricOverall`` helpers.

    Args:
        summary_df: pandas DataFrame of per-sequence metric rows.
        verbose: when True, pretty-print the resulting table.

    Returns:
        Copy of *summary_df* with an appended 'overall_calc' row.
    """
    summed_cols = [
        'num_frames', 'num_matches', 'num_switches', 'num_transfer',
        'num_ascend', 'num_migrate', 'num_false_positives', 'num_misses',
        'num_detections', 'num_objects', 'num_predictions',
        'num_unique_objects', 'mostly_tracked', 'partially_tracked',
        'mostly_lost', 'num_fragmentations', 'idfp', 'idfn', 'idtp'
    ]
    derived_cols = ['motp', 'mota', 'precision', 'recall', 'idp', 'idr', 'idf1']
    calc_df = summary_df.copy()
    # Sum the plain counters first; the derived ratios are computed from them.
    overall = {col: calc_df[col].sum() for col in summed_cols}
    for col in derived_cols:
        # Dispatch to e.g. MCMOTMetricOverall.mota_overall(...)
        overall[col] = getattr(MCMOTMetricOverall, col + '_overall')(
            calc_df, overall)
    calc_df = pd.concat([calc_df, pd.DataFrame(overall, index=['overall_calc'])])
    if verbose:
        host = mm.metrics.create()
        print(mm.io.render_summary(
            calc_df, formatters=host.formatters, namemap=NAME_MAP))
    return calc_df
class MCMOTMetricOverall(object):
    """Namespace of helpers deriving overall MCMOT ratios from summed counters.

    Each ``<name>_overall`` function takes the per-sequence table and the
    dict of summed counters; they are looked up dynamically via
    ``getattr(MCMOTMetricOverall, name + '_overall')``.
    """

    def motp_overall(summary_df, overall_dic):
        # Detection-count-weighted mean of the per-row MOTP values.
        weighted = (summary_df['motp'] * summary_df['num_detections']).sum()
        return quiet_divide(weighted, overall_dic['num_detections'])

    def mota_overall(summary_df, overall_dic):
        # MOTA = 1 - (misses + switches + false positives) / objects
        errors = (overall_dic['num_misses'] + overall_dic['num_switches'] +
                  overall_dic['num_false_positives'])
        return 1. - quiet_divide(errors, overall_dic['num_objects'])

    def precision_overall(summary_df, overall_dic):
        return quiet_divide(
            overall_dic['num_detections'],
            overall_dic['num_false_positives'] + overall_dic['num_detections'])

    def recall_overall(summary_df, overall_dic):
        return quiet_divide(overall_dic['num_detections'],
                            overall_dic['num_objects'])

    def idp_overall(summary_df, overall_dic):
        return quiet_divide(overall_dic['idtp'],
                            overall_dic['idtp'] + overall_dic['idfp'])

    def idr_overall(summary_df, overall_dic):
        return quiet_divide(overall_dic['idtp'],
                            overall_dic['idtp'] + overall_dic['idfn'])

    def idf1_overall(summary_df, overall_dic):
        # IDF1 = 2 * IDTP / (GT objects + predictions)
        return quiet_divide(
            2. * overall_dic['idtp'],
            overall_dic['num_objects'] + overall_dic['num_predictions'])
def read_mcmot_results_union(filename, is_gt, is_ignore):
    """Load MCMOT results with all categories merged into one id space.

    Track ids are offset per category so they never collide across classes.

    Args:
        filename: comma-separated result file
            (frame, id, x, y, w, h, score, cls).
        is_gt: treat the file as ground truth (drops cls 0, shifts cls by -1,
            forces score to 1).
        is_ignore: when True, return an empty dict (ignore regions unused).

    Returns:
        dict mapping frame id -> list of (tlwh, track_id, cls_id, score).
    """
    results_dict = dict()
    if not os.path.isfile(filename):
        return results_dict
    all_result = np.loadtxt(filename, delimiter=',')
    # NOTE(review): a single-row file yields a 1-D array, so shape[1] would
    # raise IndexError here (behavior kept identical to the original).
    if all_result.shape[0] == 0 or all_result.shape[1] < 7:
        return results_dict
    if is_ignore:
        return results_dict
    if is_gt:
        # only for test use
        all_result = all_result[all_result[:, 7] != 0]
        all_result[:, 7] = all_result[:, 7] - 1
        if all_result.shape[0] == 0:
            return results_dict
    offset = 0
    per_class = []
    for cls_id in np.unique(all_result[:, 7]):
        block = all_result[all_result[:, 7] == cls_id]
        block[:, 1] = block[:, 1] + offset
        # make sure track id different between every category
        offset = max(np.unique(block[:, 1])) + 1
        per_class.append(block)
    for row in np.concatenate(per_class):
        frame_id = int(row[0])
        if frame_id < 1:
            continue
        score = 1 if is_gt else float(row[6])
        tlwh = tuple(map(float, row[2:6]))
        results_dict.setdefault(frame_id, list()).append(
            (tlwh, int(row[1]), int(row[7]), score))
    return results_dict
def read_mcmot_results(filename, is_gt, is_ignore):
    """Load MCMOT results keyed by class, then by frame.

    Args:
        filename: comma-separated result file
            (frame, id, x, y, w, h, score, cls).
        is_gt: treat the file as ground truth (score forced to 1, class id
            shifted down by one -- test-data convention).
        is_ignore: unused; kept for interface parity with the union reader.

    Returns:
        dict mapping cls_id -> {frame_id: [(tlwh, track_id, score), ...]}.
    """
    results_dict = dict()
    if not os.path.isfile(filename):
        return results_dict
    with open(filename, 'r') as f:
        for line in f.readlines():
            fields = line.strip().split(',')
            if len(fields) < 7:
                continue
            frame_id = int(fields[0])
            if frame_id < 1:
                continue
            cls_id = int(fields[7])
            if is_gt:
                score = 1
                # only for test use
                cls_id -= 1
            else:
                score = float(fields[6])
            frame_dict = results_dict.setdefault(cls_id, dict())
            tlwh = tuple(map(float, fields[2:6]))
            frame_dict.setdefault(frame_id, list()).append(
                (tlwh, int(fields[1]), score))
    return results_dict
def read_results(filename,
                 data_type,
                 is_gt=False,
                 is_ignore=False,
                 multi_class=False,
                 union=False):
    """Dispatch to the appropriate MCMOT result reader.

    Args:
        filename: path of the result/ground-truth file.
        data_type: annotation format; must be 'mcmot' or 'lab'.
        is_gt / is_ignore: forwarded to the selected reader.
        multi_class: must be True for MCMOT data (class ids required).
        union: evaluate all categories jointly (unique ids across classes)
            instead of separately per category.

    Raises:
        ValueError: on unknown data_type or multi_class=False.
    """
    if data_type not in ('mcmot', 'lab'):
        raise ValueError('Unknown data type: {}'.format(data_type))
    if not multi_class:
        raise ValueError('multi_class: {}, MCMOT should have cls_id.'.format(
            multi_class))
    # Union reader keeps track ids unique across categories; the plain
    # reader evaluates each category separately.
    reader = read_mcmot_results_union if union else read_mcmot_results
    return reader(filename, is_gt, is_ignore)
def unzip_objs(objs):
    """Split (tlwh, track_id, score) tuples into three parallel collections.

    Returns:
        (tlwhs, ids, scores) where tlwhs is a float ndarray of shape (N, 4).
    """
    if objs:
        tlwhs, ids, scores = zip(*objs)
    else:
        tlwhs, ids, scores = [], [], []
    return np.asarray(tlwhs, dtype=float).reshape(-1, 4), ids, scores
def unzip_objs_cls(objs):
    """Split (tlwh, track_id, cls_id, score) tuples into parallel arrays.

    Returns:
        (tlwhs, ids, cls, scores); tlwhs is a float ndarray of shape (N, 4),
        the rest are 1-D numpy arrays.
    """
    if objs:
        tlwhs, ids, cls, scores = zip(*objs)
    else:
        tlwhs, ids, cls, scores = [], [], [], []
    tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
    return tlwhs, np.array(ids), np.array(cls), np.array(scores)
class MCMOTEvaluator(object):
    """Evaluate one sequence of multi-class multi-object tracking results
    against its ground truth, producing one motmetrics accumulator per class.
    """
    def __init__(self, data_root, seq_name, data_type, num_classes):
        # data_root: directory of the per-sequence result files
        # seq_name: sequence name (without extension)
        # data_type: annotation format; only 'mcmot' is supported
        # num_classes: number of object categories to evaluate
        self.data_root = data_root
        self.seq_name = seq_name
        self.data_type = data_type
        self.num_classes = num_classes
        self.load_annotations()
        self.reset_accumulator()
        self.class_accs = []
    def load_annotations(self):
        """Resolve the ground-truth file path for this sequence."""
        assert self.data_type == 'mcmot'
        # GT is expected two directory levels above data_root, in 'sequences'.
        self.gt_filename = os.path.join(self.data_root, '../', '../',
                                        'sequences',
                                        '{}.txt'.format(self.seq_name))
    def reset_accumulator(self):
        """Start a fresh accumulator (called once per evaluated class)."""
        import motmetrics as mm
        mm.lap.default_solver = 'lap'
        self.acc = mm.MOTAccumulator(auto_id=True)
    def eval_frame_dict(self, trk_objs, gt_objs, rtn_events=False, union=False):
        """Feed one frame of tracker and GT boxes into the accumulator.

        trk_objs/gt_objs: lists of (tlwh, id[, cls], score) tuples.
        union: when True, objects carry class ids and cross-class pairs are
        made unmatchable by setting their IoU distance to NaN.
        """
        import motmetrics as mm
        mm.lap.default_solver = 'lap'
        if union:
            trk_tlwhs, trk_ids, trk_cls = unzip_objs_cls(trk_objs)[:3]
            gt_tlwhs, gt_ids, gt_cls = unzip_objs_cls(gt_objs)[:3]
            # get distance matrix
            iou_distance = mm.distances.iou_matrix(
                gt_tlwhs, trk_tlwhs, max_iou=0.8)
            # Set the distance between objects of different categories to nan
            gt_cls_len = len(gt_cls)
            trk_cls_len = len(trk_cls)
            # When the number of GT or Trk is 0, iou_distance dimension is (0,0)
            if gt_cls_len != 0 and trk_cls_len != 0:
                # Broadcast class ids into a (gt, trk) grid for comparison.
                gt_cls = gt_cls.reshape(gt_cls_len, 1)
                gt_cls = np.repeat(gt_cls, trk_cls_len, axis=1)
                trk_cls = trk_cls.reshape(1, trk_cls_len)
                trk_cls = np.repeat(trk_cls, gt_cls_len, axis=0)
                iou_distance = np.where(gt_cls == trk_cls, iou_distance, np.nan)
        else:
            trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
            gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
            # get distance matrix
            iou_distance = mm.distances.iou_matrix(
                gt_tlwhs, trk_tlwhs, max_iou=0.8)
        self.acc.update(gt_ids, trk_ids, iou_distance)
        if rtn_events and iou_distance.size > 0 and hasattr(self.acc,
                                                            'mot_events'):
            events = self.acc.mot_events  # only supported by https://github.com/longcw/py-motmetrics
        else:
            events = None
        return events
    def eval_file(self, result_filename):
        """Evaluate *result_filename* against GT, one accumulator per class.

        Returns the list of per-class MOTAccumulator objects.
        """
        # evaluation of each category
        gt_frame_dict = read_results(
            self.gt_filename,
            self.data_type,
            is_gt=True,
            multi_class=True,
            union=False)
        result_frame_dict = read_results(
            result_filename,
            self.data_type,
            is_gt=False,
            multi_class=True,
            union=False)
        for cid in range(self.num_classes):
            self.reset_accumulator()
            cls_result_frame_dict = result_frame_dict.setdefault(cid, dict())
            cls_gt_frame_dict = gt_frame_dict.setdefault(cid, dict())
            # only labeled frames will be evaluated
            frames = sorted(list(set(cls_gt_frame_dict.keys())))
            for frame_id in frames:
                trk_objs = cls_result_frame_dict.get(frame_id, [])
                gt_objs = cls_gt_frame_dict.get(frame_id, [])
                self.eval_frame_dict(trk_objs, gt_objs, rtn_events=False)
            self.class_accs.append(self.acc)
        return self.class_accs
    @staticmethod
    def get_summary(accs,
                    names,
                    metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1',
                             'precision', 'recall')):
        """Compute a metrics table over *accs* with an extra OVERALL row."""
        import motmetrics as mm
        mm.lap.default_solver = 'lap'
        names = copy.deepcopy(names)
        if metrics is None:
            metrics = mm.metrics.motchallenge_metrics
        metrics = copy.deepcopy(metrics)
        mh = mm.metrics.create()
        summary = mh.compute_many(
            accs, metrics=metrics, names=names, generate_overall=True)
        return summary
    @staticmethod
    def save_summary(summary, filename):
        """Write the summary DataFrame to an Excel file."""
        import pandas as pd
        writer = pd.ExcelWriter(filename)
        summary.to_excel(writer)
        # NOTE(review): ExcelWriter.save() is deprecated/removed in newer
        # pandas (use close()) -- confirm the pinned pandas version.
        writer.save()
class MCMOTMetric(Metric):
    """Metric wrapper that accumulates MCMOT evaluation over many sequences."""
    def __init__(self, num_classes, save_summary=False):
        # num_classes: number of object categories evaluated per sequence
        # save_summary: currently unused here; kept for interface parity
        self.num_classes = num_classes
        self.save_summary = save_summary
        self.MCMOTEvaluator = MCMOTEvaluator
        self.result_root = None
        self.reset()
        # seqs_overall[row] collects the row-th summary line of every
        # sequence (rows 0..num_classes-1 are classes, row num_classes is
        # the per-sequence OVERALL line).
        self.seqs_overall = defaultdict(list)
    def reset(self):
        """Clear all per-sequence accumulators."""
        self.accs = []
        self.seqs = []
    def update(self, data_root, seq, data_type, result_root, result_filename):
        """Evaluate one sequence and stash its per-class summary rows."""
        evaluator = self.MCMOTEvaluator(data_root, seq, data_type,
                                        self.num_classes)
        seq_acc = evaluator.eval_file(result_filename)
        self.accs.append(seq_acc)
        self.seqs.append(seq)
        self.result_root = result_root
        cls_index_name = [
            '{}_{}'.format(seq, i) for i in range(self.num_classes)
        ]
        summary = parse_accs_metrics(seq_acc, cls_index_name)
        summary.rename(
            index={'OVERALL': '{}_OVERALL'.format(seq)}, inplace=True)
        for row in range(len(summary)):
            self.seqs_overall[row].append(summary.iloc[row:row + 1])
    def accumulate(self):
        """Aggregate the stored rows into one overall summary per class."""
        self.cls_summary_list = []
        for row in range(self.num_classes):
            # Skip categories that are not among the 4 vehicle-related
            # classes when evaluating a 10-class dataset.
            # NOTE(review): class ids [3, 4, 5, 8] are hard-coded -- confirm
            # they match the intended vehicle categories of this dataset.
            if self.num_classes == 10 and row not in [3,4,5,8]:
                continue
            seqs_cls_df = pd.concat(self.seqs_overall[row])
            seqs_cls_summary = seqs_overall_metrics(seqs_cls_df)
            cls_summary_overall = seqs_cls_summary.iloc[-1:].copy()
            cls_summary_overall.rename(
                index={'overall_calc': 'overall_calc_{}'.format(row)},
                inplace=True)
            self.cls_summary_list.append(cls_summary_overall)
    def log(self):
        """Print the per-sequence and per-class overall tables."""
        # Row index num_classes holds each sequence's OVERALL line.
        seqs_summary = seqs_overall_metrics(
            pd.concat(self.seqs_overall[self.num_classes]), verbose=True)
        class_summary = seqs_overall_metrics(
            pd.concat(self.cls_summary_list), verbose=True)
    def get_results(self):
        # NOTE(review): placeholder -- always returns 1 instead of the
        # computed metrics; confirm whether callers rely on this.
        return 1
| StarcoderdataPython |
4802378 | # coding=utf8
# 라이브러리 가져오기
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import re
from urllib.request import urlretrieve
# Configure a headless Chrome session used to scrape the video pages.
options = webdriver.ChromeOptions()
options.add_argument('headless')  # run without a visible window
options.add_argument('window-size=1920x1080')
options.add_argument("disable-gpu")
options.add_argument('log-level=3')  # suppress chatty Chrome logging
# NOTE(review): expects 'chromedriver.exe' in the working directory (Windows);
# 'chrome_options' is deprecated in Selenium 4 -- confirm pinned version.
driver = webdriver.Chrome('chromedriver.exe', chrome_options=options)
# Module-level flag set when a private-group video needs a login page.
need_loginpage = False
# Download a publicly accessible video.
def download(video_url):
    """Fetch *video_url* with the headless driver and download its media file.

    Picks the HD source when available, otherwise falls back to SD, then
    saves the file under the name extracted from the source URL.
    """
    driver.get(video_url)
    page_source = driver.page_source
    key = "hd_src" if check_isHD(page_source) else "sd_src"
    raw = re.findall(key + r':"[^"\s()]+', page_source)[0]
    video_source = raw.replace('&amp;', '&').replace(key + ':"', '')
    urlretrieve(video_source, get_videoName(video_source))
    print("다운로드 완료")
# Download a video that requires a logged-in session.
def download_private(video_url):
    """Open a visible browser so the user can log in, then download the video.

    Blocks (busy-waits) until check_login reports success, reloads the page
    with the authenticated session, extracts the HD or SD source URL and
    downloads it.
    """
    # Message (Korean): "This video requires login."
    print("로그인이 필요한 영상입니다.")
    login_driver = webdriver.Chrome('chromedriver.exe')
    login_driver.get(video_url)
    # Poll until the user has logged in through the opened browser window.
    while True:
        if check_login(video_url, login_driver):
            break
    login_driver.get(video_url)
    page_source = login_driver.page_source
    if check_isHD(page_source):
        regex = "hd_src:\"[^\"\s()]+"
        video_source = re.findall(regex, page_source)[0].replace('&amp;', '&').replace('hd_src:"', '')
    else:
        regex = "sd_src:\"[^\"\s()]+"
        video_source = re.findall(regex, page_source)[0].replace('&amp;', '&').replace('sd_src:"', '')
    urlretrieve(video_source, get_videoName(video_source))
    # Message (Korean): "Download complete"
    print("다운로드 완료")
    login_driver.quit()
# Check whether the user has logged in.
def check_login(video_url, video_driver):
    """Return True once a logged-in page element (class ``_1k67``) exists.

    *video_url* is unused but kept for interface compatibility with callers.
    """
    try:
        video_driver.find_element_by_class_name("_1k67")
        return True
    except NoSuchElementException:
        return False
# Check whether the video requires a logged-in session.
def check_private(video_url):
    """Return True if the video at *video_url* requires login to view.

    A video is private when the page shows a login form, or when it belongs
    to a closed group (join button present, no <video> element).
    """
    # BUG FIX: without the ``global`` declaration the assignment below
    # created a function-local variable and the module-level flag declared
    # at the top of the file was never updated.
    global need_loginpage
    try:
        driver.find_element_by_class_name("login_page")
    except NoSuchElementException:
        if check_privategroup(video_url):
            need_loginpage = True
            return True
        return False
    return True
# Determine whether the page offers an HD rendition or only SD.
def check_isHD(page_source):
    """Return True when the page source advertises a non-null ``hd_src``."""
    token = re.findall(r"hd_src:[^\s(),]+", page_source)[0]
    return token != 'hd_src:null'
# Extract the video file name from a CDN URL.
def get_videoName(video_source):
    """Return the ``name.ext`` component of *video_source* (before any query)."""
    return re.findall(r"(\w+\.\w+)(?=\?|$)", video_source)[0]
# Check whether the video belongs to a closed (join-to-view) group.
def check_privategroup(video_url):
    """Return True when no <video> element is present but a join button is."""
    try:
        driver.find_element_by_tag_name("video")
    except NoSuchElementException:
        return bool(re.search("joinButton_[0-9]+", driver.page_source))
    return False
# Program entry point.
if __name__ == '__main__':
    print('Facebook Video Downloader')
    # Prompt (Korean): "Please enter the link of the video to download"
    video_url = input('다운로드 하실 영상의 링크를 입력해주세요 : ')
    driver.get(video_url)
    # Private videos need a visible browser for interactive login.
    if check_private(video_url):
        download_private(video_url)
    else:
        download(video_url)
    driver.quit()
3359931 | # -*- coding: utf-8 -*-
from PySide6.QtCore import QEvent
class BlameEvent(QEvent):
    """Custom event requesting the blame view for a file at a revision/line."""
    Type = QEvent.User + 1

    def __init__(self, filePath, rev=None, lineNo=0):
        super().__init__(QEvent.Type(BlameEvent.Type))
        self.filePath, self.rev, self.lineNo = filePath, rev, lineNo
class ShowCommitEvent(QEvent):
    """Custom event requesting that the commit *sha1* be shown."""
    Type = QEvent.User + 2

    def __init__(self, sha1):
        super().__init__(QEvent.Type(ShowCommitEvent.Type))
        self.sha1 = sha1
class OpenLinkEvent(QEvent):
    """Custom event requesting that *link* be opened."""
    Type = QEvent.User + 3

    def __init__(self, link):
        super().__init__(QEvent.Type(OpenLinkEvent.Type))
        self.link = link
class CopyConflictCommit(QEvent):
    """Custom event carrying a conflicting *commit* to be copied."""
    Type = QEvent.User + 4

    def __init__(self, commit):
        super().__init__(QEvent.Type(CopyConflictCommit.Type))
        self.commit = commit
class GitBinChanged(QEvent):
    """Custom event signalling that the configured git executable changed."""
    Type = QEvent.User + 5

    def __init__(self):
        super().__init__(QEvent.Type(GitBinChanged.Type))
1772873 | <reponame>LucasRR94/RPG_Pirates_and_Fishers
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
from Item import *
from Defense import *
import random
import string
def testAssignname_Defense(defense,name,numberofsum,num):
if(len(name) >= 5 and len(name) <= 32):
try:
#print (defense.getDefense())
assert defense.getDefense() == numberofsum
try:
assert defense.getName() == name
except (AssertionError):
print(" \033[91m Error : Defense name:"+str(num))
except(AssertionError):
print("\033[91m Error : Defense failed :"+str(num))
else:
try:
#print (defense.getDefense())
assert defense.getDefense() == numberofsum
except(AssertionError):
print("\033[91m Error : Defense failed :"+str(num))
def testfuncgetDamage(defense,damage,caseStudy):
capacitydefense = defense.getDefense()
if(capacitydefense > damage):
try:
assert defense.reportDamage(damage) == (capacitydefense-damage)
except AssertionError:
return "\033[91m Error in calculum of the values,case of test:"+str(caseStudy)+"\n"
else:
return "\033[93m Passed in test:" + str(caseStudy)
else: # Class call the destructor, because that object not exist anymore.
try:
assert defense.reportDamage(damage) == 0
except AssertionError:
return "\033[91m Error in calculum of the values,case of test:"+str(caseStudy)+"\n"
else:
#print(defense.getDefense())
#print(defense.getDetail())
return "\033[93m Passed in test:"+str(caseStudy)
# Entry point: a handful of hand-written cases followed by 1000 randomized
# rounds of name/damage fuzzing against the Defense class.
if __name__ == "__main__":
    # NOTE(review): sword1 passes a string where a number is expected --
    # presumably exercising invalid input handling; confirm intent.
    sword = Defense("ICE",10,10)
    sword1 = Defense("ICE","NVO",10)
    sword2 = Defense("ICE",2,10)
    sword3 = Defense("ICE",1,10)
    sword4 = Defense("ICE",0,0)
    sword5 = Defense("ICE",100,-110)
    testAssignname_Defense(sword,"ICE",100,1)
    testAssignname_Defense(sword1,"ICE",100,2)
    testAssignname_Defense(sword2,"ICE",20,3)
    testAssignname_Defense(sword3,"ICE",10,4)
    testAssignname_Defense(sword4,"ICE",0,5)
    testAssignname_Defense(sword5,"ICE",0,6)
    for i in range(1000):
        """
        Will generate 1000 diferent names, like capacity of defense
        , and will add in function of testfuncgetDamage
        """
        tam = random.randint(0,100)
        randomnumb1 = random.randint(0,10)
        randomnumb2 = random.randint(0,10)
        wordgen = ''
        for j in range(tam): # just to generate random names
            lettersused = str(string.ascii_uppercase + string.digits + string.ascii_lowercase)
            wordgen += random.choice(lettersused)
        newdefense = Defense(wordgen,randomnumb1,randomnumb2)
        damage_random = random.randint(0,25) # low numbers
        print(testfuncgetDamage(newdefense,damage_random,"L"+str(i)))
        #testfuncgetDamage(newdefense,damage_random,"L"+str(i))
        # A fresh Defense is needed since reportDamage may destroy the object.
        newdefense = Defense(wordgen,randomnumb1,randomnumb2)
        damage_random = random.randint(75,100) # big numbers
        print(testfuncgetDamage(newdefense,damage_random,"B"+str(i)))
        #testfuncgetDamage(newdefense,damage_random,"L"+str(i))
    print("Test has finished")
| StarcoderdataPython |
29629 | <gh_stars>0
from django.dispatch import Signal
badge_awarded = Signal(providing_args=["badge"])
| StarcoderdataPython |
157676 | <reponame>euro-cordex/scheduler
#! /usr/bin/python
# coding: utf-8
"""Scheduler
Classes and methods in :mod:`Scheduler` should create jobscripts
for different schedulers and help submitting and checking them.
"""
import logging
import os
import subprocess
from configobj import ConfigObj
from string import Template
import datetime
import re
import sys
#logging.basicConfig(level=logging.INFO)
# slurm job template
SLURM_TEMPLATE = \
"""@job_shell
#SBATCH --job-name=@job_name # Specify job name
#SBATCH --partition=@partition # Specify partition name
#SBATCH --ntasks=@ntasks # Specify max. number of tasks to be invoked
#SBATCH --mem-per-cpu=@mem_per_cpu # Specify real memory required per CPU in MegaBytes
#SBATCH --time=@time # Set a limit on the total run time
#SBATCH --mail-type=@mail_type # Notify user by email in case of job failure
#SBATCH --account=@account # Charge resources on this project account
#SBATCH --output=@{log_dir}/@{job_name}.o%j # File name for standard output
#SBATCH --error=@{log_dir}/@{job_name}.o%j # File name for standard error output
"""
SLURM_COMMENT = \
"""
# job script created by PyRemo Job Scheduler
""" + \
"# "+str(datetime.datetime.now())+"\n"
# define job states
UNKNOWN = -2
FAILED = -1
COMPLETED = 0
RUNNING = 1
PENDING = 2
LOGLEV = {
COMPLETED :logging.getLevelName('INFO'),
FAILED :logging.getLevelName('ERROR'),
UNKNOWN :logging.getLevelName('WARNING'),
PENDING :logging.getLevelName('INFO'),
RUNNING :logging.getLevelName('INFO')
}
### SLURM definitions ####
SLURM_STATES = {'FAILED' : FAILED,
'COMPLETED': COMPLETED,
'RUNNING' : RUNNING ,
'PENDING' : PENDING }
SLURM_DEFAULTS = {'job_name' :'unknown',
'partition' :'',
'ntasks' :'1',\
'mem_per_cpu' :'1280',
'mail_type' :'FAIL',
'account' :'',
'time' :'',
'log_dir' :''}
SLURM_CONTROL = ['StdErr','StdOut','WorkDir','JobName','Command']
### known Schedulers ####
SCHEDULER = {'SLURM':{'batch' :'sbatch --parsable',\
'accounting':'sacct --parsable2 --format=jobid,elapsed,ncpus,ntasks,state,end,jobname -j',
'control' :'scontrol show jobid -dd',
'tpl' :SLURM_TEMPLATE,
'states' :SLURM_STATES,
'comment' :SLURM_COMMENT,
'ctr_list' :SLURM_CONTROL,
'defaults' :SLURM_DEFAULTS}
}
# pattern to search for in logfiles
ERROR_PATTERN = ['error','failed','exception', 'not found']
class Job():
    """A single batch-scheduler job: jobscript writing, submission, accounting.

    A Job is either created from a template plus header values and commands
    (to write and submit a new jobscript), or attached to an existing
    scheduler job via ``jobid``.  Only the ``sys`` argument is mandatory.

    Written by <NAME>
    Last modified: 06.02.2019
    """

    def __init__(self, sys, jobname='', jobscript=None, jobid=-1, tpl=None,
                 commands='', header_dict=None, delimiter='@', control=None):
        """Determine the appropriate job commands and templates.

        **Arguments:**
            *sys:* the batch scheduler implementation (key of SCHEDULER).
            *jobname:* name used for the jobscript and header. (default: '')
            *jobscript:* filename for the jobscript; defaults to
                ``<cwd>/<jobname>.sh``. (default: *None*)
            *jobid:* JobID in the scheduler for existing jobs. (default: -1)
            *tpl:* template (Template, text, or filename) for the jobscript.
            *commands:* text appended to the jobscript after the header.
            *header_dict:* values used to fill the template header.
            *delimiter:* template placeholder delimiter. (default: '@')
            *control:* pre-parsed scheduler control info. (default: *None*)

        **Raises:**
            *Exception:* if the scheduler implementation is unknown.
        """
        if sys not in SCHEDULER:
            print('Unknown scheduler implementation, must be one of: ' +
                  str(SCHEDULER.keys()))
            raise Exception('Unknown sys: ' + sys)
        self.batch_command = SCHEDULER[sys]['batch']
        self.acct_command = SCHEDULER[sys]['accounting']
        self.contr_command = SCHEDULER[sys]['control']
        # BUG FIX: take a copy -- the original aliased the shared defaults
        # dict, so every Job instance mutated SCHEDULER[sys]['defaults'].
        self.header_dict = dict(SCHEDULER[sys]['defaults'])
        self.comment = SCHEDULER[sys]['comment']
        self.ctr_list = SCHEDULER[sys]['ctr_list']
        self.jobname = jobname
        self.jobscript = jobscript
        self.tpl = tpl
        self.jobid = jobid
        self.commands = commands
        self.delimiter = delimiter
        # BUG FIX: mutable default arguments replaced by None sentinels.
        self.control = control if control is not None else {}
        if not self.tpl:
            self.tpl = SCHEDULER[sys]['tpl']
        if not self.jobscript:
            self.jobscript = os.path.join(os.getcwd(), jobname + '.sh')
        self._init_tpl()
        self.header_dict['job_name'] = self.jobname
        self.header_dict['job_shell'] = '#!/bin/sh'
        if header_dict:
            self.header_dict.update(header_dict)

    def __eq__(self, other):
        """Jobs are considered equal when their jobnames match."""
        return self.jobname == other.jobname

    def _init_tpl(self):
        """Normalize ``self.tpl`` into a string.Template with our delimiter."""
        class TmpTemplate(Template):
            delimiter = self.delimiter
        if type(self.tpl) is str:
            # A string is either a template file path or template text.
            if os.path.isfile(self.tpl):
                self.tpl = TmpTemplate(open(self.tpl).read())
            else:
                self.tpl = TmpTemplate(self.tpl)
        # BUG FIX: the original tested ``type(self.tpl) is not type(Template)``
        # which compares against ``type`` itself and therefore rejected
        # genuine Template instances.
        elif not isinstance(self.tpl, Template):
            raise Exception('unknown type of job template, must be either '
                            'Template or text or valid filename')

    def submit(self, write=False):
        """Submit the jobscript to the scheduler; return the new jobid.

        When *write* is True the jobscript is (re)written first.
        """
        if write:
            self.write_jobscript()
        output = subprocess.Popen(self.batch_command.split() + [self.jobscript],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT)
        stdout, stderr = output.communicate()
        logging.debug('stdout ' + str(stdout))
        # NOTE(review): assumes '--parsable' style output; int() raises if
        # submission fails and prints a non-numeric message -- confirm.
        self.jobid = int(stdout)
        if not stderr:
            logging.info('submitted jobscript: ' + self.jobscript)
            logging.debug('jobid: ' + str(self.jobid))
            self.parse_control()
            for entry in self.control:
                logging.debug('control: ' + str(entry) + ': ' +
                              str(self.control[entry]))
        else:
            logging.error('submitted jobscript: ' + self.jobscript)
            # BUG FIX: the original referenced the undefined attribute
            # ``self.stderr`` here, raising AttributeError on this path.
            logging.error('stderr' + str(stderr))
        return self.jobid

    def write_jobscript(self, header_dict=None):
        """Render the template with header values and write the jobscript.

        Returns the jobscript filename.
        """
        if header_dict:
            self.header_dict.update(header_dict)
        fills = self.header_dict
        logging.info('writing: ' + self.jobscript)
        content = self.tpl.substitute(fills)
        content += self.comment
        content += self.commands
        with open(self.jobscript, "w") as script:
            script.write(content)
        return self.jobscript

    def get_acct(self):
        """Query the scheduler accounting command for this job.

        Returns a dict mapping accounting column names to values
        (e.g. {'JobID': ..., 'State': ..., 'End': ...}).
        """
        jobid = self.jobid
        command = self.acct_command.split(' ') + [str(jobid)]
        output = subprocess.Popen(command,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT)
        stdout, stderr = output.communicate()
        lines = stdout.splitlines()
        # First line holds the '|'-separated header, second the values.
        header = lines[0].decode().split('|')
        data = lines[1].decode().split('|')
        acct = {}
        for title, entry in zip(header, data):
            acct[title] = entry
        logging.debug(str(jobid) + ': ' + str(acct))
        return acct

    def parse_control(self):
        """Run the scheduler control command and parse its key=value pairs.

        Stores and returns the resulting dict as ``self.control``.
        """
        jobid = self.jobid
        command = self.contr_command.split(' ') + [str(jobid)]
        output = subprocess.Popen(command,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT)
        stdout, stderr = output.communicate()
        logging.debug('%s %s', stdout, stderr)
        control = {}
        # BUG FIX: decode the byte output before splitting; the original
        # stored bytes keys (b'StdErr'), so later str-key lookups such as
        # ``'StdErr' in self.control`` never matched.
        for line in stdout.decode().splitlines():
            for sline in line.split():
                data = sline.split('=')
                if len(data) == 2:
                    control[data[0]] = data[1]
        self.control = control
        return control

    def get_log(self):
        """Return a dict of loggable job info (id, script, selected control)."""
        # basic info
        job_log = {'jobid': self.jobid,
                   'jobscript': self.jobscript}
        # add some info from control command
        for ctr in self.control:
            if ctr in self.ctr_list:
                job_log[ctr] = self.control[ctr]
        return job_log

    def grep_log_err(self):
        """Scan the job's stderr log for known error patterns.

        Returns the list of matching lines (empty if no log or no match).
        """
        error = []
        errlog = self.control['StdErr'] if 'StdErr' in self.control else None
        if errlog and os.path.isfile(errlog):
            with open(errlog) as origin_file:
                for line in origin_file:
                    for pattern in ERROR_PATTERN:
                        if re.search(pattern, line, re.IGNORECASE):
                            error.append(line.rstrip())
                            break
        return error
class Scheduler():
    """Manage a collection of Jobs for one HPC batch scheduler.

    Written by <NAME>
    Last modified: 06.02.2019
    """

    def __init__(self, sys, name='', tpl=None, logfile='',
                 job_list=None, header_dict=None):
        """Determine the appropriate scheduler commands and templates.

        **Arguments:**
            *sys:* the batch scheduler implementation (key of SCHEDULER).
            *name:* display name of this scheduler instance. (default: '')
            *tpl:* template file for the scheduler. (default: *None*)
            *logfile:* ini-style file holding jobid information.
            *job_list:* a list of Job objects. (default: *None*)
            *header_dict:* default header values for new jobs.

        **Raises:**
            *Exception:* if the scheduler implementation is unknown.
        """
        self.STAT_STR = '{:<48} | {:>8} | {:<16} | {:<24} | {:<24}'
        if sys not in SCHEDULER:
            print('Unknown scheduler implementation, must be one of: ' +
                  str(SCHEDULER.keys()))
            raise Exception('Unknown sys: ' + sys)
        self.STAT = SCHEDULER[sys]['states']
        self.sys = sys
        self.name = name
        self.logfile = logfile
        # BUG FIX: mutable default arguments ([] / {}) were shared between
        # all Scheduler instances; use None sentinels instead.
        self.job_list = job_list if job_list is not None else []
        self.job_log = ConfigObj(self.logfile)
        self.batch = SCHEDULER[sys]['batch']
        self.acct = SCHEDULER[sys]['accounting']
        self.tpl = SCHEDULER[sys]['tpl']
        self.header_dict = header_dict if header_dict is not None else {}
        if tpl:
            self.tpl = tpl
        if self.logfile:
            self._read_job_log()

    def _init_job_list(self):
        """Rebuild ``self.job_list`` from the scheduler log file contents."""
        self.job_list = []
        for jobname in self.job_log:
            jobid = self.job_log[jobname]['jobid']
            jobscript = self.job_log[jobname]['jobscript']
            self.job_list.append(Job(self.sys, jobname=jobname,
                                     jobid=jobid, jobscript=jobscript,
                                     control=self.job_log[jobname]))

    def _read_job_log(self):
        """Read jobnames and ids from the ini-style log file."""
        logging.debug('reading jobids: ' + str(self.logfile))
        self.job_log = ConfigObj(self.logfile)
        self._init_job_list()

    def _write_job_log(self):
        """Write jobnames and ids of all known jobs to the ini-style log file."""
        logging.debug('writing jobids to ' + str(self.job_log.filename))
        for job in self.job_list:
            self.job_log[job.jobname] = job.get_log()
        self.job_log.write()

    def create_job(self, jobname, jobscript, commands='',
                   header_dict=None, write=True):
        """Create a new Job, register it and (optionally) write its script."""
        # BUG FIX: copy the scheduler-wide defaults; the original aliased
        # ``self.header_dict`` and mutated it with every created job.
        header = dict(self.header_dict)
        if header_dict:
            header.update(header_dict)
        job = Job(self.sys, jobname=jobname, jobscript=jobscript,
                  commands=commands, header_dict=header, tpl=self.tpl)
        self.update_job_list(job)
        if write:
            job.write_jobscript()
        return job

    def update_job_list(self, job):
        """Replace an existing job with the same name, or append a new one."""
        # update job list, if job already in list (Job.__eq__ compares names)
        if job in self.job_list:
            self.job_list = [job if job == x
                             else x for x in self.job_list]
        else:
            self.add_job(job)

    def add_job(self, job):
        """Append *job* to the managed job list."""
        self.job_list.append(job)

    def get_job(self, jobname):
        """Return the managed Job named *jobname*, or None."""
        job = next((j for j in self.job_list if j.jobname == jobname), None)
        return job

    def submit(self, jobname=None):
        """Submit one job (by name) or all managed jobs, then persist the log."""
        if jobname:
            self.get_job(jobname).submit()
        else:
            for job in self.job_list:
                job.submit()
        self._write_job_log()

    def write_jobscripts(self):
        """Write the jobscript of every managed job to disk."""
        for job in self.job_list:
            logging.debug('jobname ' + job.jobname)
            job.write_jobscript()

    def get_jobids(self):
        """Return a dict containing {jobname: jobid} for all managed jobs."""
        # BUG FIX: the original called the non-existent ``self._read_jobids()``
        # and returned the never-assigned ``self.jobids`` attribute.
        if self.logfile:
            self._read_job_log()
        return {job.jobname: job.jobid for job in self.job_list}

    def read_jobids(self):
        # NOTE(review): placeholder kept for interface compatibility.
        pass

    def get_job_list(self, filters=None):
        """Return managed jobs, optionally filtered by jobname substrings."""
        job_list = self.job_list
        # filter job list by filtering jobnames
        if filters:
            job_list = [job for job in self.job_list if any
                        (filter in job.jobname for filter in filters)]
        return job_list

    def get_jobs_acct(self, filters=None):
        """Return a dict containing job accounting information.

        **Arguments:**
            *filters:* list of strings used to filter jobnames before
                accessing the scheduler database.

        **Returns:**
            *jobs_acct:* {jobname: {'State': ..., 'JobID': ..., ...}}.
        """
        jobs_acct = {}
        job_list = self.get_job_list(filters=filters)
        for job in job_list:
            jobs_acct[job.jobname] = job.get_acct()
        return jobs_acct

    def resubmit(self, states=None):
        """Resubmit every job whose accounting state is listed in *states*."""
        states = states if states is not None else []
        jobs_acct = self.get_jobs_acct()
        for jobname in jobs_acct:
            logging.debug('jobname: ' + jobname)
            if jobs_acct[jobname]['State'] in states:
                self.get_job(jobname).submit()
        self._write_job_log()

    def log_jobs_acct(self, filters=None):
        """Log a formatted accounting overview of the managed jobs.

        **Arguments:**
            *filters:* list of strings used to filter jobnames.
        """
        # counter tallies jobs per state id (UNKNOWN..PENDING).
        counter = {i: 0 for i in range(-2, 3)}
        jobs_acct = self.get_jobs_acct(filters=filters)
        logging.info(self.STAT_STR.format('StdErr', 'JobID', 'State', 'End',
                                          'Preview'))
        for jobname in jobs_acct:
            job = self.get_job(jobname)
            jobid = jobs_acct[jobname]['JobID']
            state = jobs_acct[jobname]['State']
            end = jobs_acct[jobname]['End']
            stateid = self.STAT[state] if state in self.STAT else UNKNOWN
            counter[stateid] += 1
            logfile = ''
            error = []
            if 'StdErr' in job.control:
                logfile = os.path.basename(job.control['StdErr'])
                error = job.grep_log_err()
            message = self.STAT_STR.format(logfile, jobid, state, end,
                                           '|'.join(error))
            logging.log(LOGLEV[stateid], message)
        invert = {v: k for k, v in self.STAT.items()}
        for i in invert:
            # log error states only if counter > 0
            if counter[i] > 0 or i >= 0:
                logging.log(LOGLEV[i], str(counter[i]) + ' jobs ' + invert[i])
| StarcoderdataPython |
3238201 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tkinter import *
import urllib.request
import json
import requests
import datetime
from services.helper import * #this is how to import another file
##print(getDate())
## openweathermap api : https://home.openweathermap.org/api_keys
## key ba6bea63055e16232cde72d76b13baca
##this is how to make json object ##
#configs = json.loads('{"__type__":"Config", "basedUrl":"http://api.openweathermap.org/data/2.5/weather?","weatherApiKey":"ba6bea63055e16232cde72d76b13baca"}')
config = {"basedUrl":"http://api.openweathermap.org/data/2.5/weather?","weatherApiKey":"ba6bea63055e16232cde72d76b13baca"}
#print(configs['weatherApiKey'])
#print(configs['basedUrl'])
#print(HelperService.getTime())
class App:
    """Minimal tkinter window showing date, time and the current temperature."""
    def __init__(self, master):
        # master: the Tk root window this app attaches its widgets to.
        frame = Frame(master)
        frame.pack()
##        self.frameDate = Frame(master, bg='red', width=100,height=100)
##        self.frameDate.pack(side=LEFT)
##        self.frameTemp = Frame(master, bg='blue',width=100,height=100)
##        self.frameTemp.pack(side=LEFT)
        self.labelDate = Label(master,text= HelperService.getDate())
        self.labelDate.pack(side=RIGHT)
        self.labelTime = Label(master, text=HelperService.getTime())
        self.labelTime.pack(side=LEFT)
        self.labelTemp = Label(master, text='Tempurature')
        self.labelTemp.pack()
##
##        self.button = Button(frame, text="QUIT", fg="red", command=frame.quit)
##        self.button.pack(side=LEFT)
##
##        self.btnWeather = Button(frame, text="Weather", command=self.getWeather)
##        self.btnWeather.pack(side=LEFT)
        # Fetch the temperature once at startup (no refresh loop).
        self.getWeather()
    def getWeather(self):
        """Fetch current weather for ZIP 76005 (imperial) and show the temp."""
        url = config['basedUrl'] + 'zip=76005,us&units=imperial&appid=' + config['weatherApiKey']
        # NOTE(review): no error handling -- a network failure or bad
        # response will raise here.
        data = requests.get(url=url)
        #print(data.json())
        jsonData = data.json() ## parse the response body as JSON
        self.labelTemp['text'] = str(jsonData['main']['temp']) + '°'# append the degree symbol
    def openCalendar(self):
        # NOTE(review): only logs the base URL; calendar view not implemented.
        print('config: ' + config['basedUrl'])
# Build the Tk root window and run the app's event loop.
root = Tk()
##root.resizable(width=False,height=False)
##root.geometry('{}x{}'.format(500, 500))
app = App(root)
root.mainloop()
# NOTE(review): destroy() after mainloop() raises TclError if the window
# was already destroyed by closing it -- confirm intended.
root.destroy()
| StarcoderdataPython |
3285428 | <reponame>jhmarlow/python-template-repository
#!/usr/bin/env python
from setuptools import setup, find_packages
import pathlib
import pkg_resources
# import versioneer
# Read runtime dependencies from requirements.txt into install_requires.
with pathlib.Path('requirements.txt').open() as requirements_txt:
    install_requires = [
        str(requirement)
        for requirement
        in pkg_resources.parse_requirements(requirements_txt)
    ]
# Read test-only dependencies from requirements_test.txt.
with pathlib.Path('requirements_test.txt').open() as requirements_txt:
    test_install_requires = [
        str(requirement)
        for requirement
        in pkg_resources.parse_requirements(requirements_txt)
    ]
# Package metadata; the 'hello' console script maps to example_main:hello,
# and test dependencies are exposed via the 'test' extra.
setup(name='examplepackage',
      description='An example package',
      author='<NAME>',
      author_email='',
      url='',
      # version=versioneer.get_version(),
      packages=find_packages(),
      entry_points='''
        [console_scripts]
        hello=example_main:hello
    ''',
      install_requires=install_requires,
      extras_require={'test': test_install_requires}
      )
| StarcoderdataPython |
1715261 | <filename>sockjs_flask/gunicorn/workers.py
from sockjs_flask.handler import Handler
from gunicorn.workers.ggevent import GeventPyWSGIWorker
class GeventWebSocketWorker(GeventPyWSGIWorker):
    """Gunicorn gevent pywsgi worker that serves each connection through the
    SockJS-aware Handler class instead of the default pywsgi handler."""
    # Handler class pywsgi instantiates per accepted connection.
    wsgi_handler = Handler
| StarcoderdataPython |
139317 | <gh_stars>1-10
from torch.utils.tensorboard import SummaryWriter
| StarcoderdataPython |
4842301 | <reponame>shiftgig/petri-dish
from abc import ABC, abstractmethod
import pandas as pd
from petri_dish.stat_tools import chi_squared, ttest
class AbstractBaseDistributor(ABC):
    """Base class for strategies that assign experiment subjects to treatment
    groups (control is treated as just another treatment group)."""

    def __init__(self, treatment_group_ids):
        # Identifiers of the treatment groups subjects may be assigned to.
        self.treatment_group_ids = treatment_group_ids

    @abstractmethod
    def assign_group(self, subjects):
        """
        Assigns each of the experimentation subjects (without a previous assignment) to a treatment group
        (control is also considered a treatment group here, with no treatment applied).

        Parameters
        ----------
        subjects: pandas DataFrame | table with all experimentation subjects, assigned and unassigned. With features.

        Returns
        -------
        selected_assignments: pandas dataFrame | table with all experimentation subjects assigned to a treatment group.
        max_min_p: float | the min p_value for the selected assignments for all the subjects features.
        """
        pass
class StochasticDistributor(AbstractBaseDistributor):
    """
    Uses law of large numbers principle and random assignment for the treatment groups.

    The assignment method for this subclass is stochastic and generally should be used when the law
    of large numbers is observable for each group, namely, when there is a large quantity of experimentation
    subjects on each group.
    """

    def assign_group(self, subjects):
        # TODO: implement
        pass
class DirectedDistributor(AbstractBaseDistributor):
    """
    Directs the distributions based on subject's characteristics.

    The assignment method for this subclass is directed. This means that an experimentation subject will be
    assigned to a treatment group based on the total balance of the system in terms of the subject's characteristics.
    This method should be generally used when a small number of subjects per treatment group is expected, and therefore,
    where the law of large numbers isn't observable.

    Properties
    ----------
    random_attempts: int | number of trials to run the random assignment of treatment groups.
    """
    # Number of randomized candidate assignments to evaluate; the trial whose
    # worst-case (minimum) balance p-value is highest wins.
    random_attempts = 500

    def __init__(
            self,
            treatment_group_ids,
            treatment_assignment_col,
            balancing_features,
            discrete_features,
            continuous_features
    ):
        AbstractBaseDistributor.__init__(self, treatment_group_ids)
        # Column in the subjects frame holding the assignment (NaN = unassigned).
        self.treatment_assignment_col = treatment_assignment_col
        # Blocking variables: group counts are kept balanced within each bin.
        self.balancing_features = balancing_features
        # Categorical features tested for independence via chi-squared.
        self.discrete_features = discrete_features
        # Numeric features tested pairwise across groups via t-tests.
        self.continuous_features = continuous_features

    def assign_group(self, subjects):
        """Assign every unassigned subject to a treatment group, choosing the
        balanced random trial whose minimum feature p-value is largest.

        Returns a (selected_assignments, max_min_p) tuple as documented on the
        base class.
        """
        # copy original dataframe since we are going to change them
        subjects_copy = subjects.copy()

        # get the count of each treatment in each of the blocking bins
        current_assignments_balance = self._get_current_assignment_balance(subjects_copy)

        max_min_p = 0
        # NOTE(review): if every trial returns a min p-value of 0,
        # 'selected_assignments' is never bound and the return below raises
        # NameError -- confirm whether that case can occur in practice.
        # Try several randomized assignments (with guaranteed balance across blocking variables) and choose one
        for _ in range(self.random_attempts):
            # Generate candidate assignments
            (candidate_subjects_copy, candidate_assignments_balance) = self._generate_candidate_assignments(
                subjects_copy,
                current_assignments_balance
            )
            # Test the distribution quality of those assignments
            min_p_value = self._calculate_min_p_value_distribution_independence(candidate_subjects_copy)
            # Keep current trial if the distribution is better (evenly across treatment groups) than the previous one
            if min_p_value > max_min_p:
                selected_assignments = candidate_subjects_copy
                max_min_p = min_p_value
        return selected_assignments, max_min_p

    def _get_current_assignment_balance(self, subjects_df, count_nulls=False):
        """Return a Series indexed by (balancing-feature values..., treatment)
        counting how many subjects currently hold each assignment; missing
        combinations are filled with 0."""
        # Get a multi index that includes all combination of blocking variables and treatments
        joint_index = pd.MultiIndex.from_product(
            [subjects_df[col].unique() for col in self.balancing_features] + [self.treatment_group_ids],
            names=(self.balancing_features + [self.treatment_assignment_col])
        )
        joint_block = joint_index.map(tuple)

        # Remove nulls if not counting
        if not count_nulls:
            subjects_df = subjects_df.dropna(subset=[self.treatment_assignment_col])

        if subjects_df.empty:
            counts = pd.Series(0, index=joint_index, dtype=float, name=self.treatment_assignment_col)
        else:
            # Count the number of occurences of each value in each bin
            counts = subjects_df.groupby(self.balancing_features)[self.treatment_assignment_col].value_counts()
            # And in missing bins
            counts = counts.reindex(joint_block).fillna(0)

        return counts.sort_index()

    def _generate_candidate_assignments(self, subjects, assignments_balance):
        """
        Generates candidate assignments for all the unassigned test subjects using the pre-existing assignments
        and the balancing features.
        """
        candidate_subjects_assignments = subjects.copy()
        candidate_assignments_balance = assignments_balance.copy()
        # for each unassigned row
        for ind, subject in candidate_subjects_assignments[candidate_subjects_assignments[
                self.treatment_assignment_col].isnull()
        ].iterrows():
            # determine which bin of the blocking variables the subject belongs to
            block_index = tuple(subject.loc[self.balancing_features])
            # choose a random assignment balanced across within the bin
            # (sample(frac=1) shuffles so ties are broken randomly)
            # NOTE(review): Series.argmin semantics changed across pandas
            # versions (index label vs integer position); this code relies on
            # getting the treatment label back -- confirm the pinned pandas
            # version, or consider idxmin.
            assignment = (
                candidate_assignments_balance
                .loc[block_index]  # Get the number of assignments to each treatment for the bin
                .sample(frac=1)
                .argmin()
            )
            candidate_subjects_assignments.loc[ind, self.treatment_assignment_col] = assignment
            # Keep the running balance in sync with the assignment just made.
            candidate_assignments_balance.loc[block_index + (assignment,)] += 1
        return (candidate_subjects_assignments, candidate_assignments_balance)

    def _calculate_min_p_value_distribution_independence(self, candidate_subjects_data):
        """
        Calculates minimum p-value for any discrete (categorical) and continous variable across treatments.

        Ideally the distribution of each feature across treatment groups should not be independent,
        thus statistically we want the p-value to be high.
        (low p-value indicating lower confidence interval to reject null hypothesis of
        independence in the distribution).
        """
        min_p = 1

        # p-values for discrete balancing variables
        for cat in self.balancing_features + self.discrete_features:
            p = chi_squared(
                candidate_subjects_data[cat],
                candidate_subjects_data[self.treatment_assignment_col]
            )
            min_p = min(min_p, p)

        # p-values for continuous balancing variables, one t-test per
        # unordered pair of treatment groups.
        for t_ind1 in range(len(self.treatment_group_ids)):
            treatment1_slice = candidate_subjects_data[
                candidate_subjects_data[self.treatment_assignment_col] == self.treatment_group_ids[t_ind1]
            ]
            for t_ind2 in range(t_ind1 + 1, len(self.treatment_group_ids)):
                treatment2_slice = candidate_subjects_data[candidate_subjects_data[
                    self.treatment_assignment_col] == self.treatment_group_ids[t_ind2]
                ]
                for col in self.continuous_features:
                    # A t-test needs at least two non-null samples per side.
                    if (treatment1_slice[col].notnull().sum() > 1) and (treatment2_slice[col].notnull().sum() > 1):
                        p = ttest(treatment1_slice[col], treatment2_slice[col])
                        min_p = min(min_p, p)

        return min_p
| StarcoderdataPython |
3231516 | import re
from pathlib import Path
import pytest
from packaging.tags import Tag
from poetry.core.packages.package import Package
from poetry.installation.chooser import Chooser
from poetry.repositories.legacy_repository import LegacyRepository
from poetry.repositories.pool import Pool
from poetry.repositories.pypi_repository import PyPiRepository
from poetry.utils.env import MockEnv
# On-disk fixtures mirroring PyPI's JSON API responses and a PEP 503
# "simple" (legacy) index, relative to the tests/repositories directory.
JSON_FIXTURES = (
    Path(__file__).parent.parent / "repositories" / "fixtures" / "pypi.org" / "json"
)

LEGACY_FIXTURES = Path(__file__).parent.parent / "repositories" / "fixtures" / "legacy"
@pytest.fixture()
def env():
    """MockEnv advertising a macOS CPython 3.7 tag plus the universal tag."""
    tags = [
        Tag("cp37", "cp37", "macosx_10_15_x86_64"),
        Tag("py3", "none", "any"),
    ]
    return MockEnv(supported_tags=tags)
@pytest.fixture()
def mock_pypi(http):
    """Serve pypi.org JSON API requests from the on-disk JSON fixtures."""

    def respond(request, uri, headers):
        segments = uri.rsplit("/")
        project, release = segments[-3], segments[-2]

        # Prefer the per-release fixture, fall back to the project-level one;
        # unknown projects get no response at all.
        fixture = JSON_FIXTURES / project / (release + ".json")
        if not fixture.exists():
            fixture = JSON_FIXTURES / (project + ".json")
        if not fixture.exists():
            return None

        with fixture.open(encoding="utf-8") as f:
            return [200, headers, f.read()]

    http.register_uri(
        http.GET,
        re.compile("^https://pypi.org/(.+?)/(.+?)/json$"),
        body=respond,
    )
@pytest.fixture()
def mock_legacy(http):
    """Serve the foo.bar legacy (PEP 503) index from the HTML fixtures."""

    def respond(request, uri, headers):
        project = uri.rsplit("/")[-2]
        fixture = LEGACY_FIXTURES / (project + ".html")
        with fixture.open(encoding="utf-8") as f:
            return [200, headers, f.read()]

    http.register_uri(
        http.GET,
        re.compile("^https://foo.bar/simple/(.+?)$"),
        body=respond,
    )
@pytest.fixture()
def pool():
    """Repository pool holding PyPI plus the 'foo' legacy index, caches off."""
    repositories = [
        PyPiRepository(disable_cache=True),
        LegacyRepository("foo", "https://foo.bar/simple/", disable_cache=True),
    ]

    result = Pool()
    for repository in repositories:
        result.add_repository(repository)

    return result
@pytest.mark.parametrize("source_type", ["", "legacy"])
def test_chooser_chooses_universal_wheel_link_if_available(
    env, mock_pypi, mock_legacy, source_type, pool
):
    """The chooser prefers the py2.py3 universal wheel when one is published."""
    chooser = Chooser(pool, env)

    package = Package("pytest", "3.5.0")
    if source_type == "legacy":
        # Re-wrap the package as if it were resolved from the legacy index.
        legacy_source = dict(
            source_type="legacy",
            source_reference="foo",
            source_url="https://foo.bar/simple/",
        )
        package = Package(package.name, package.version.text, **legacy_source)

    chosen = chooser.choose_for(package)

    assert chosen.filename == "pytest-3.5.0-py2.py3-none-any.whl"
@pytest.mark.parametrize("source_type", ["", "legacy"])
def test_chooser_chooses_specific_python_universal_wheel_link_if_available(
    env, mock_pypi, mock_legacy, source_type, pool
):
    """A py3-only 'none-any' wheel is chosen when the env supports the py3 tag."""
    chooser = Chooser(pool, env)

    package = Package("isort", "4.3.4")
    if source_type == "legacy":
        # Re-wrap the package as if it were resolved from the legacy index.
        legacy_source = dict(
            source_type="legacy",
            source_reference="foo",
            source_url="https://foo.bar/simple/",
        )
        package = Package(package.name, package.version.text, **legacy_source)

    chosen = chooser.choose_for(package)

    assert chosen.filename == "isort-4.3.4-py3-none-any.whl"
@pytest.mark.parametrize("source_type", ["", "legacy"])
def test_chooser_chooses_system_specific_wheel_link_if_available(
    mock_pypi, mock_legacy, source_type, pool
):
    """A platform wheel matching the env's cp37m/win32 tag wins over sdists."""
    # This test needs a Windows environment, so it builds its own MockEnv
    # rather than using the macOS `env` fixture.
    win_env = MockEnv(
        supported_tags=[Tag("cp37", "cp37m", "win32"), Tag("py3", "none", "any")]
    )
    chooser = Chooser(pool, win_env)

    package = Package("pyyaml", "3.13.0")
    if source_type == "legacy":
        # Re-wrap the package as if it were resolved from the legacy index.
        legacy_source = dict(
            source_type="legacy",
            source_reference="foo",
            source_url="https://foo.bar/simple/",
        )
        package = Package(package.name, package.version.text, **legacy_source)

    chosen = chooser.choose_for(package)

    assert chosen.filename == "PyYAML-3.13-cp37-cp37m-win32.whl"
@pytest.mark.parametrize("source_type", ["", "legacy"])
def test_chooser_chooses_sdist_if_no_compatible_wheel_link_is_available(
    env,
    mock_pypi,
    mock_legacy,
    source_type,
    pool,
):
    """With no wheel matching the env's tags, the chooser falls back to sdist."""
    chooser = Chooser(pool, env)

    package = Package("pyyaml", "3.13.0")
    if source_type == "legacy":
        # Re-wrap the package as if it were resolved from the legacy index.
        legacy_source = dict(
            source_type="legacy",
            source_reference="foo",
            source_url="https://foo.bar/simple/",
        )
        package = Package(package.name, package.version.text, **legacy_source)

    chosen = chooser.choose_for(package)

    assert chosen.filename == "PyYAML-3.13.tar.gz"
@pytest.mark.parametrize("source_type", ["", "legacy"])
def test_chooser_chooses_distributions_that_match_the_package_hashes(
    env,
    mock_pypi,
    mock_legacy,
    source_type,
    pool,
):
    """Only distributions whose hash matches the locked file metadata qualify."""
    chooser = Chooser(pool, env)

    package = Package("isort", "4.3.4")
    locked_files = [
        {
            "hash": "sha256:b9c40e9750f3d77e6e4d441d8b0266cf555e7cdabdcff33c4fd06366ca761ef8",
            "filename": "isort-4.3.4.tar.gz",
        }
    ]
    if source_type == "legacy":
        # Re-wrap the package as if it were resolved from the legacy index.
        legacy_source = dict(
            source_type="legacy",
            source_reference="foo",
            source_url="https://foo.bar/simple/",
        )
        package = Package(package.name, package.version.text, **legacy_source)

    package.files = locked_files

    chosen = chooser.choose_for(package)

    assert chosen.filename == "isort-4.3.4.tar.gz"
@pytest.mark.parametrize("source_type", ["", "legacy"])
def test_chooser_throws_an_error_if_package_hashes_do_not_match(
    env,
    mock_pypi,
    mock_legacy,
    source_type,
    pool,
):
    """A hash mismatch between lock metadata and every candidate is fatal."""
    chooser = Chooser(pool, env)

    package = Package("isort", "4.3.4")
    locked_files = [
        {
            "hash": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
            "filename": "isort-4.3.4.tar.gz",
        }
    ]
    if source_type == "legacy":
        # Re-wrap the package as if it were resolved from the legacy index.
        legacy_source = dict(
            source_type="legacy",
            source_reference="foo",
            source_url="https://foo.bar/simple/",
        )
        package = Package(package.name, package.version.text, **legacy_source)

    package.files = locked_files

    with pytest.raises(RuntimeError) as e:
        chooser.choose_for(package)

    # The error message must mention the expected (locked) hash.
    assert locked_files[0]["hash"] in str(e)
| StarcoderdataPython |
69014 | #!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by <NAME> & <NAME>.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets but provides as well support
for TCP/IP streaming, whereas the message size is prepended as int32 (big
endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning; an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) <NAME> & <NAME>.
pyOSC:
Copyright (c) 2008-2010, <NAME> <<EMAIL>> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 <NAME> <<EMAIL>>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by <NAME> & <NAME>.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
	Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 <NAME>, <NAME>
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact <NAME> <<EMAIL>>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from socketserver import UDPServer, DatagramRequestHandler, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
# Package version triple; the last element is parsed out of the SVN $Rev$
# keyword. (The module-level 'global' statements below are no-ops, kept as-is.)
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])

# Python types encoded as OSC float32 ('f') / int32 ('i') by OSCArgument;
# extended with numpy scalar types below when numpy is importable.
global FloatTypes
FloatTypes = [float]

global IntTypes
IntTypes = [int]

# NTP timestamps (used by OSC timetags) count seconds since 1 Jan 1900 UTC.
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm

# One second expressed in NTP fractional units (2**32 per second).
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds

##
# numpy/scipy support:
##

# Register numpy scalar types so they are encoded as standard 32-bit OSC
# ints/floats. A missing numpy (or a numpy without typeDict, removed in
# numpy >= 1.24) raises ImportError here, which degrades gracefully.
try:
    from numpy import typeDict

    for ftype in ['float32', 'float64', 'float128']:
        try:
            FloatTypes.append(typeDict[ftype])
        except KeyError:
            pass

    for itype in ['int8', 'int16', 'int32', 'int64']:
        try:
            IntTypes.append(typeDict[itype])
            IntTypes.append(typeDict['u' + itype])
        except KeyError:
            pass

    # thanks for those...
    del typeDict, ftype, itype

except ImportError:
    pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
    """ Builds typetagged OSC messages.

    OSCMessage objects are container objects for building OSC-messages.
    On the 'front' end, they behave much like list-objects, and on the 'back' end
    they generate a binary representation of the message, which can be sent over a network socket.
    OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
    followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
    and finally the arguments themselves, encoded in an OSC-specific way.

    On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
    The message contents can be manipulated much like a list:
      >>> msg = OSCMessage("/my/osc/address")
      >>> msg.append('something')
      >>> msg.insert(0, 'something else')
      >>> msg[1] = 'entirely'
      >>> msg.extend([1,2,3.])
      >>> msg += [4, 5, 6.]
      >>> del msg[3:6]
      >>> msg.pop(-2)
      5
      >>> print(msg)
      /my/osc/address ['something else', 'entirely', 1, 6.0]

    OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
    inherits its address from the left-hand operand. The right-hand operand's address is ignored.
    To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!

    Additional methods exist for retreiving typetags or manipulating items as (typetag, value) tuples.
    """
    def __init__(self, address=""):
        """Instantiate a new OSCMessage.
        The OSC-address can be specified with the 'address' argument
        """
        self.clear(address)

    def setAddress(self, address):
        """Set or change the OSC-address
        """
        self.address = address

    def clear(self, address=""):
        """Clear (or set a new) OSC-address and clear any arguments appended so far
        """
        self.address = address
        self.clearData()

    def clearData(self):
        """Clear any arguments appended so far
        """
        # typetags always starts with the ',' separator; message holds the
        # binary-encoded arguments in append order.
        self.typetags = ","
        self.message  = b""

    def append(self, argument, typehint=None):
        """Appends data to the message, updating the typetags based on
        the argument's type. If the argument is a blob (counted
        string) pass in 'b' as typehint.
        'argument' may also be a list or tuple, in which case its elements
        will get appended one-by-one, all using the provided typehint
        """
        if isinstance(argument,dict):
            argument = list(argument.items())
        elif isinstance(argument, OSCMessage):
            raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")

        # Iterables (except strings/bytes) are flattened element-by-element.
        if hasattr(argument, '__iter__') and not type(argument) in (str,bytes):
            for arg in argument:
                self.append(arg, typehint)

            return

        if typehint == 'b':
            binary = OSCBlob(argument)
            tag = 'b'
        elif typehint == 't':
            binary = OSCTimeTag(argument)
            tag = 't'
        else:
            tag, binary = OSCArgument(argument, typehint)

        self.typetags += tag
        self.message += binary

    def getBinary(self):
        """Returns the binary representation of the message
        """
        binary = OSCString(self.address)
        binary += OSCString(self.typetags)
        binary += self.message

        return binary

    def __repr__(self):
        """Returns a string containing the decode Message
        """
        return str(decodeOSC(self.getBinary()))

    def __str__(self):
        """Returns the Message's address and contents as a string.
        """
        return "%s %s" % (self.address, str(list(self.values())))

    def __len__(self):
        """Returns the number of arguments appended so far
        """
        return (len(self.typetags) - 1)

    def __eq__(self, other):
        """Return True if two OSCMessages have the same address & content
        """
        if not isinstance(other, self.__class__):
            return False

        return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)

    def __ne__(self, other):
        """Return (not self.__eq__(other))
        """
        return not self.__eq__(other)

    def __add__(self, values):
        """Returns a copy of self, with the contents of 'values' appended
        (see the 'extend()' method, below)
        """
        msg = self.copy()
        msg.extend(values)
        return msg

    def __iadd__(self, values):
        """Appends the contents of 'values'
        (equivalent to 'extend()', below)
        Returns self
        """
        self.extend(values)
        return self

    def __radd__(self, values):
        """Appends the contents of this OSCMessage to 'values'
        Returns the extended 'values' (list or tuple)
        """
        out = list(values)
        out.extend(list(self.values()))

        if isinstance(values,tuple):
            return tuple(out)

        return out

    def _reencode(self, items):
        """Erase & rebuild the OSCMessage contents from the given
        list of (typehint, value) tuples"""
        self.clearData()
        for item in items:
            self.append(item[1], item[0])

    def values(self):
        """Returns a list of the arguments appended so far
        """
        # Round-trips through the binary form; [2:] skips address & typetags.
        return decodeOSC(self.getBinary())[2:]

    def tags(self):
        """Returns a list of typetags of the appended arguments
        """
        return list(self.typetags.lstrip(','))

    def items(self):
        """Returns a list of (typetag, value) tuples for
        the arguments appended so far
        """
        out = []
        values = list(self.values())
        typetags = self.tags()
        for i in range(len(values)):
            out.append((typetags[i], values[i]))

        return out

    def __contains__(self, val):
        """Test if the given value appears in the OSCMessage's arguments
        """
        return (val in list(self.values()))

    def __getitem__(self, i):
        """Returns the indicated argument (or slice)
        """
        return list(self.values())[i]

    def __delitem__(self, i):
        """Removes the indicated argument (or slice)
        """
        items = list(self.items())
        del items[i]

        self._reencode(items)

    def _buildItemList(self, values, typehint=None):
        if isinstance(values, OSCMessage):
            items = list(values.items())
        elif isinstance(values,list):
            items = []
            for val in values:
                if isinstance(val,tuple):
                    items.append(val[:2])
                else:
                    items.append((typehint, val))
        elif isinstance(values,tuple):
            items = [values[:2]]
        else:
            items = [(typehint, values)]

        return items

    def __setitem__(self, i, val):
        """Set indicatated argument (or slice) to a new value.
        'val' can be a single int/float/string, or a (typehint, value) tuple.
        Or, if 'i' is a slice, a list of these or another OSCMessage.
        """
        items = list(self.items())

        new_items = self._buildItemList(val)

        if not isinstance(i,slice):
            if len(new_items) != 1:
                raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")

            new_items = new_items[0]

        # finally...
        items[i] = new_items

        self._reencode(items)

    def setItem(self, i, val, typehint=None):
        """Set indicated argument to a new value (with typehint)
        """
        items = list(self.items())

        items[i] = (typehint, val)

        self._reencode(items)

    def copy(self):
        """Returns a deep copy of this OSCMessage
        """
        msg = self.__class__(self.address)
        msg.typetags = self.typetags
        msg.message = self.message
        return msg

    def count(self, val):
        """Returns the number of times the given value occurs in the OSCMessage's arguments
        """
        return list(self.values()).count(val)

    def index(self, val):
        """Returns the index of the first occurence of the given value in the OSCMessage's arguments.
        Raises ValueError if val isn't found
        """
        return list(self.values()).index(val)

    def extend(self, values):
        """Append the contents of 'values' to this OSCMessage.
        'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
        """
        items = list(self.items()) + self._buildItemList(values)

        self._reencode(items)

    def insert(self, i, val, typehint = None):
        """Insert given value (with optional typehint) into the OSCMessage
        at the given index.
        """
        items = list(self.items())

        for item in reversed(self._buildItemList(val)):
            items.insert(i, item)

        self._reencode(items)

    def popitem(self, i):
        """Delete the indicated argument from the OSCMessage, and return it
        as a (typetag, value) tuple.
        """
        items = list(self.items())

        item = items.pop(i)

        self._reencode(items)

        return item

    def pop(self, i):
        """Delete the indicated argument from the OSCMessage, and return it.
        """
        return self.popitem(i)[1]

    def reverse(self):
        """Reverses the arguments of the OSCMessage (in place)
        """
        items = list(self.items())

        items.reverse()

        self._reencode(items)

    def remove(self, val):
        """Removes the first argument with the given value from the OSCMessage.
        Raises ValueError if val isn't found.
        """
        items = list(self.items())

        # this is not very efficient...
        i = 0
        for (t, v) in items:
            if (v == val):
                break
            i += 1
        else:
            # Bugfix: previously referenced an undefined name 'm', so a
            # missing value raised NameError instead of the documented
            # ValueError.
            raise ValueError("'%s' not in OSCMessage" % str(val))
        # but more efficient than first calling self.values().index(val),
        # then calling self.items(), which would in turn call self.values() again...

        del items[i]

        self._reencode(items)

    def __iter__(self):
        """Returns an iterator of the OSCMessage's arguments
        """
        return iter(list(self.values()))

    def __reversed__(self):
        """Returns a reverse iterator of the OSCMessage's arguments
        """
        return reversed(list(self.values()))

    def itervalues(self):
        """Returns an iterator of the OSCMessage's arguments
        """
        return iter(list(self.values()))

    def iteritems(self):
        """Returns an iterator of the OSCMessage's arguments as
        (typetag, value) tuples
        """
        return iter(list(self.items()))

    def itertags(self):
        """Returns an iterator of the OSCMessage's arguments' typetags
        """
        return iter(self.tags())
class OSCBundle(OSCMessage):
    """Builds a 'bundle' of OSC messages.

    OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
    An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
    (And yes, OSC-bundles may contain other OSC-bundles...)

    OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
      - if an item or items to be appended or inserted are not OSCMessage objects,
      OSCMessage objects are created to encapsulate the item(s)
      - an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
      The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
      - OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
      The default timetag value (0) means 'immediately'
    """
    def __init__(self, address="", time=0):
        """Instantiate a new OSCBundle.
        The default OSC-address for newly created OSCMessages
        can be specified with the 'address' argument
        The bundle's timetag can be set with the 'time' argument
        """
        super(OSCBundle, self).__init__(address)
        # Timetag in 'Python time' (floating seconds since the Epoch); 0 means
        # the receiver should process the bundle immediately.
        self.timetag = time

    def __str__(self):
        """Returns the Bundle's contents (and timetag, if nonzero) as a string.
        """
        if (self.timetag > 0.):
            out = "#bundle (%s) [" % self.getTimeTagStr()
        else:
            out = "#bundle ["

        if self.__len__():
            for val in list(self.values()):
                out += "%s, " % str(val)
            out = out[:-2]      # strip trailing space and comma

        return out + "]"

    def setTimeTag(self, time):
        """Set or change the OSCBundle's TimeTag
        In 'Python Time', that's floating seconds since the Epoch
        """
        # Negative times are silently ignored.
        if time >= 0:
            self.timetag = time

    def getTimeTagStr(self):
        """Return the TimeTag as a human-readable string
        """
        # HH:MM:SS from ctime, plus the fractional part to millisecond
        # precision (leading '0' of '0.123' stripped).
        fract, secs = math.modf(self.timetag)
        out = time.ctime(secs)[11:19]
        out += ("%.3f" % fract)[1:]

        return out

    def append(self, argument, typehint = None):
        """Appends data to the bundle, creating an OSCMessage to encapsulate
        the provided argument unless this is already an OSCMessage.
        Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
        If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
        Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
          - if 'addr' appears in the dict, its value overrides the OSCBundle's address
          - if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
        """
        if isinstance(argument, OSCMessage):
            binary = OSCBlob(argument.getBinary())
        else:
            msg = OSCMessage(self.address)
            if isinstance(argument,dict):
                if 'addr' in argument:
                    msg.setAddress(argument['addr'])
                if 'args' in argument:
                    msg.append(argument['args'], typehint)
            else:
                msg.append(argument, typehint)

            binary = OSCBlob(msg.getBinary())

        # Each contained message is stored as a blob ('b') element.
        self.message += binary
        self.typetags += 'b'

    def getBinary(self):
        """Returns the binary representation of the message
        """
        # A bundle on the wire is: "#bundle" string, timetag, then the
        # blob-encoded messages (no typetag string of its own).
        binary = OSCString("#bundle")
        binary += OSCTimeTag(self.timetag)
        binary += self.message

        return binary

    def _reencapsulate(self, decoded):
        # Rebuild OSCMessage/OSCBundle objects from a decodeOSC() result,
        # recursing into nested bundles.
        if decoded[0] == "#bundle":
            msg = OSCBundle()
            msg.setTimeTag(decoded[1])
            for submsg in decoded[2:]:
                msg.append(self._reencapsulate(submsg))

        else:
            msg = OSCMessage(decoded[0])
            tags = decoded[1].lstrip(',')
            for i in range(len(tags)):
                msg.append(decoded[2+i], tags[i])

        return msg

    def values(self):
        """Returns a list of the OSCMessages appended so far
        """
        out = []
        for decoded in decodeOSC(self.getBinary())[2:]:
            out.append(self._reencapsulate(decoded))

        return out

    def __eq__(self, other):
        """Return True if two OSCBundles have the same timetag & content
        """
        if not isinstance(other, self.__class__):
            return False

        return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)

    def copy(self):
        """Returns a deep copy of this OSCBundle
        """
        copy = super(OSCBundle, self).copy()
        copy.timetag = self.timetag
        return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
    """Convert a string into a zero-padded OSC String.

    The length of the result is always a multiple of 4 bytes and it
    ends with 1 to 4 zero-bytes.
    """
    padded_len = math.ceil((len(next) + 1) / 4.0) * 4
    return struct.pack(">%ds" % padded_len, str(next).encode('latin1'))
def OSCBlob(next):
    """Convert a string or bytes object into an OSC Blob.

    An OSC-Blob is a binary block of data prepended by a 'size' (int32),
    padded to a multiple of 4 bytes; any other input type yields b''.
    """
    if isinstance(next, str):
        next = next.encode('latin1')
    if not isinstance(next, bytes):
        return b''
    padded_len = math.ceil(len(next) / 4.0) * 4
    # NOTE: the size field carries the *padded* length, matching the
    # original implementation's behaviour.
    return struct.pack(">i%ds" % padded_len, padded_len, next)
def OSCArgument(next, typehint=None):
    """Convert some Python types to their OSC binary representations.

    Returns a (typetag, data) tuple.  Without a typehint the tag is
    inferred from the value's type; with a numeric typehint ('d', 'f',
    'i') a failed conversion falls back to an OSC string.
    NOTE(review): no 'b' (blob) typehint is handled here — confirm blobs
    are produced elsewhere before relying on one.
    """
    if not typehint:
        if type(next) in FloatTypes:
            return ('f', struct.pack(">f", float(next)))
        if type(next) in IntTypes:
            return ('i', struct.pack(">i", int(next)))
        return ('s', OSCString(next))
    if typehint == 'd':
        try:
            return ('d', struct.pack(">d", float(next)))
        except ValueError:
            return ('s', OSCString(next))
    if typehint == 'f':
        try:
            return ('f', struct.pack(">f", float(next)))
        except ValueError:
            return ('s', OSCString(next))
    if typehint == 'i':
        try:
            return ('i', struct.pack(">i", int(next)))
        except ValueError:
            return ('s', OSCString(next))
    return ('s', OSCString(next))
def OSCTimeTag(time):
    """Convert a time in floating seconds to its OSC binary representation.

    Non-positive times produce the special 'immediately' tag (0, 1).
    """
    if time > 0:
        fract, secs = math.modf(time)
        return struct.pack('>LL', int(secs - NTP_epoch),
                           int(fract * NTP_units_per_second))
    return struct.pack('>LL', 0, 1)
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = data.find(b'\0')
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length].decode('latin1'), data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print("Error: too few bytes for int", data, len(data))
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
big = (int(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">LL", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(NTP_epoch + high) + float(low / NTP_units_per_second)
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print("Error: too few bytes for float", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
if(len(data)<8):
print("Error: too few bytes for double", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (float, rest)
def decodeOSC(data):
    """Converts a binary OSC message to a Python list.

    For a message the result is [address, typetag-string, arg, arg, ...];
    for a bundle it is ["#bundle", timetag, <decoded element>, ...].
    """
    # dispatch table: typetag character -> binary reader function
    table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
    decoded = []
    address, rest = _readString(data)
    if address.startswith(","):
        # address-less message: the first string is already the typetag-string
        typetags = address
        address = ""
    else:
        typetags = ""
    if address == "#bundle":
        # bundle: a timetag followed by length-prefixed elements,
        # each of which is itself a complete OSC packet
        time, rest = _readTimeTag(rest)
        decoded.append(address)
        decoded.append(time)
        while len(rest) > 0:
            length, rest = _readInt(rest)
            decoded.append(decodeOSC(rest[:length]))  # recurse into the element
            rest = rest[length:]
    elif len(rest) > 0:
        if not len(typetags):
            typetags, rest = _readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        if typetags.startswith(","):
            # decode each argument with the reader matching its typetag
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            raise OSCError("OSCMessage's typetag-string lacks the magic ','")
    return decoded
######
#
# Utility functions
#
######
def hexDump(bytes):
    """Useful utility; prints the string in hexadecimal, 16 bytes per row,
    followed by the repr() of those bytes.
    """
    print("byte 0 1 2 3 4 5 6 7 8 9 A B C D E F")
    if isinstance(bytes, str):
        bytes = bytes.encode('latin1')
    num = len(bytes)
    line = ""
    for i in range(num):
        if i % 16 == 0:
            # BUGFIX: must use integer division -- 'i / 16' yields a float
            # in Python 3 and '%02X' % float raises TypeError.
            line = "%02X0 : " % (i // 16)
        line += "%02X " % bytes[i]
        if (i + 1) % 16 == 0:
            print("%s: %s" % (line, repr(bytes[i - 15:i + 1])))
            line = ""
    bytes_left = num % 16
    if bytes_left:
        # print the final, partial row padded out to the full-row width
        print("%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:])))
def getUrlStr(*args):
    """Convert the given arguments to a string in 'host:port/prefix' format.

    Accepted argument forms:
    - (host, port)
    - (host, port), prefix
    - host, port
    - host, port, prefix
    """
    if not len(args):
        return ""
    if type(args[0]) == tuple:
        (host, port) = args[0][0], args[0][1]
        args = args[1:]
    else:
        (host, port) = args[0], args[1]
        args = args[2:]
    prefix = args[0] if len(args) else ""
    if len(host) and (host != '0.0.0.0'):
        # try to show a symbolic hostname; fall back to the raw address
        try:
            (host, _, _) = socket.gethostbyaddr(host)
        except socket.error:
            pass
    else:
        host = 'localhost'
    if isinstance(port, int):
        return "%s:%d%s" % (host, port, prefix)
    return host + prefix
def parseUrlStr(url):
    """Convert provided string in 'host:port/prefix' format to it's components
    Returns ((host, port), prefix)
    """
    if not (isinstance(url, str) and len(url)):
        return (None, '')
    i = url.find("://")
    if i > -1:
        url = url[i+3:]  # strip any 'osc://'-style scheme
    i = url.find(':')
    if i > -1:
        host = url[:i].strip()
        tail = url[i+1:].strip()
    else:
        host = ''
        tail = url
    # scan the leading digits of 'tail' as the port number
    for i in range(len(tail)):
        if not tail[i].isdigit():
            break
    else:
        # all-digits (or empty) tail: include the final digit as well.
        # NOTE(review): if 'tail' is empty this reuses the stale 'i' from the
        # earlier find(':') -- harmless for portstr but worth confirming.
        i += 1
    portstr = tail[:i].strip()
    tail = tail[i:].strip()
    # everything up to the first '/', '+', '-' or '*' may still be a hostname
    found = len(tail)
    for c in ('/', '+', '-', '*'):
        i = tail.find(c)
        if (i > -1) and (i < found):
            found = i
    head = tail[:found].strip()
    prefix = tail[found:].strip()
    prefix = prefix.strip('/')
    # normalize: a plain prefix gets a leading '/', filter-strings keep theirs
    if len(prefix) and prefix[0] not in ('+', '-', '*'):
        prefix = '/' + prefix
    if len(head) and not len(host):
        host = head
    if len(host):
        try:
            host = socket.gethostbyname(host)  # resolve to an IP if possible
        except socket.error:
            pass
    try:
        port = int(portstr)
    except ValueError:
        port = None
    return ((host, port), prefix)
######
#
# OSCClient class
#
######
class OSCClient(object):
    """Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
    """
    # set outgoing socket buffer size
    sndbuf_size = 4096 * 8

    def __init__(self, server=None):
        """Construct an OSC Client.
        When the 'address' argument is given this client is connected to a specific remote server.
        - address ((host, port) tuple): the address of the remote server to send all messages to
        Otherwise it acts as a generic client:
        If address == 'None', the client doesn't connect to a specific remote server,
        and the remote address must be supplied when calling sendto()
        - server: Local OSCServer-instance this client will use the socket of for transmissions.
        If none is supplied, a socket will be created.
        """
        self.socket = None
        if server == None:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
            self._fd = self.socket.fileno()
            self.server = None
        else:
            self.setServer(server)
        self.client_address = None

    def setServer(self, server):
        """Associate this Client with given server.
        The Client will send from the Server's socket.
        The Server will use this Client instance to send replies.
        """
        if not isinstance(server, OSCServer):
            raise ValueError("'server' argument is not a valid OSCServer object")
        if self.socket != None:
            self.close()
        # duplicate the server's socket so both share the same local port
        self.socket = server.socket.dup()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
        self._fd = self.socket.fileno()
        self.server = server
        if self.server.client != None:
            self.server.client.close()
        self.server.client = self

    def close(self):
        """Disconnect & close the Client's socket
        """
        if self.socket != None:
            self.socket.close()
            self.socket = None

    def __str__(self):
        """Returns a string containing this Client's Class-name, software-version
        and the remote-address it is connected to (if any)
        """
        out = self.__class__.__name__
        out += " v%s.%s-%s" % version
        addr = self.address()
        if addr:
            out += " connected to osc://%s" % getUrlStr(addr)
        else:
            out += " (unconnected)"
        return out

    def __eq__(self, other):
        """Return True if 'other' is an OSCClient using the same socket
        (and, when both are attached to a server, the same server).
        """
        if not isinstance(other, self.__class__):
            return False
        # BUGFIX: the original compared cmp(self.socket._sock, ...); neither
        # cmp() nor socket._sock exists in Python 3, and cmp()'s 0-for-equal
        # return value inverted the result anyway.  Compare directly.
        isequal = (self.socket == other.socket)
        if isequal and self.server and other.server:
            return self.server == other.server
        return isequal

    def __ne__(self, other):
        """Compare function.
        """
        return not self.__eq__(other)

    def address(self):
        """Returns a (host,port) tuple of the remote server this client is
        connected to or None if not connected to any server.
        """
        try:
            return self.socket.getpeername()
        except socket.error:
            return None

    def connect(self, address):
        """Bind to a specific OSC server:
        the 'address' argument is a (host, port) tuple
        - host: hostname of the remote OSC server,
        - port: UDP-port the remote OSC server listens to.
        """
        try:
            self.socket.connect(address)
            self.client_address = address
        except socket.error as e:
            self.client_address = None
            raise OSCClientError("SocketError: %s" % str(e))
        if self.server != None:
            self.server.return_port = address[1]

    def sendto(self, msg, address, timeout=None):
        """Send the given OSCMessage to the specified address.
        - msg: OSCMessage (or OSCBundle) to be sent
        - address: (host, port) tuple specifing remote server to send the message to
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        ret = select.select([], [self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except:
            # for the very rare case this might happen
            raise OSCClientError("Timed out waiting for file descriptor")
        try:
            # temporarily connect to the given address, then restore the
            # previous connection (if any)
            self.socket.connect(address)
            self.socket.sendall(msg.getBinary())
            if self.client_address:
                self.socket.connect(self.client_address)
        except socket.error as e:
            # BUGFIX: OSError is not subscriptable in Python 3 -- use e.errno.
            # 7 = 'no address associated with nodename', 65 = 'no route to host'
            if e.errno in (7, 65):
                raise e
            else:
                raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))

    def send(self, msg, timeout=None):
        """Send the given OSCMessage.
        The Client must be already connected.
        - msg: OSCMessage (or OSCBundle) to be sent
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket,
        or when the Client isn't connected to a remote server.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        ret = select.select([], [self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except:
            # for the very rare case this might happen
            raise OSCClientError("Timed out waiting for file descriptor")
        try:
            self.socket.sendall(msg.getBinary())
        except socket.error as e:
            # BUGFIX: OSError is not subscriptable in Python 3 -- use e.errno.
            if e.errno in (7, 65):
                raise e
            else:
                raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
    """Convert Message-Filter settings in '+<addr> -<addr> ...' format to a
    dict of the form { '<addr>':True, '<addr>':False, ... }.
    Returns a list: ['<prefix>', filters]
    """
    filters = {}
    if isinstance(args, str):
        args = [args]
    prefix = None
    for arg in args:
        head = None
        for plus_part in arg.split('+'):
            minus_parts = plus_part.split('-')
            accept = minus_parts.pop(0).strip()
            if len(accept):
                accept = '/' + accept.strip('/')
            # the first non-'/*' token becomes the prefix; later '+' tokens
            # become accept-filters
            if (head is None) and (accept != "/*"):
                head = accept
            elif len(accept):
                if accept == '/*':
                    filters = {'/*': True}   # reset all previous filters
                else:
                    filters[accept] = True
            for reject in minus_parts:
                reject = reject.strip()
                if len(reject):
                    reject = '/' + reject.strip('/')
                    if reject == '/*':
                        filters = {'/*': False}  # reset all previous filters
                    else:
                        filters[reject] = False
        if prefix is None:
            prefix = head
    return [prefix, filters]
def getFilterStr(filters):
    """Return the given 'filters' dict as a list of
    '+<addr>' | '-<addr>' filter-strings
    """
    if not len(filters):
        return []
    # the leading '+/*' or '-/*' establishes the default
    # (match-everything / match-nothing) the explicit filters diverge from
    if '/*' in list(filters.keys()):
        out = ["+/*"] if filters['/*'] else ["-/*"]
    elif False in list(filters.values()):
        out = ["+/*"]
    else:
        out = ["-/*"]
    for (addr, accept) in list(filters.items()):
        if addr == '/*':
            continue
        out.append(("+%s" if accept else "-%s") % addr)
    return out
# A translation-table for mapping OSC-address expressions to Python 're' expressions
OSCtrans = str.maketrans("{,}?", "(|).")

def getRegEx(pattern):
    """Compile and return a regular-expression object for the given
    OSC address-pattern.
    """
    # escape the regex metacharacters that are literal in OSC addresses...
    for literal in ".()":
        pattern = pattern.replace(literal, "\\" + literal)
    # ...then map the OSC wildcards onto their 're' equivalents
    pattern = pattern.replace("*", r".*")   # '*' matches 0 or more characters
    pattern = pattern.translate(OSCtrans)   # '?' -> '.', '{,}' -> '(|)'
    return re.compile(pattern)
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
    """'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
    This client keeps a dict of 'OSCTargets'. and sends each OSCMessage to each OSCTarget
    The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
    the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
    """
    def __init__(self, server=None):
        """Construct a "Multi" OSC Client.
        - server: Local OSCServer-instance this client will use the socket of for transmissions.
        If none is supplied, a socket will be created.
        """
        super(OSCMultiClient, self).__init__(server)
        # {(host, port): [prefix, filters-dict], ...}
        self.targets = {}

    def _searchHostAddr(self, host):
        """Search the subscribed OSCTargets for (the first occurence of) given host.
        Returns a (host, port) tuple
        """
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass
        for addr in list(self.targets.keys()):
            if host == addr[0]:
                return addr
        raise NotSubscribedError((host, None))

    def _updateFilters(self, dst, src):
        """Update a 'filters' dict with values form another 'filters' dict:
        - src[a] == True and dst[a] == False: del dst[a]
        - src[a] == False and dst[a] == True: del dst[a]
        - a not in dst: dst[a] == src[a]
        """
        if '/*' in list(src.keys()):  # reset filters
            dst.clear()               # 'match everything' == no filters
            if not src.pop('/*'):
                dst['/*'] = False     # 'match nothing'
        for (addr, flag) in list(src.items()):
            # opposite filters cancel each other out
            if (addr in list(dst.keys())) and (dst[addr] != flag):
                del dst[addr]
            else:
                dst[addr] = flag

    def _setTarget(self, address, prefix=None, filters=None):
        """Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
        - address ((host, port) tuple): IP-address & UDP-port
        - prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
        sent to this OSCTarget (optional)
        """
        if address not in list(self.targets.keys()):
            self.targets[address] = ["", {}]
        if prefix != None:
            if len(prefix):
                # make sure prefix starts with ONE '/', and does not end with '/'
                prefix = '/' + prefix.strip('/')
            self.targets[address][0] = prefix
        if filters != None:
            if isinstance(filters, str):
                (_, filters) = parseFilterStr(filters)
            elif not isinstance(filters, dict):
                raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
            self._updateFilters(self.targets[address][1], filters)

    def setOSCTarget(self, address, prefix=None, filters=None):
        """Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
        the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
        or a 'host' (string) : The host will be looked-up
        - prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
        sent to this OSCTarget (optional)
        """
        if isinstance(address, str):
            address = self._searchHostAddr(address)
        elif isinstance(address, tuple):
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except:
                pass
            address = (host, port)
        else:
            raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
        self._setTarget(address, prefix, filters)

    def setOSCTargetFromStr(self, url):
        """Adds or modifies a subscribed OSCTarget from the given string, which should be in the
        '<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
        """
        (addr, tail) = parseUrlStr(url)
        (prefix, filters) = parseFilterStr(tail)
        self._setTarget(addr, prefix, filters)

    def _delTarget(self, address, prefix=None):
        """Delete the specified OSCTarget from the Client's dict.
        the 'address' argument must be a (host, port) tuple.
        If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
        """
        try:
            if prefix == None:
                del self.targets[address]
            elif prefix == self.targets[address][0]:
                del self.targets[address]
        except KeyError:
            raise NotSubscribedError(address, prefix)

    def delOSCTarget(self, address, prefix=None):
        """Delete the specified OSCTarget from the Client's dict.
        the 'address' argument can be a ((host, port) tuple), or a hostname.
        If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
        """
        if isinstance(address, str):
            address = self._searchHostAddr(address)
        if isinstance(address, tuple):
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            address = (host, port)
            self._delTarget(address, prefix)

    def hasOSCTarget(self, address, prefix=None):
        """Return True if the given OSCTarget exists in the Client's dict.
        the 'address' argument can be a ((host, port) tuple), or a hostname.
        If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
        """
        if isinstance(address, str):
            address = self._searchHostAddr(address)
        if isinstance(address, tuple):
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            address = (host, port)
            if address in list(self.targets.keys()):
                if prefix == None:
                    return True
                elif prefix == self.targets[address][0]:
                    return True
        return False

    def getOSCTargets(self):
        """Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
        """
        out = {}
        for ((host, port), pf) in list(self.targets.items()):
            try:
                (host, _, _) = socket.gethostbyaddr(host)
            except socket.error:
                pass
            out[(host, port)] = pf
        return out

    def getOSCTarget(self, address):
        """Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
        'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
        Returns (None, ['',{}]) if address not found.
        """
        if isinstance(address, str):
            address = self._searchHostAddr(address)
        if isinstance(address, tuple):
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            address = (host, port)
            if address in list(self.targets.keys()):
                try:
                    (host, _, _) = socket.gethostbyaddr(host)
                except socket.error:
                    pass
                return ((host, port), self.targets[address])
        return (None, ['', {}])

    def clearOSCTargets(self):
        """Erases all OSCTargets from the Client's dict
        """
        self.targets = {}

    def updateOSCTargets(self, dict):
        """Update the Client's OSCTargets dict with the contents of 'dict'
        The given dict's items MUST be of the form
        { (host, port):[prefix, filters], ... }
        """
        for ((host, port), (prefix, filters)) in list(dict.items()):
            val = [prefix, {}]
            self._updateFilters(val[1], filters)
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            self.targets[(host, port)] = val

    def getOSCTargetStr(self, address):
        """Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
        'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
        Returns (None, []) if address not found.
        """
        (addr, (prefix, filters)) = self.getOSCTarget(address)
        if addr == None:
            return (None, [])
        return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))

    def getOSCTargetStrings(self):
        """Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
        """
        out = []
        for (addr, (prefix, filters)) in list(self.targets.items()):
            out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
        return out

    def connect(self, address):
        """The OSCMultiClient isn't allowed to connect to any specific
        address.
        """
        return NotImplemented

    def sendto(self, msg, address, timeout=None):
        """Send the given OSCMessage.
        The specified address is ignored. Instead this method calls send() to
        send the message to all subscribed clients.
        - msg: OSCMessage (or OSCBundle) to be sent
        - address: (host, port) tuple specifing remote server to send the message to
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        self.send(msg, timeout)

    def _filterMessage(self, filters, msg):
        """Checks the given OSCMessge against the given filters.
        'filters' is a dict containing OSC-address:bool pairs.
        If 'msg' is an OSCBundle, recursively filters its constituents.
        Returns None if the message is to be filtered, else returns the message.
        or
        Returns a copy of the OSCBundle with the filtered messages removed.
        """
        if isinstance(msg, OSCBundle):
            out = msg.copy()
            msgs = list(out.values())
            out.clearData()
            for m in msgs:
                m = self._filterMessage(filters, m)
                if m:  # this catches 'None' and empty bundles.
                    out.append(m)
        elif isinstance(msg, OSCMessage):
            # default verdict when no explicit filter matches below
            if '/*' in list(filters.keys()):
                if filters['/*']:
                    out = msg
                else:
                    out = None
            elif False in list(filters.values()):
                out = msg
            else:
                out = None
        else:
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        # an explicit filter whose pattern fully matches overrides the default
        expr = getRegEx(msg.address)
        for addr in list(filters.keys()):
            if addr == '/*':
                continue
            match = expr.match(addr)
            if match and (match.end() == len(addr)):
                if filters[addr]:
                    out = msg
                else:
                    out = None
                break
        return out

    def _prefixAddress(self, prefix, msg):
        """Makes a copy of the given OSCMessage, then prepends the given prefix to
        The message's OSC-address.
        If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
        """
        out = msg.copy()
        if isinstance(msg, OSCBundle):
            msgs = list(out.values())
            out.clearData()
            for m in msgs:
                out.append(self._prefixAddress(prefix, m))
        elif isinstance(msg, OSCMessage):
            out.setAddress(prefix + out.address)
        else:
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        return out

    def send(self, msg, timeout=None):
        """Send the given OSCMessage to all subscribed OSCTargets
        - msg: OSCMessage (or OSCBundle) to be sent
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        for (address, (prefix, filters)) in list(self.targets.items()):
            if len(filters):
                out = self._filterMessage(filters, msg)
                if not out:  # this catches 'None' and empty bundles.
                    continue
            else:
                out = msg
            if len(prefix):
                # BUGFIX: apply the prefix to the (possibly filtered) 'out';
                # the original passed 'msg' here, silently discarding the
                # result of the filtering step above.
                out = self._prefixAddress(prefix, out)
            binary = out.getBinary()
            ret = select.select([], [self._fd], [], timeout)
            try:
                ret[1].index(self._fd)
            except:
                # for the very rare case this might happen
                raise OSCClientError("Timed out waiting for file descriptor")
            try:
                while len(binary):
                    sent = self.socket.sendto(binary, address)
                    binary = binary[sent:]
            except socket.error as e:
                # BUGFIX: OSError is not subscriptable in Python 3 -- use e.errno.
                # 7 = 'no address associated with nodename', 65 = 'no route to host'
                if e.errno in (7, 65):
                    raise e
                else:
                    raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
    """Maps OSC-addresses to callback-functions and dispatches incoming
    messages to every callback whose registered address matches the
    message's address-pattern.
    """
    def __init__(self):
        # {OSC-address-string: callback, ...}; 'default' is the fallback key
        self.callbacks = {}

    def addMsgHandler(self, address, callback):
        """Register a handler for an OSC-address
        - 'address' is the OSC address-string.
        the address-string should start with '/' and may not contain '*'
        - 'callback' is the function called for incoming OSCMessages that match 'address'.
        The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
        """
        for chk in '*?,[]{}# ':
            if chk in address:
                raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
        if type(callback) not in (types.FunctionType, types.MethodType):
            raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
        if address != 'default':
            address = '/' + address.strip('/')
        self.callbacks[address] = callback

    def delMsgHandler(self, address):
        """Remove the registered handler for the given OSC-address
        """
        del self.callbacks[address]

    def getOSCAddressSpace(self):
        """Returns a list containing all OSC-addresses registerd with this Server.
        """
        return list(self.callbacks.keys())

    def dispatchMessage(self, pattern, tags, data, client_address):
        """Attmept to match the given OSC-address pattern, which may contain '*',
        against all callbacks registered with the OSCServer.
        Calls the matching callback and returns whatever it returns.
        If no match is found, and a 'default' callback is registered, it calls that one,
        or raises NoCallbackError if a 'default' callback is not registered.
        - pattern (string): The OSC-address of the receied message
        - tags (string): The OSC-typetags of the receied message's arguments, without ','
        - data (list): The message arguments
        """
        if len(tags) != len(data):
            raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
        expr = getRegEx(pattern)
        replies = []
        matched = 0
        for addr in list(self.callbacks.keys()):
            match = expr.match(addr)
            if match and (match.end() == len(addr)):
                reply = self.callbacks[addr](pattern, tags, data, client_address)
                matched += 1
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply is not None:
                    # BUGFIX: the original referenced self.server.callbacks here,
                    # but OSCAddressSpace has no 'server' attribute, so reporting
                    # this error raised AttributeError instead of TypeError.
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.callbacks[addr], type(reply)))
        if matched == 0:
            if 'default' in self.callbacks:
                reply = self.callbacks['default'](pattern, tags, data, client_address)
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply is not None:
                    # BUGFIX: same self.server.callbacks bug as above.
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.callbacks['default'], type(reply)))
            else:
                raise NoCallbackError(pattern)
        return replies
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
    """RequestHandler class for the OSCServer
    """
    def setup(self):
        """Prepare RequestHandler.
        Unpacks request as (packet, source socket address)
        Creates an empty list for replies.
        """
        (self.packet, self.socket) = self.request
        self.replies = []

    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function"""
        # a plain message is dispatched immediately; any replies from the
        # callbacks are collected for finish()
        if decoded[0] != "#bundle":
            self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return
        now = time.time()
        timetag = decoded[1]
        # honour the bundle's timetag: block until the requested time
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        for msg in decoded[2:]:
            self._unbundle(msg)

    def handle(self):
        """Handle incoming OSCMessage
        """
        decoded = decodeOSC(self.packet)
        if not len(decoded):
            return
        self._unbundle(decoded)

    def finish(self):
        """Finish handling OSCMessage.
        Send any reply returned by the callback(s) back to the originating client
        as an OSCMessage or OSCBundle
        """
        if self.server.return_port:
            # redirect replies to the server's configured return-port
            self.client_address = (self.client_address[0], self.server.return_port)
        if len(self.replies) > 1:
            # multiple replies are wrapped in a single OSCBundle
            msg = OSCBundle()
            for reply in self.replies:
                msg.append(reply)
        elif len(self.replies) == 1:
            msg = self.replies[0]
        else:
            return
        self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
    """Multi-threaded OSCRequestHandler;
    Starts a new RequestHandler thread for each unbundled OSCMessage
    """
    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function
        This version starts a new thread for each sub-Bundle found in the Bundle,
        then waits for all its children to finish.
        """
        # plain messages are still dispatched synchronously on this thread
        if decoded[0] != "#bundle":
            self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return
        now = time.time()
        timetag = decoded[1]
        # honour the bundle's timetag before launching its elements
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
            now = time.time()
        children = []
        # one worker thread per bundle element
        for msg in decoded[2:]:
            t = threading.Thread(target = self._unbundle, args = (msg,))
            t.start()
            children.append(t)
        # wait for all children to terminate
        for t in children:
            t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-adresses
that have been registered to the server with a callback-function.
If the adress-pattern of the message machtes the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
    def __init__(self, server_address, client=None, return_port=0):
        """Instantiate an OSCServer.
        - server_address ((host, port) tuple): the local host & UDP-port
        the server listens on
        - client (OSCClient instance): The OSCClient used to send replies from this server.
        If none is supplied (default) an OSCClient will be created.
        - return_port (int): if supplied, sets the default UDP destination-port
        for replies coming from this server.
        """
        UDPServer.__init__(self, server_address, self.RequestHandlerClass)
        OSCAddressSpace.__init__(self)
        self.setReturnPort(return_port)
        self.error_prefix = ""
        self.info_prefix = "/info"
        # a socket timeout makes handle_request() return periodically,
        # so serve_forever() can notice self.running going False
        self.socket.settimeout(self.socket_timeout)
        self.running = False
        self.client = None
        if client == None:
            self.client = OSCClient(server=self)
        else:
            self.setClient(client)
    def setClient(self, client):
        """Associate this Server with a new local Client instance, closing the Client this Server is currently using.
        """
        if not isinstance(client, OSCClient):
            raise ValueError("'client' argument is not a valid OSCClient object")
        if client.server != None:
            raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
        # Server socket is already listening at this point, so we can't use the client's socket.
        # we'll have to force our socket on the client...
        client_address = client.address()  # client may be already connected
        client.close()                     # shut-down that socket
        # force our socket upon the client
        client.socket = self.socket.dup()
        client.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, client.sndbuf_size)
        client._fd = client.socket.fileno()
        client.server = self
        if client_address:
            # re-establish the client's previous connection on the new socket
            client.connect(client_address)
            if not self.return_port:
                self.return_port = client_address[1]
        if self.client != None:
            self.client.close()
        self.client = client
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
    """Describe this Server: class name, software version and, when bound,
    the local address it is listening on."""
    addr = self.address()
    suffix = (" listening on osc://%s" % getUrlStr(addr)) if addr else " (unbound)"
    return "%s v%s.%s-%s%s" % ((self.__class__.__name__,) + version + (suffix,))
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
return cmp(self.socket._sock, other.socket._sock)
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
    """Return the (host, port) tuple this server's socket is bound to,
    or None when the socket is unbound."""
    sock = self.socket
    try:
        bound = sock.getsockname()
    except socket.error:
        bound = None
    return bound
def setReturnPort(self, port):
    """Set the default UDP destination-port for replies from this server.
    Ports outside the open interval (1024, 65536) clear the override."""
    self.return_port = port if 1024 < port < 65536 else None
def setSrvInfoPrefix(self, pattern):
    """Set the leading OSC-address part used when replying to server-info
    requests. A non-empty pattern is normalised to one leading '/'."""
    self.info_prefix = '/' + pattern.strip('/') if pattern else pattern
def setSrvErrorPrefix(self, pattern=""):
    """Set the OSC-address under which errors raised while handling received
    messages are reported back to the remote client. An empty pattern
    (the default) disables error reporting to clients."""
    self.error_prefix = '/' + pattern.strip('/') if pattern else pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
    """Register the default set of OSC-address handlers with this Server:
      - 'default'                  -> noCallback_handler
      - '<prefix><info_prefix>'    -> serverInfo_handler
      - '<prefix><error_prefix>'   -> msgPrinter_handler
      - '<prefix>/print'           -> msgPrinter_handler
    and, when the local Client is an OSCMultiClient:
      - '<prefix>/subscribe'       -> subscription_handler
      - '<prefix>/unsubscribe'     -> subscription_handler
    The given 'error_prefix' also becomes the default prefix for error
    messages *sent from* this server. Do not reuse the incoming
    'info_prefix' as the reply prefix set via setSrvInfoPrefix() -- replying
    to '/info' on '/info' could create an endless message loop.
    """
    self.error_prefix = error_prefix
    bindings = [
        ('default', self.noCallback_handler),
        (prefix + info_prefix, self.serverInfo_handler),
        (prefix + error_prefix, self.msgPrinter_handler),
        (prefix + '/print', self.msgPrinter_handler),
    ]
    if isinstance(self.client, OSCMultiClient):
        bindings.append((prefix + '/subscribe', self.subscription_handler))
        bindings.append((prefix + '/unsubscribe', self.subscription_handler))
    for pattern, callback in bindings:
        self.addMsgHandler(pattern, callback)
def printErr(self, txt):
    """Write 'OSCServer: txt' plus a newline to sys.stderr."""
    print("OSCServer: %s" % txt, file=sys.stderr)
def sendOSCerror(self, txt, client_address):
    """Send 'txt' to client_address, wrapped in an OSCMessage (single line)
    or an OSCBundle (multiple lines) addressed to self.error_prefix.
    A configured self.return_port overrides the destination port."""
    lines = txt.split('\n')
    if len(lines) == 1:
        msg = OSCMessage(self.error_prefix)
        msg.append(lines[0])
    else:
        # str.split always yields at least one element, so this covers >1
        msg = OSCBundle(self.error_prefix)
        for line in lines:
            msg.append(line)
    if self.return_port:
        client_address = (client_address[0], self.return_port)
    self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
    """Log 'txt' locally via printErr() and, when an error_prefix is
    configured, also forward it to the client via sendOSCerror()."""
    self.printErr(txt)
    if self.error_prefix:
        self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
    """Send 'txt' to client_address, wrapped in an OSCMessage (single line)
    or an OSCBundle (multiple lines) addressed to self.info_prefix.
    A configured self.return_port overrides the destination port."""
    lines = txt.split('\n')
    if len(lines) == 1:
        msg = OSCMessage(self.info_prefix)
        msg.append(lines[0])
    else:
        # str.split always yields at least one element, so this covers >1
        msg = OSCBundle(self.info_prefix)
        for line in lines:
            msg.append(line)
    if self.return_port:
        client_address = (client_address[0], self.return_port)
    self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
    """Handle an exception raised inside a message handler gracefully:
    log it to stderr (with an optional traceback) and, when error_prefix
    is configured, report it back to the client as an OSC error message."""
    e_type, e_val = sys.exc_info()[:2]
    self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e_val)))
    if self.print_tracebacks:
        import traceback
        traceback.print_exc()  # XXX But this goes to stderr!
    if len(self.error_prefix):
        self.sendOSCerror("%s: %s" % (e_type.__name__, str(e_val)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
    """Fallback message-handler invoked when no callback matched the
    received OSC-address.

    Handler signature (shared by all registered handlers):
      - addr (string): OSC-address pattern of the received Message
      - tags (string): OSC-typetags of the arguments (without leading comma)
      - data (list): the Message's arguments (len(tags) == len(data))
      - client_address ((host, port) tuple): origin of the message
    Reports the unmatched address via reportErr() and returns None.
    """
    self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
    """Debug message-handler: print the received message (address, origin
    and argument list) to stderr via printErr() and return None.

    Handler signature (shared by all registered handlers):
      - addr (string): OSC-address pattern of the received Message
      - tags (string): OSC-typetags of the arguments (without leading comma)
      - data (list): the Message's arguments (len(tags) == len(data))
      - client_address ((host, port) tuple): origin of the message
    """
    self.printErr("OSCMessage '%s' from %s: %s" % (addr, getUrlStr(client_address), str(data)))
def serverInfo_handler(self, addr, tags, data, client_address):
    """Message-handler replying with information about this server.

    Handler signature (shared by all registered handlers):
      - addr (string): OSC-address pattern of the received Message
      - tags (string): OSC-typetags of the arguments (without leading comma)
      - data (list): the Message's arguments (len(tags) == len(data))
      - client_address ((host, port) tuple): origin of the message
    The reply depends on the first argument of the received message:
      - 'help' | 'info' : server type & version info, plus the list of
        commands understood by this handler
      - 'list' | 'ls' : a bundle of 'address <string>' messages listing the
        server's OSC address-space
      - 'clients' | 'targets' : a bundle of
        'target osc://<host>:<port>[<prefix>] [<filter>] [...]' messages,
        listing the local Client-instance's subscribed remote clients
    Returns the reply (OSCMessage or OSCBundle), or None.
    """
    if len(data) == 0:
        return None
    # first argument selects the sub-command; remaining args are ignored
    cmd = data.pop(0)
    reply = None
    if cmd in ('help', 'info'):
        reply = OSCBundle(self.info_prefix)
        reply.append(('server', str(self)))
        reply.append(('info_command', "ls | list : list OSC address-space"))
        reply.append(('info_command', "clients | targets : list subscribed clients"))
    elif cmd in ('ls', 'list'):
        reply = OSCBundle(self.info_prefix)
        for addr in list(self.callbacks.keys()):
            reply.append(('address', addr))
    elif cmd in ('clients', 'targets'):
        if hasattr(self.client, 'getOSCTargetStrings'):
            # OSCMultiClient: list every subscribed target
            reply = OSCBundle(self.info_prefix)
            for trg in self.client.getOSCTargetStrings():
                reply.append(('target',) + trg)
        else:
            # plain OSCClient: at most one connected target
            cli_addr = self.client.address()
            if cli_addr:
                reply = OSCMessage(self.info_prefix)
                reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
    else:
        self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
    return reply
def _subscribe(self, data, client_address):
    """Handle the actual subscription. The provided 'data' is concatenated together to form a
    '<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
    parseUrlStr() & parseFilterStr() to actually retrieve <host>, <port>, etc.

    This 'long way 'round' approach (almost) guarantees that the subscription works,
    regardless of how the bits of the <url> are encoded in 'data'.
    Returns an OSCMessage echoing the (new) subscription, or None.
    """
    url = ""
    have_port = False
    # re-assemble the url string from the (arbitrarily typed) OSC arguments;
    # the first int argument is taken to be the port number
    for item in data:
        if (isinstance(item,int)) and not have_port:
            url += ":%d" % item
            have_port = True
        elif isinstance(item,str):
            url += item
    (addr, tail) = parseUrlStr(url)
    (prefix, filters) = parseFilterStr(tail)
    if addr != None:
        (host, port) = addr
        # fall back to the message's source address for any missing parts
        if not host:
            host = client_address[0]
        if not port:
            port = client_address[1]
        addr = (host, port)
    else:
        # no address given at all: subscribe the sender itself
        addr = client_address
    self.client._setTarget(addr, prefix, filters)
    # echo the (new) subscription so the remote end can verify it
    trg = self.client.getOSCTargetStr(addr)
    if trg[0] != None:
        reply = OSCMessage(self.info_prefix)
        reply.append(('target',) + trg)
        return reply
def _unsubscribe(self, data, client_address):
    """Handle the actual unsubscription. The provided 'data' is concatenated together to form a
    '<host>:<port>[<prefix>]' string, which is then passed to
    parseUrlStr() to actually retrieve <host>, <port> & <prefix>.

    This 'long way 'round' approach (almost) guarantees that the unsubscription works,
    regardless of how the bits of the <url> are encoded in 'data'.
    Returns an OSCMessage describing the failure when the target was not
    subscribed, or None on success.
    """
    url = ""
    have_port = False
    # re-assemble the url string from the OSC arguments; the first int
    # argument is taken to be the port number
    for item in data:
        if (isinstance(item,int)) and not have_port:
            url += ":%d" % item
            have_port = True
        elif isinstance(item,str):
            url += item
    (addr, _) = parseUrlStr(url)
    if addr == None:
        # no address given at all: unsubscribe the sender itself
        addr = client_address
    else:
        (host, port) = addr
        if not host:
            host = client_address[0]
        if not port:
            try:
                # look up the port from the existing subscription for 'host'
                (host, port) = self.client._searchHostAddr(host)
            except NotSubscribedError:
                port = client_address[1]
        addr = (host, port)
    try:
        self.client._delTarget(addr)
    except NotSubscribedError as e:
        # report (and reply) instead of crashing on an unknown target
        txt = "%s: %s" % (e.__class__.__name__, str(e))
        self.printErr(txt)
        reply = OSCMessage(self.error_prefix)
        reply.append(txt)
        return reply
def subscription_handler(self, addr, tags, data, client_address):
    """Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
    if the local Client supports this (i.e. OSCMultiClient).
    Supported commands:
      - 'help' | 'info' : Reply contains server type & version info, plus a list of
        available 'commands' understood by this handler
      - 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
        messages, listing the local Client-instance's subscribed remote clients.
      - '[subscribe | listen | sendto | target] <url> [<filter> ...]' : Subscribe remote client/server at <url>,
        and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
        arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
        a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
        and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
        Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
      - '[unsubscribe | silence | nosend | deltarget] <url>' : Unsubscribe remote client/server at <url>
        If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
    The <url> given to the subscribe/unsubscribe handler should be of the form:
    '[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
    If <host> is not specified, the IP-address of the message's source is used.
    If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
    the associated port is used.
    If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
    If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
    sent to the subscribed host.
    If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
    port and prefix all match the subscription.
    If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
    match the subscription.
    """
    if not isinstance(self.client, OSCMultiClient):
        raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
    # the command may arrive either as the last element of the OSC-address
    # (e.g. '/subscribe') or as the first string argument of the message
    addr_cmd = addr.split('/')[-1]
    if len(data):
        if data[0] in ('help', 'info'):
            reply = OSCBundle(self.info_prefix)
            reply.append(('server', str(self)))
            reply.append(('subscribe_command', "ls | list : list subscribed targets"))
            reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
            reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
            return reply
        if data[0] in ('ls', 'list'):
            reply = OSCBundle(self.info_prefix)
            for trg in self.client.getOSCTargetStrings():
                reply.append(('target',) + trg)
            return reply
        if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
            # command given as first argument; remaining args form the url
            return self._subscribe(data[1:], client_address)
        if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
            return self._unsubscribe(data[1:], client_address)
    # no recognized command argument: fall back to the OSC-address itself
    if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
        return self._subscribe(data, client_address)
    if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
        return self._unsubscribe(data, client_address)
class ForkingOSCServer(ThreadingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server forks a new process to handle each incoming request.

    NOTE(review): despite the name and the sentence above, this class mixes
    in ThreadingMixIn (threads), not ForkingMixIn (processes) -- presumably
    a portability choice (forking is unavailable on Windows). Confirm intent
    before relying on per-request process isolation.
    """
    # handler that unbundles (and dispatches) bundle-elements in separate threads
    RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server starts a new thread to handle each incoming request.
    """
    # handler that unbundles (and dispatches) bundle-elements in separate threads
    RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
    """Base Class for all OSC-related errors.

    FIX: call Exception.__init__ so that 'args' is populated -- the original
    left args empty, which breaks repr() and pickling of the exception.
    The 'message' attribute is kept for backward compatibility.
    """
    def __init__(self, message):
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message
class OSCClientError(OSCError):
    """Raised for errors originating in an OSCClient."""
    pass
class OSCServerError(OSCError):
    """Raised for errors originating in an OSCServer."""
    pass
class NoCallbackError(OSCServerError):
    """Raised by an OSCServer when a received OSCMessage matches no
    registered OSC-address and no 'default' handler exists."""
    def __init__(self, pattern):
        """'pattern' is the OSC-address of the unmatched message."""
        self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
    """Raised by an OSCMultiClient when asked to unsubscribe a host that is
    not currently subscribed."""
    def __init__(self, addr, prefix=None):
        url = getUrlStr(addr, prefix if prefix else '')
        self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
    """ This is the central class of a streaming OSC server. If a client
    connects to the server, the server instantiates a OSCStreamRequestHandler
    for each new connection. This is fundamentally different to a packet
    oriented server which has a single address space for all connections.
    This connection based (streaming) OSC server maintains an address space
    for each single connection, because usually tcp server spawn a new thread
    or process for each new connection. This would generate severe
    multithreading synchronization problems when each thread would operate on
    the same address space object. Therefore: To implement a streaming/TCP OSC
    server a custom handler must be implemented which implements the
    setupAddressSpace member in which it creates its own address space for this
    very connection. This has been done within the testbench and can serve as
    inspiration.
    """
    def __init__(self, request, client_address, server):
        """ Initialize all base classes. The address space must be initialized
        before the stream request handler because the initialization function
        of the stream request handler calls the setup member which again
        requires an already initialized address space.
        """
        # serializes reply transmissions and sendOSC() calls on one socket
        self._txMutex = threading.Lock()
        OSCAddressSpace.__init__(self)
        StreamRequestHandler.__init__(self, request, client_address, server)

    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function"""
        if decoded[0] != "#bundle":
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return
        # honour the bundle's timetag before dispatching its elements
        now = time.time()
        timetag = decoded[1]
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        for msg in decoded[2:]:
            self._unbundle(msg)

    def setup(self):
        StreamRequestHandler.setup(self)
        print("SERVER: New client connection.")
        self.setupAddressSpace()
        self.server._clientRegister(self)

    def setupAddressSpace(self):
        """ Override this function to customize your address space. """
        pass

    def finish(self):
        StreamRequestHandler.finish(self)
        self.server._clientUnregister(self)
        print("SERVER: Client connection handled.")

    def _transmit(self, data):
        """Send all of 'data'; returns False when the connection is closed."""
        sent = 0
        while sent < len(data):
            tmp = self.connection.send(data[sent:])
            if tmp == 0:
                return False
            sent += tmp
        return True

    def _transmitMsg(self, msg):
        """Send an OSC message over a streaming socket. Raises exception if it
        should fail. If everything is transmitted properly, True is returned. If
        socket has been closed, False.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        try:
            binary = msg.getBinary()
            # prepend length of packet before the actual message (big endian)
            # BUGFIX (py3): array.array('c', ...) and .tostring() no longer
            # exist; struct.pack builds the 4-byte length prefix directly
            len_big_endian = struct.pack(">L", len(binary))
            if self._transmit(len_big_endian) and self._transmit(binary):
                return True
            return False
        except socket.error as e:
            # BUGFIX (py3): exceptions are not subscriptable; use e.errno
            if e.errno == errno.EPIPE:  # broken pipe
                return False
            raise

    def _receive(self, count):
        """ Receive a certain amount of data from the socket and return it. If the
        remote end should be closed in the meanwhile None is returned.
        """
        chunk = self.connection.recv(count)
        if not chunk or len(chunk) == 0:
            return None
        while len(chunk) < count:
            tmp = self.connection.recv(count - len(chunk))
            if not tmp or len(tmp) == 0:
                return None
            chunk = chunk + tmp
        return chunk

    def _receiveMsg(self):
        """ Receive OSC message from a socket and decode.
        If an error occurs, None is returned, else the message.
        """
        # get OSC packet size from stream which is prepended each transmission
        chunk = self._receive(4)
        if chunk is None:
            print("SERVER: Socket has been closed.")
            return None
        # extract message length from big endian unsigned long (32 bit)
        slen = struct.unpack(">L", chunk)[0]
        # receive the actual message
        chunk = self._receive(slen)
        if chunk is None:
            print("SERVER: Socket has been closed.")
            return None
        # decode OSC data and dispatch
        msg = decodeOSC(chunk)
        if msg is None:
            raise OSCError("SERVER: Message decoding failed.")
        return msg

    def handle(self):
        """
        Handle a connection.
        """
        # set socket blocking to avoid "resource currently not available"
        # exceptions, because the connection socket inherits the settings
        # from the listening socket and this times out from time to time
        # in order to provide a way to shut the server down. But we want
        # clean and blocking behaviour here
        self.connection.settimeout(None)
        print("SERVER: Entered server loop")
        try:
            while True:
                decoded = self._receiveMsg()
                if decoded is None:
                    return
                elif len(decoded) <= 0:
                    # if message decoding fails we try to stay in sync but print a message
                    print("OSC stream server: Spurious message received.")
                    continue
                self.replies = []
                self._unbundle(decoded)
                # collect any handler replies into a single transmission
                if len(self.replies) > 1:
                    msg = OSCBundle()
                    for reply in self.replies:
                        msg.append(reply)
                elif len(self.replies) == 1:
                    msg = self.replies[0]
                else:
                    # no replies, continue receiving
                    continue
                self._txMutex.acquire()
                txOk = self._transmitMsg(msg)
                self._txMutex.release()
                if not txOk:
                    break
        except socket.error as e:
            # BUGFIX (py3): use e.errno instead of e[0]
            if e.errno == errno.ECONNRESET:
                # if connection has been reset by client, we do not care much
                # about it, we just assume our duty fullfilled
                print("SERVER: Connection has been reset by peer.")
            else:
                raise

    def sendOSC(self, oscData):
        """ This member can be used to transmit OSC messages or OSC bundles
        over the client/server connection. It is thread safe.
        """
        self._txMutex.acquire()
        result = self._transmitMsg(oscData)
        self._txMutex.release()
        return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so called processing queue in which
all pending messages or subbundles are inserted to be processed in the future.
When a subbundle or message gets queued, a mechanism must be provided that
those messages get invoked when time asks for them. There are the following
opportunities:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
    """ A connection oriented (TCP/IP) OSC server.
    """
    # define a socket timeout, so the serve_forever loop can actually exit.
    # with 2.6 and server.shutdown this wouldn't be necessary
    socket_timeout = 1

    # this is the class which handles a new connection. Override this for a
    # useful customized server. See the testbench for an example
    RequestHandlerClass = OSCStreamRequestHandler

    def __init__(self, address):
        """Instantiate an OSCStreamingServer.
          - address ((host, port) tuple): the local host & TCP-port
          the server listens on for new connections.
        """
        self._clientList = []
        self._clientListMutex = threading.Lock()
        TCPServer.__init__(self, address, self.RequestHandlerClass)
        self.socket.settimeout(self.socket_timeout)

    def serve_forever(self):
        """Handle one request at a time until server is closed.
        Had to add this since 2.5 does not support server.shutdown()
        """
        self.running = True
        while self.running:
            self.handle_request()  # this times-out when no data arrives.

    def start(self):
        """ Start the server thread. """
        self._server_thread = threading.Thread(target=self.serve_forever)
        self._server_thread.setDaemon(True)
        self._server_thread.start()

    def stop(self):
        """ Stop the server thread and close the socket. """
        self.running = False
        self._server_thread.join()
        self.server_close()
        # 2.6 only
        #self.shutdown()

    def _clientRegister(self, client):
        """ Gets called by each request/connection handler when connection is
        established to add itself to the client list
        """
        self._clientListMutex.acquire()
        self._clientList.append(client)
        self._clientListMutex.release()

    def _clientUnregister(self, client):
        """ Gets called by each request/connection handler when connection is
        lost to remove itself from the client list
        """
        self._clientListMutex.acquire()
        self._clientList.remove(client)
        self._clientListMutex.release()

    def broadcastToClients(self, oscData):
        """ Send OSC message or bundle to all connected clients.
        Returns True only when transmission succeeded for every client.
        """
        result = True
        for client in self._clientList:
            # BUGFIX: evaluate sendOSC() before 'and' -- the original
            # 'result = result and client.sendOSC(...)' short-circuited after
            # the first failure and silently skipped all remaining clients.
            result = client.sendOSC(oscData) and result
        return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
    """ Implements a server which spawns a separate thread for each incoming
    connection. Care must be taken since the OSC address space is for all
    the same.
    """
    # FIX: the docstring used to follow the 'pass' statement, so it was a
    # dead string expression instead of the class __doc__; it is now first.
    pass
class OSCStreamingClient(OSCAddressSpace):
    """ OSC streaming client.
    A streaming client establishes a connection to a streaming server but must
    be able to handle replies by the server as well. To accomplish this the
    receiving takes place in a secondary thread, because no one knows if we
    have to expect a reply or not, i.e. synchronous architecture doesn't make
    much sense.
    Replies will be matched against the local address space. If message
    handlers access code of the main thread (where the client messages are sent
    to the server) care must be taken e.g. by installing sychronization
    mechanisms or by using an event dispatcher which can handle events
    originating from other threads.
    """
    # set outgoing socket buffer size
    sndbuf_size = 4096 * 8
    rcvbuf_size = 4096 * 8

    def __init__(self):
        # serializes transmissions from the main and receiving threads
        self._txMutex = threading.Lock()
        OSCAddressSpace.__init__(self)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
        # finite timeout lets the receiving thread notice close() requests
        self.socket.settimeout(1.0)
        self._running = False

    def _receiveWithTimeout(self, count):
        """Receive exactly 'count' bytes; None on shutdown, close or reset."""
        # BUGFIX (py3): accumulate bytes, not str -- socket.recv() returns bytes
        chunk = b""
        while len(chunk) < count:
            try:
                tmp = self.socket.recv(count - len(chunk))
            except socket.timeout:
                if not self._running:
                    print("CLIENT: Socket timed out and termination requested.")
                    return None
                else:
                    continue
            except socket.error as e:
                # BUGFIX (py3): exceptions are not subscriptable; use e.errno
                if e.errno == errno.ECONNRESET:
                    print("CLIENT: Connection reset by peer.")
                    return None
                else:
                    raise
            if not tmp or len(tmp) == 0:
                print("CLIENT: Socket has been closed.")
                return None
            chunk = chunk + tmp
        return chunk

    def _receiveMsgWithTimeout(self):
        """ Receive OSC message from a socket and decode.
        If an error occurs, None is returned, else the message.
        """
        # get OSC packet size from stream which is prepended each transmission
        chunk = self._receiveWithTimeout(4)
        if not chunk:
            return None
        # extract message length from big endian unsigned long (32 bit)
        slen = struct.unpack(">L", chunk)[0]
        # receive the actual message
        chunk = self._receiveWithTimeout(slen)
        if not chunk:
            return None
        # decode OSC content
        msg = decodeOSC(chunk)
        if msg is None:
            raise OSCError("CLIENT: Message decoding failed.")
        return msg

    def _receiving_thread_entry(self):
        print("CLIENT: Entered receiving thread.")
        self._running = True
        while self._running:
            decoded = self._receiveMsgWithTimeout()
            if not decoded:
                break
            elif len(decoded) <= 0:
                continue
            self.replies = []
            self._unbundle(decoded)
            # collect any handler replies and send them back to the server
            if len(self.replies) > 1:
                msg = OSCBundle()
                for reply in self.replies:
                    msg.append(reply)
            elif len(self.replies) == 1:
                msg = self.replies[0]
            else:
                continue
            self._txMutex.acquire()
            txOk = self._transmitMsgWithTimeout(msg)
            self._txMutex.release()
            if not txOk:
                break
        print("CLIENT: Receiving thread terminated.")

    def _unbundle(self, decoded):
        """Recursively dispatch a decoded OSC message or bundle."""
        if decoded[0] != "#bundle":
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
            return
        # honour the bundle's timetag before dispatching its elements
        now = time.time()
        timetag = decoded[1]
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        for msg in decoded[2:]:
            self._unbundle(msg)

    def connect(self, address):
        self.socket.connect(address)
        self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
        self.receiving_thread.start()

    def close(self):
        # let socket time out
        self._running = False
        self.receiving_thread.join()
        self.socket.close()

    def _transmitWithTimeout(self, data):
        sent = 0
        while sent < len(data):
            try:
                tmp = self.socket.send(data[sent:])
            except socket.timeout:
                if not self._running:
                    print("CLIENT: Socket timed out and termination requested.")
                    return False
                else:
                    continue
            except socket.error as e:
                # BUGFIX (py3): use e.errno instead of e[0]
                if e.errno == errno.ECONNRESET:
                    print("CLIENT: Connection reset by peer.")
                    return False
                else:
                    raise
            if tmp == 0:
                return False
            sent += tmp
        return True

    def _transmitMsgWithTimeout(self, msg):
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        binary = msg.getBinary()
        # prepend length of packet before the actual message (big endian)
        # BUGFIX (py3): array.array('c', ...) and .tostring() no longer exist;
        # struct.pack builds the 4-byte length prefix directly
        len_big_endian = struct.pack(">L", len(binary))
        if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
            return True
        else:
            return False

    def sendOSC(self, msg):
        """Send an OSC message or bundle to the server. Returns True on success.
        """
        self._txMutex.acquire()
        txOk = self._transmitMsgWithTimeout(msg)
        self._txMutex.release()
        return txOk

    def __str__(self):
        """Returns a string containing this Client's Class-name, software-version
        and the remote-address it is connected to (if any)
        """
        out = self.__class__.__name__
        out += " v%s.%s-%s" % version
        addr = self.socket.getpeername()
        if addr:
            out += " connected to osc://%s" % getUrlStr(addr)
        else:
            out += " (unconnected)"
        return out

    def __eq__(self, other):
        """Compare function.

        BUGFIX: the original used cmp() (removed in Python 3), compared the
        long-gone socket._sock attribute, and read a 'server' attribute that
        __init__ never sets. Equality is now based on the underlying socket.
        """
        if not isinstance(other, self.__class__):
            return False
        return self.socket == other.socket

    def __ne__(self, other):
        """Compare function.
        """
        return not self.__eq__(other)
| StarcoderdataPython |
140837 | """
Running operational space control with a PyGame display, and using the pydmps
library to specify a trajectory for the end-effector to follow, in
this case, a bell shaped velocity profile.
To install the pydmps library, clone https://github.com/studywolf/pydmps
and run 'python setup.py develop'
***NOTE*** there are two ways to use this filter
1: wrt to timesteps
- the dmp is created during the instantiation of the class and the next step
along the path is returned by calling the `step()` function
2: wrt to time
- after instantiation, calling `generate_path_function()` interpolates the dmp
to the specified time limit. Calling the `next_timestep(t)` function at a
specified time will return the end-effector state at that point along the path
planner. This ensures that the path will reach the desired target within the
time_limit specified in `generate_path_function()`
"""
import numpy as np
import matplotlib.pyplot as plt
try:
import pydmps
except ImportError:
print('\npydmps library required, see github.com/studywolf/pydmps\n')
from .path_planner import PathPlanner
class BellShaped(PathPlanner):
    """
    Path planner that reaches a 3D target with a bell shaped (Gaussian)
    velocity profile, implemented as a discrete DMP (dynamic movement
    primitive) from the pydmps library.

    PARAMETERS
    ----------
    n_timesteps: int, Optional (Default: 3000)
        the number of steps to break the path into
    error_scale: int, Optional (Default: 1)
        the scaling factor to apply to the error term, increasing error passed
        1 will increase the speed of motion
    """

    def __init__(self, n_timesteps=3000, error_scale=1):
        self.n_timesteps = n_timesteps
        self.error_scale = error_scale

        # create a dmp for a straight reach with a bell shaped velocity profile
        x = np.linspace(0, np.pi * 2, 100)
        a = 1  # amplitude
        b = np.pi  # center
        c = 1  # std deviation
        g = a * np.exp(-(x - b) ** 2 / (2 * c) ** 2)
        g /= np.sum(g)  # normalize

        # integrate desired velocities to get desired positions over time
        y_des = np.cumsum(g)

        # want to follow the same trajectory in (x, y, z)
        y_des = np.vstack([y_des, y_des, y_des])

        # we can control the DMP rollout speed with the time step size
        # the DMP will reach the target in 1s of sim time
        dt = 1 / n_timesteps
        self.dmps = pydmps.DMPs_discrete(n_dmps=3, n_bfs=50, dt=dt)
        self.dmps.imitate_path(y_des)

    def generate_path(self, position, target_pos, plot=False):
        """
        Calls the step function self.n_timestep times to pregenerate
        the entire path planner

        PARAMETERS
        ----------
        position: numpy.array
            the current position of the system
        target_pos: numpy.array
            the target position
        plot: boolean, optional (Default: False)
            plot the path after generating if True
        """
        # re-target the DMP before rolling it out in full
        self.reset(target_pos=target_pos, position=position)

        self.position, self.velocity, _ = self.dmps.rollout(
            timesteps=self.n_timesteps)
        # the DMP trajectory starts at the origin; shift it to the
        # actual start position
        self.position = np.array([traj + self.origin for traj in self.position])

        # reset trajectory index
        self.n = 0

        if plot:
            plt.plot(self.position)
            plt.legend(['X', 'Y', 'Z'])
            plt.show()

        return self.position, self.velocity

    def reset(self, target_pos, position):
        """
        Resets the dmp path planner to a new state and target_pos

        PARAMETERS
        ----------
        target_pos: list of 3 floats
            the target_pos end-effector position in cartesian coordinates [meters]
        position: list of 3 floats
            the current end-effector cartesian position [meters]
        """
        self.origin = position
        self.dmps.reset_state()
        # DMP works in origin-relative coordinates, so offset the goal
        self.dmps.goal = target_pos - self.origin

    def _step(self, error=None):
        """
        Steps through the dmp, returning the next position and velocity along
        the path planner.

        error: float, optional (Default: None)
            scaled by self.error_scale to slow the rollout when tracking
            error is large
        """
        if error is None:
            error = 0
        # get the next point in the target trajectory from the dmp
        position, velocity, _ = self.dmps.step(error=error * self.error_scale)

        # add the start position offset since the dmp starts from the origin
        position = position + self.origin

        return position, velocity
| StarcoderdataPython |
36667 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .core import LazyLoad
from .cache import cache, cache_parent
from .. import serializers, utils, types, errors, compat
from ..compat import Enum, six
RESOURCE_SIZE_MAX = 512 * 1024 * 1024 # a single resource's size must be at most 512M
class Resource(LazyLoad):
    """
    Resource is useful when writing UDF or MapReduce. This is an abstract class.

    Basically, resource can be either a file resource or a table resource.
    File resource can be ``file``, ``py``, ``jar``, ``archive`` in details.

    .. seealso:: :class:`odps.models.FileResource`, :class:`odps.models.PyResource`,
                 :class:`odps.models.JarResource`, :class:`odps.models.ArchiveResource`,
                 :class:`odps.models.TableResource`
    """
    __slots__ = 'content_md5', 'is_temp_resource', 'volume_path', '_type_indicator'

    class Type(Enum):
        # resource type identifiers as exchanged with the service;
        # NOTE: 'UNKOWN' misspelling is kept deliberately for
        # compatibility with existing values -- do not "fix" it.
        FILE = 'FILE'
        JAR = 'JAR'
        PY = 'PY'
        ARCHIVE = 'ARCHIVE'
        TABLE = 'TABLE'
        VOLUMEFILE = 'VOLUMEFILE'
        VOLUMEARCHIVE = 'VOLUMEARCHIVE'
        UNKOWN = 'UNKOWN'

    _type_indicator = 'type'

    # XML-serialized metadata fields
    name = serializers.XMLNodeField('Name')
    owner = serializers.XMLNodeField('Owner')
    comment = serializers.XMLNodeField('Comment')
    type = serializers.XMLNodeField('ResourceType', parse_callback=lambda t: Resource.Type(t.upper()))
    creation_time = serializers.XMLNodeField('CreationTime', parse_callback=utils.parse_rfc822)
    last_modified_time = serializers.XMLNodeField('LastModifiedTime', parse_callback=utils.parse_rfc822)
    last_updator = serializers.XMLNodeField('LastUpdator')
    size = serializers.XMLNodeField('ResourceSize', parse_callback=int)
    source_table_name = serializers.XMLNodeField('TableName')

    @classmethod
    def _get_cls(cls, typo):
        # Map a Resource.Type (or its string name) to the concrete
        # subclass implementing it; fall back to `cls` itself.
        if typo is None:
            return cls

        if isinstance(typo, six.string_types):
            typo = Resource.Type(typo.upper())

        clz = lambda name: globals()[name]
        if typo == Resource.Type.FILE:
            return clz('FileResource')
        elif typo == Resource.Type.JAR:
            return clz('JarResource')
        elif typo == Resource.Type.PY:
            return clz('PyResource')
        elif typo == Resource.Type.ARCHIVE:
            return clz('ArchiveResource')
        elif typo == Resource.Type.TABLE:
            return clz('TableResource')
        elif typo == Resource.Type.VOLUMEARCHIVE:
            return clz('VolumeArchiveResource')
        elif typo == Resource.Type.VOLUMEFILE:
            return clz('VolumeFileResource')
        else:
            return cls

    def create(self, overwrite=False, **kw):
        """Abstract; concrete subclasses implement server-side creation."""
        raise NotImplementedError

    @staticmethod
    def _filter_cache(_, **kwargs):
        # Only cache instances whose concrete type is already known.
        return kwargs.get('type') is not None and kwargs['type'] != Resource.Type.UNKOWN

    @cache
    def __new__(cls, *args, **kwargs):
        # Dispatch to the concrete subclass when the type is known;
        # otherwise build a provisional Resource, load its metadata from
        # the service, and re-dispatch with the discovered type.
        typo = kwargs.get('type')
        if typo is not None or (cls != Resource and issubclass(cls, Resource)):
            return object.__new__(cls._get_cls(typo))

        kwargs['type'] = Resource.Type.UNKOWN
        obj = Resource(**kwargs)
        obj.reload()
        return Resource(**obj.extract())

    def __init__(self, **kwargs):
        # accept the type either as an enum member or its string name
        typo = kwargs.get('type')
        if isinstance(typo, six.string_types):
            kwargs['type'] = Resource.Type(typo.upper())
        super(Resource, self).__init__(**kwargs)

    @property
    def _project(self):
        return self._parent._parent.name

    @property
    def project(self):
        return self._project

    def reload(self):
        """Refresh metadata; the service returns it via HTTP headers of a
        ``?meta`` GET rather than an XML body."""
        url = self.resource()
        resp = self._client.get(url, params={'meta': ''})

        self.owner = resp.headers.get('x-odps-owner')
        resource_type = resp.headers.get('x-odps-resource-type')
        self.type = Resource.Type(resource_type.upper())
        self.comment = resp.headers.get('x-odps-comment')
        self.last_updator = resp.headers.get('x-odps-updator')

        size = resp.headers.get('x-odps-resource-size')
        self.size = None if size is None else int(size)

        self.creation_time = utils.parse_rfc822(
            resp.headers.get('x-odps-creation-time'))
        self.last_modified_time = utils.parse_rfc822(
            resp.headers.get('Last-Modified'))

        self.source_table_name = resp.headers.get('x-odps-copy-table-source')
        self.volume_path = resp.headers.get('x-odps-copy-file-source')
        self.content_md5 = resp.headers.get('Content-MD5')

        self._loaded = True

    def _reload_size(self):
        # lightweight refresh of just the size header
        url = self.resource()
        resp = self._client.get(url, params={'meta': ''})

        size = resp.headers.get('x-odps-resource-size')
        self.size = None if size is None else int(size)

    def update(self, **kw):
        """Abstract; concrete subclasses implement server-side update."""
        raise NotImplementedError

    def drop(self):
        """Delete this resource through its parent collection."""
        return self.parent.delete(self)
@cache_parent
class FileResource(Resource):
    """
    File resource represents for a file.

    Use ``open`` method to open this resource as an file-like object.
    """
    __slots__ = '_fp', '_mode', '_opened', '_size', '_need_commit', \
                '_open_binary', '_encoding'

    class Mode(Enum):
        # mirrors the builtin open() mode strings
        READ = 'r'
        WRITE = 'w'
        APPEND = 'a'
        READWRITE = 'r+'
        TRUNCEREADWRITE = 'w+'
        APPENDREADWRITE = 'a+'

    def create(self, overwrite=False, **kw):
        """Upload this file resource.

        ``file_obj`` (alias ``fileobj``) may be a unicode string, a bytes
        string, or a file-like object.  With ``overwrite=True`` the
        existing resource is replaced (HTTP PUT) and reloaded.
        """
        file_obj = kw.pop('file_obj', kw.pop('fileobj', None))
        if file_obj is None:
            raise ValueError('parameter `file_obj` cannot be None, either string or file-like object')
        # normalize text -> bytes -> BytesIO so the body can be read
        if isinstance(file_obj, six.text_type):
            file_obj = file_obj.encode('utf-8')
        if isinstance(file_obj, six.binary_type):
            file_obj = six.BytesIO(file_obj)
        if self.name is None or len(self.name.strip()) == 0:
            raise errors.ODPSError('File Resource Name should not empty.')

        method = self._client.post if not overwrite else self._client.put
        url = self.parent.resource() if not overwrite else self.resource()

        headers = {'Content-Type': 'application/octet-stream',
                   'Content-Disposition': 'attachment;filename=%s' % self.name,
                   'x-odps-resource-type': self.type.value.lower(),
                   'x-odps-resource-name': self.name}
        if self._getattr('comment') is not None:
            headers['x-odps-comment'] = self.comment
        if self._getattr('is_temp_resource'):
            headers['x-odps-resource-istemp'] = 'true' if self.is_temp_resource else 'false'

        if not isinstance(file_obj, six.string_types):
            file_obj.seek(0)
            content = file_obj.read()
        else:
            content = file_obj

        method(url, content, headers=headers)

        if overwrite:
            self.reload()
        return self

    def __init__(self, **kw):
        super(FileResource, self).__init__(**kw)
        self.type = Resource.Type.FILE
        # in-memory/file-like handle plus open() bookkeeping
        self._fp = None
        self._mode = FileResource.Mode.READ
        self._open_binary = False
        self._encoding = None
        self._size = 0
        self._opened = False
        self._need_commit = False

    def _is_create(self):
        # True when the resource does not yet exist on the server, so
        # flush() must create rather than update it.
        if self._loaded:
            return False
        try:
            self._reload_size()
            return False
        except errors.NoSuchObject:
            return True

    def open(self, mode='r', encoding='utf-8'):
        """
        The argument ``mode`` stands for the open mode for this file resource.
        It can be binary mode if the 'b' is inside. For instance,
        'rb' means opening the resource as read binary mode
        while 'r+b' means opening the resource as read+write binary mode.
        This is most import when the file is actually binary such as tar or jpeg file,
        so be aware of opening this file as a correct mode.

        Basically, the text mode can be 'r', 'w', 'a', 'r+', 'w+', 'a+'
        just like the builtin python ``open`` method.

        * ``r`` means read only
        * ``w`` means write only, the file will be truncated when opening
        * ``a`` means append only
        * ``r+`` means read+write without constraint
        * ``w+`` will truncate first then opening into read+write
        * ``a+`` can read+write, however the written content can only be appended to the end

        :param mode: the mode of opening file, described as above
        :param encoding: utf-8 as default
        :return: file-like object

        :Example:

        >>> with resource.open('r') as fp:
        >>>     fp.read(1)  # read one unicode character
        >>>     fp.write('test')  # wrong, cannot write under read mode
        >>>
        >>> with resource.open('wb') as fp:
        >>>     fp.readlines() # wrong, cannot read under write mode
        >>>     fp.write('hello world') # write bytes
        >>>
        >>> with resource.open('test_resource', 'r+') as fp:  # open as read-write mode
        >>>     fp.seek(5)
        >>>     fp.truncate()
        >>>     fp.flush()
        """
        # TODO: when reading, do not read all the data at once
        if 'b' in mode:
            self._open_binary = True
            mode = mode.replace('b', '')
        self._mode = FileResource.Mode(mode)
        self._encoding = encoding

        if self._mode in (FileResource.Mode.WRITE, FileResource.Mode.TRUNCEREADWRITE):
            # truncating modes start from an empty in-memory buffer
            io_clz = six.BytesIO if self._open_binary else six.StringIO
            self._fp = io_clz()
            self._size = 0
        else:
            # other modes pull the current content from the server
            self._fp = self.parent.read_resource(
                self, text_mode=not self._open_binary, encoding=self._encoding)
            self._reload_size()
            self._sync_size()

        self._opened = True
        return self

    def _check_read(self):
        if not self._opened:
            raise IOError('I/O operation on non-open resource')
        if self._mode in (FileResource.Mode.WRITE, FileResource.Mode.APPEND):
            raise IOError('Resource not open for reading')

    def _sync_size(self):
        # recompute self._size from the buffer, preserving the cursor
        curr_pos = self.tell()
        self.seek(0, compat.SEEK_END)
        self._size = self.tell()
        self.seek(curr_pos)

    def read(self, size=-1):
        """
        Read the file resource, read all as default.

        :param size: unicode or byte length depends on text mode or binary mode.
        :return: unicode or bytes depends on text mode or binary mode
        :rtype: str or unicode(Py2), bytes or str(Py3)
        """
        self._check_read()
        return self._fp.read(size)

    def readline(self, size=-1):
        """
        Read a single line.

        :param size: If the size argument is present and non-negative,
                     it is a maximum byte count (including the trailing newline)
                     and an incomplete line may be returned.
                     When size is not 0,
                     an empty string is returned only when EOF is encountered immediately
        :return: unicode or bytes depends on text mode or binary mode
        :rtype: str or unicode(Py2), bytes or str(Py3)
        """
        self._check_read()
        return self._fp.readline(size)

    def readlines(self, sizehint=-1):
        """
        Read as lines.

        :param sizehint: If the optional sizehint argument is present, instead of reading up to EOF,
                         whole lines totalling approximately sizehint bytes
                         (possibly after rounding up to an internal buffer size) are read.
        :return: lines
        :rtype: list
        """
        self._check_read()
        return self._fp.readlines(sizehint)

    def _check_write(self):
        if not self._opened:
            raise IOError('I/O operation on non-open resource')
        if self._mode == FileResource.Mode.READ:
            raise IOError('Resource not open for writing')

    def _check_size(self):
        # enforce the 512M single-resource limit
        if self._size > RESOURCE_SIZE_MAX:
            raise IOError('Single resource\'s max size is %sM' %
                          (RESOURCE_SIZE_MAX / (1024 ** 2)))

    def _convert(self, content):
        # coerce content to match the opened mode (bytes vs text)
        if self._open_binary and isinstance(content, six.text_type):
            return content.encode(self._encoding)
        elif not self._open_binary and isinstance(content, six.binary_type):
            return content.decode(self._encoding)
        return content

    def write(self, content):
        """
        Write content into the file resource

        :param content: content to write
        :return: None
        """
        content = self._convert(content)
        length = len(content)

        self._check_write()
        # append modes always write at the end of the buffer
        if self._mode in (FileResource.Mode.APPEND, FileResource.Mode.APPENDREADWRITE):
            self.seek(0, compat.SEEK_END)
        if length > 0:
            self._need_commit = True

        res = self._fp.write(content)
        self._sync_size()
        self._check_size()
        return res

    def writelines(self, seq):
        """
        Write lines into the file resource.

        :param seq: lines
        :return: None
        """
        seq = [self._convert(s) for s in seq]
        length = sum(len(s) for s in seq)

        self._check_write()
        if self._mode in (FileResource.Mode.APPEND, FileResource.Mode.APPENDREADWRITE):
            self.seek(0, compat.SEEK_END)
        if length > 0:
            self._need_commit = True

        res = self._fp.writelines(seq)
        self._sync_size()
        self._check_size()
        return res

    def seek(self, pos, whence=compat.SEEK_SET):  # io.SEEK_SET
        """
        Seek to some place.

        :param pos: position to seek
        :param whence: if set to 2, will seek to the end
        :return: None
        """
        return self._fp.seek(pos, whence)

    def tell(self):
        """
        Tell the current position

        :return: current position
        """
        return self._fp.tell()

    def truncate(self, size=None):
        """
        Truncate the file resource's size.

        :param size: If the optional size argument is present,
                     the file is truncated to (at most) that size.
                     The size defaults to the current position.
        :return: None
        """
        self._check_write()
        curr_pos = self.tell()
        self._fp.truncate(size)
        self.seek(0, compat.SEEK_END)
        self._size = self.tell()
        self.seek(curr_pos)
        self._need_commit = True

    def flush(self):
        """
        Commit the change to ODPS if any change happens.
        Close will do this automatically.

        :return: None
        """
        if self._need_commit:
            is_create = self._is_create()
            resources = self.parent
            if is_create:
                resources.create(self=self, file_obj=self._fp)
            else:
                resources.update(obj=self, file_obj=self._fp)
            self._need_commit = False

    def close(self):
        """
        Close this file resource.

        :return: None
        """
        self.flush()
        self._fp = None
        self._size = 0
        self._need_commit = False
        self._opened = False

    def __iter__(self):
        self._check_read()
        return self._fp.__iter__()

    def __next__(self):
        self._check_read()
        return next(self._fp)

    # Python 2 iterator protocol alias
    next = __next__

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def update(self, file_obj):
        """Replace the remote content with ``file_obj``."""
        return self._parent.update(self, file_obj=file_obj)
@cache_parent
class JarResource(FileResource):
    """File resource holding a Java archive (``.jar``) file."""

    def __init__(self, **kwargs):
        # Behaves exactly like FileResource, but tags itself as a JAR so
        # the server stores it with the right resource type.
        super(JarResource, self).__init__(**kwargs)
        self.type = Resource.Type.JAR
@cache_parent
class PyResource(FileResource):
    """File resource holding a Python (``.py``) source file."""

    def __init__(self, **kwargs):
        # Behaves exactly like FileResource, but tags itself as PY so
        # the server stores it with the right resource type.
        super(PyResource, self).__init__(**kwargs)
        self.type = Resource.Type.PY
@cache_parent
class ArchiveResource(FileResource):
    """File resource holding a compressed archive
    (``.zip``/``.tgz``/``.tar.gz``/``.tar``/``.jar``)."""

    def __init__(self, **kwargs):
        # Behaves exactly like FileResource, but tags itself as ARCHIVE
        # so the server stores it with the right resource type.
        super(ArchiveResource, self).__init__(**kwargs)
        self.type = Resource.Type.ARCHIVE
@cache_parent
class TableResource(Resource):
    """
    Take a table (optionally a single partition of it) as a resource.

    The source table is stored as ``source_table_name`` in the form
    ``project.table`` or ``project.table partition(spec)``.
    """
    def __init__(self, **kw):
        # pop the table-specific kwargs before the generic Resource init
        project_name = kw.pop('project_name', None)
        table_name = kw.pop('table_name', None)
        partition_spec = kw.pop('partition', None)
        super(TableResource, self).__init__(**kw)
        self._init(project_name=project_name, table_name=table_name,
                   partition=partition_spec)

    def create(self, overwrite=False, **kw):
        """Register this table resource on the server.

        :param overwrite: when True, PUT onto the existing resource URL
            and return a freshly loaded object.
        :raises errors.ODPSError: if the resource name is empty
        """
        if self.name is None or len(self.name.strip()) == 0:
            raise errors.ODPSError('Table Resource Name should not be empty.')

        method = self._client.post if not overwrite else self._client.put
        url = self.parent.resource() if not overwrite else self.resource()

        headers = {'Content-Type': 'text/plain',
                   'x-odps-resource-type': self.type.value.lower(),
                   'x-odps-resource-name': self.name,
                   'x-odps-copy-table-source': self.source_table_name}
        if self._getattr('comment') is not None:
            headers['x-odps-comment'] = self._getattr('comment')

        method(url, '', headers=headers)

        if overwrite:
            # invalidate the cached entry so a freshly loaded object is returned
            del self.parent[self.name]
            return self.parent[self.name]
        return self

    def _init(self, project_name=None, table_name=None, partition=None):
        """Record the source table (and optional partition) this
        resource points at."""
        project_name = project_name or self._project

        if project_name is not None and project_name != self._project:
            from .projects import Projects
            self._parent = Projects(_client=self._client)[project_name].resources
        if table_name is not None:
            self.source_table_name = '%s.%s' % (project_name, table_name)
        if partition is not None:
            # BUGFIX: previously `partition_spec` was only assigned when
            # `partition` was NOT already a PartitionSpec, raising
            # NameError otherwise.
            if not isinstance(partition, types.PartitionSpec):
                partition_spec = types.PartitionSpec(partition)
            else:
                partition_spec = partition
            self.source_table_name = '%s partition(%s)' \
                                     % (self.source_table_name.split(' partition(')[0],
                                        partition_spec)

    def get_source_table(self):
        """Return the source :class:`odps.models.Table`, or None if no
        source table is recorded."""
        if self.source_table_name is None:
            return

        splits = self.source_table_name.split(' partition(')
        src = splits[0]
        if '.' not in src:
            raise ValueError('Malformed source table name: %s' % src)
        project_name, table_name = tuple(src.split('.', 1))

        from .projects import Projects
        # BUGFIX: use the `_client` keyword as everywhere else in this
        # module (see _init above); `client=` was inconsistent.
        return Projects(_client=self._client)[project_name].tables[table_name]

    def get_source_table_partition(self):
        """Return the recorded partition as a PartitionSpec, or None if
        the resource covers the whole table."""
        if self.source_table_name is None:
            return

        splits = self.source_table_name.split(' partition(')
        if len(splits) < 2:
            return

        partition = splits[1].split(')', 1)[0].strip()
        return types.PartitionSpec(partition)

    @property
    def table(self):
        """
        Get the table object.

        :return: source table
        :rtype: :class:`odps.models.Table`

        .. seealso:: :class:`odps.models.Table`
        """
        return self.get_source_table()

    @property
    def partition(self):
        """
        Get the source table partition.

        :return: the source table partition
        """
        pt = self.get_source_table_partition()
        if pt is None:
            return
        return self.get_source_table().get_partition(pt)

    def open_reader(self, **kwargs):
        """
        Open reader on the table resource
        """
        return self.get_source_table().open_reader(
            partition=self.get_source_table_partition(), **kwargs)

    def open_writer(self, **kwargs):
        """
        Open writer on the table resource
        """
        return self.get_source_table().open_writer(
            partition=self.get_source_table_partition(), **kwargs)

    def update(self, project_name=None, table_name=None, partition=None):
        """
        Update this resource.

        :param project_name: the source table's project
        :param table_name: the source table's name
        :param partition: the source table's partition
        :return: self
        """
        self._init(project_name=project_name, table_name=table_name,
                   partition=partition)
        resources = self.parent
        return resources.update(self)
@cache_parent
class VolumeResource(Resource):
    """Base class for resources whose content lives in an ODPS volume
    (referenced via ``volume_path`` rather than uploaded inline)."""

    def create(self, overwrite=False, **kw):
        """Register this volume-backed resource on the server.

        :param overwrite: when True, PUT onto the existing resource URL
            and return a freshly loaded object.
        :raises errors.ODPSError: if the resource name is empty
        """
        if self.name is None or len(self.name.strip()) == 0:
            raise errors.ODPSError('Volume Resource Name should not be empty.')

        if overwrite:
            method, url = self._client.put, self.resource()
        else:
            method, url = self._client.post, self.parent.resource()

        headers = {
            'Content-Type': 'text/plain',
            'x-odps-resource-type': self.type.value.lower(),
            'x-odps-resource-name': self.name,
            'x-odps-copy-file-source': self.volume_path,
        }
        if self._getattr('comment') is not None:
            headers['x-odps-comment'] = self._getattr('comment')

        method(url, '', headers=headers)

        if not overwrite:
            return self
        # drop the cached entry so the parent returns a freshly loaded object
        del self.parent[self.name]
        return self.parent[self.name]
@cache_parent
class VolumeFileResource(VolumeResource):
    """Resource backed by a single file stored in an ODPS volume."""

    def __init__(self, **kw):
        # 'volume_file' is consumed by create(); strip it before the
        # base initializer sees it.
        base_kw = kw.copy()
        base_kw.pop('volume_file', None)
        super(VolumeFileResource, self).__init__(**base_kw)
        self.type = Resource.Type.VOLUMEFILE

    def create(self, overwrite=False, **kw):
        """Create the resource; an optional ``volume_file`` object
        (anything exposing a ``path`` attribute) supplies volume_path."""
        if 'volume_file' in kw:
            self.volume_path = kw.pop('volume_file').path
        return super(VolumeFileResource, self).create(overwrite, **kw)
@cache_parent
class VolumeArchiveResource(VolumeFileResource):
    """Resource backed by an archive file stored in an ODPS volume."""

    def __init__(self, **kwargs):
        # Same behavior as VolumeFileResource, tagged as VOLUMEARCHIVE.
        super(VolumeArchiveResource, self).__init__(**kwargs)
        self.type = Resource.Type.VOLUMEARCHIVE
| StarcoderdataPython |
35508 | <filename>languages/python/src/concepts/P104_Decorators_ClassBasedDecorators.py
# Description: Class Based Decorators
"""
### Note
* If you want to maintain some sort of state and/or just make your code more confusing, use class based decorators.
"""
class ClassBasedDecorator(object):
    """Decorator implemented as a class.

    Instantiation (decoration time) stores the target callable;
    calling the instance forwards all positional and keyword
    arguments to it and returns its result.
    """

    def __init__(self, function_to_decorate):
        print("INIT ClassBasedDecorator")
        self.function_to_decorate = function_to_decorate

    def __call__(self, *args, **kwargs):
        print("CALL ClassBasedDecorator")
        wrapped = self.function_to_decorate
        return wrapped(*args, **kwargs)
# Decorate with the class-based decorator via @ syntax.
@ClassBasedDecorator
def function_1(*args):
    """Print each positional argument on its own line."""
    for value in args:
        print(value)
def function_2(*args):
    """Print each positional argument on its own line (undecorated;
    wrapped manually in the demo below)."""
    for value in args:
        print(value)
if __name__ == '__main__':
    # function_1 was decorated with @ClassBasedDecorator above.
    function_1(1, 2, 3)

    # Call Class Based Decorator - Alternate way: decorate manually by
    # rebinding the name to a wrapper instance.
    function_2 = ClassBasedDecorator(function_2)
    function_2(1, 2, 3)
| StarcoderdataPython |
154009 | # -*- coding: utf-8 -*-
# Author : <NAME>
# e-mail : <EMAIL>
# Powered by Seculayer © 2021 Service Model Team, R&D Center.
class StringUtil(object):
    """Small conversion helpers for loosely typed input values."""

    @staticmethod
    def get_int(data) -> int:
        """Parse *data* as an int, returning -1 when it is not a valid
        integer literal.  Only ValueError is absorbed; other errors
        (e.g. TypeError for None) still propagate.
        """
        try:
            return int(data)
        except ValueError:
            return -1

    @staticmethod
    def get_boolean(data) -> bool:
        """Interpret *data* as a flag: ``y``/``true`` in any case mean
        True, everything else means False.
        """
        return str(data).lower() in ("y", "true")
| StarcoderdataPython |
1629138 | <reponame>PaulWay/osbuild<filename>osbuild/util/lorax.py
#!/usr/bin/python3
"""
Lorax related utilities: Template parsing and execution
This module contains a re-implementation of the Lorax
template engine, but for osbuild. Not all commands in
the original scripting language are support, but all
needed to run the post install and cleanup scripts.
"""
import contextlib
import glob
import os
import re
import shlex
import shutil
import subprocess
import mako.template
def replace(target, patterns):
    """Apply regex substitutions to *target* in place.

    *patterns* is an iterable of ``(regex, replacement)`` pairs; every
    pair is applied to every line.  The rewritten content goes to a
    sibling ``<target>.replace`` file which then replaces *target* via
    rename, so readers never see a half-written file.
    """
    compiled = [(re.compile(regex), repl) for regex, repl in patterns]

    def _rewrite(line):
        for rx, repl in compiled:
            line = rx.sub(repl, line)
        return line

    tmp_path = target + ".replace"
    with open(target, "r") as src, open(tmp_path, "w") as dst:
        dst.writelines(_rewrite(line) for line in src)
    os.rename(tmp_path, target)
def rglob(pathname, *, fatal=False):
    """Yield glob matches for *pathname*, de-duplicated, in iteration
    order.  With ``fatal=True`` raise IOError if nothing matched; the
    check fires only once the generator has been exhausted.
    """
    yielded = set()
    for match in glob.iglob(pathname):
        if match in yielded:
            continue
        yielded.add(match)
        yield match
    if fatal and not yielded:
        raise IOError(f"nothing matching {pathname}")
class Script:
    """Executor for parsed Lorax template scripts.

    A script is a list of pre-split command lines; each command name maps
    to one of the built-in methods below via the ``commands`` registry.
    Prefixing a command with ``-`` makes its errors non-fatal.
    """

    # all built-in commands in a name to method map
    commands = {}

    # helper decorator to register builtin methods
    class command:
        def __init__(self, fn):
            self.fn = fn

        def __set_name__(self, owner, name):
            # Called when the decorated function is bound to the owner
            # class: record it in `commands` under its attribute name and
            # re-expose the plain function on the class so normal method
            # calls keep working.
            bultins = getattr(owner, "commands")
            bultins[name] = self.fn
            setattr(owner, name, self.fn)

    # Script class starts here
    def __init__(self, script, build, tree):
        # script: parsed command lines; build: source root the template
        # installs from; tree: destination root being assembled
        self.script = script
        self.tree = tree
        self.build = build

    def __call__(self):
        """Run every command line of the script in order."""
        for i, line in enumerate(self.script):
            cmd, args = line[0], line[1:]

            # a leading '-' means "ignore failures of this command"
            ignore_error = False
            if cmd.startswith("-"):
                cmd = cmd[1:]
                ignore_error = True

            method = self.commands.get(cmd)
            if not method:
                raise ValueError(f"Unknown command: '{cmd}'")

            try:
                method(self, *args)
            except Exception:
                if ignore_error:
                    continue
                print(f"Error on line: {i} " + str(line))
                raise

    def tree_path(self, target):
        # resolve a template path (always written as absolute) inside
        # the destination tree
        dest = os.path.join(self.tree, target.lstrip("/"))
        return dest

    @command
    def append(self, filename, data):
        """Append *data* (with escape sequences expanded) plus a newline
        to *filename*, creating parent directories as needed."""
        target = self.tree_path(filename)
        dirname = os.path.dirname(target)
        os.makedirs(dirname, exist_ok=True)
        print(f"append '{target}' '{data}'")
        with open(target, "a", encoding="utf-8") as f:
            # expand literal escapes like '\n' coming from the template
            f.write(bytes(data, "utf8").decode("unicode_escape"))
            f.write("\n")

    @command
    def mkdir(self, *dirs):
        """Create each directory (and parents) inside the tree."""
        for d in dirs:
            print(f"mkdir '{d}'")
            os.makedirs(self.tree_path(d), exist_ok=True)

    @command
    def move(self, src, dst):
        """Rename *src* to *dst* inside the tree; when *dst* is a
        directory the basename of *src* is kept."""
        src = self.tree_path(src)
        dst = self.tree_path(dst)
        if os.path.isdir(dst):
            dst = os.path.join(dst, os.path.basename(src))
        print(f"move '{src}' -> '{dst}'")
        os.rename(src, dst)

    @command
    def install(self, src, dst):
        """Copy glob matches from the build root into *dst* in the tree;
        fails if nothing matched.  Individual copy errors are ignored."""
        dst = self.tree_path(dst)
        for s in rglob(os.path.join(self.build, src.lstrip("/")), fatal=True):
            with contextlib.suppress(shutil.Error):
                print(f"install {s} -> {dst}")
                shutil.copy2(os.path.join(self.build, s), dst)

    @command
    def remove(self, *files):
        """Delete every glob match inside the tree (recursively for
        real directories; symlinks are unlinked, not followed)."""
        for g in files:
            for f in rglob(self.tree_path(g)):
                if os.path.isdir(f) and not os.path.islink(f):
                    shutil.rmtree(f)
                else:
                    os.unlink(f)
                print(f"remove '{f}'")

    @command
    def replace(self, pat, repl, *files):
        """Regex-substitute *pat* -> *repl* in every glob match; asserts
        that at least one file matched."""
        found = False
        for g in files:
            for f in rglob(self.tree_path(g)):
                found = True
                print(f"replace {f}: {pat} -> {repl}")
                # module-level replace(), not this method
                replace(f, [(pat, repl)])
        if not found:
            # NOTE: stripped under `python -O`; then a no-match silently passes
            assert found, f"No match for {pat} in {' '.join(files)}"

    @command
    def runcmd(self, *args):
        """Run an external command with the tree as working directory."""
        print("run ", " ".join(args))
        subprocess.run(args, cwd=self.tree, check=True)

    @command
    def symlink(self, source, dest):
        """Create a symlink at *dest* pointing to *source*, replacing
        any existing file at *dest*."""
        target = self.tree_path(dest)
        if os.path.exists(target):
            self.remove(dest)
        print(f"symlink '{source}' -> '{target}'")
        os.symlink(source, target)

    @command
    def systemctl(self, verb, *units):
        """Enable/disable/mask systemd units inside the tree; failures
        for individual units are ignored."""
        assert verb in ('enable', 'disable', 'mask')
        self.mkdir("/run/systemd/system")
        cmd = ['systemctl', '--root', self.tree, '--no-reload', verb]
        for unit in units:
            with contextlib.suppress(subprocess.CalledProcessError):
                args = cmd + [unit]
                self.runcmd(*args)
def brace_expand(s):
    """Expand shell-style brace alternation in *s*.

    ``"a{b,c}d"`` -> ``["abd", "acd"]``.  Strings without a complete
    ``{...,...}`` group come back unchanged as a one-element list;
    multiple groups multiply out through recursion.
    """
    if not ('{' in s and ',' in s and '}' in s):
        return [s]

    close = s.find('}')
    open_ = s.rfind('{', 0, close)
    stem, body, tail = s[:open_], s[open_ + 1:close], s[close + 1:]
    return [expanded
            for choice in body.split(',')
            for expanded in brace_expand(stem + choice + tail)]


def brace_expand_line(line):
    """Brace-expand every token of an already-split command line."""
    expanded = []
    for token in line:
        expanded.extend(brace_expand(token))
    return expanded
def render_template(path, args):
    """Render the Lorax template at *path* with Mako arguments *args*.

    Returns the template as a list of command lines, each shlex-split
    and brace-expanded.  Blank lines and ``#`` comment lines are
    dropped.
    """
    with open(path, "r") as f:
        source = f.read()

    rendered = mako.template.Template(text=source, filename=path).render(**args)

    commands = []
    for raw in rendered.splitlines():
        stripped = raw.strip()
        if not stripped or stripped.startswith("#"):
            continue
        commands.append(brace_expand_line(shlex.split(stripped)))
    return commands
| StarcoderdataPython |
3252401 | <reponame>wilsenmuts/labsecurity
from libsecurity.scanners import *
from libsecurity.main import *
scanner = scanner()
interpreter = interpreter()
scanports = scanner.scanports
scanport = scanner.scanport
scanip = scanner.scanip
scanweb = scanner.scanweb
scanns = scanner.scanns
getwpv = scanner.getwpv
help = interpreter.help
command_ls = interpreter.command_ls
show_options = interpreter.show_options
command_clear = interpreter.command_clear
| StarcoderdataPython |
116323 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/11/6 16:45
# @File : leetCode_657.py
'''
思路,
U,D,L,R
分布对应+1, -1操作,最后比较值即可
'''
class Solution(object):
    def judgeCircle(self, moves):
        """Return True if the robot ends where it started.

        Each move shifts the robot one unit: U/D change the vertical
        offset, L/R the horizontal one.  The walk closes a circle
        exactly when both offsets return to zero.

        :type moves: str
        :rtype: bool
        """
        steps = {
            "U": 1,
            "D": -1,
            "L": -1,
            "R": 1
        }
        # BUGFIX: was `a, b = 0`, which raises TypeError (an int is not
        # iterable); both accumulators must start at zero.
        a = b = 0
        for m in moves:
            if m in ("U", "D"):
                a += steps[m]
            if m in ("L", "R"):
                b += steps[m]
        return a == 0 and b == 0
# 充分利用str的库的方法,经典
# return moves.count('L') == moves.count('R') and moves.count('U') == moves.count('D') | StarcoderdataPython |
3366333 | <reponame>backtick-se/cowa
# flake8: noqa: 401
from .docker import DockerProvider
from .task import DockerTask
| StarcoderdataPython |
1769110 | <reponame>DeqiTang/pymatflow
import os
def vaspSubparser(subparsers):
# --------------------------------------------------------------------------
# VASP
# --------------------------------------------------------------------------
subparser = subparsers.add_parser("vasp", help="using vasp as calculator")
# run params
# -----------------------------------------------------------------
gp = subparser.add_argument_group(title="overall running control", description="control the task running parameters")
gp.add_argument("-d", "--directory", type=str, default="matflow-running",
help="directory to generate all the files, do not specify the current directory")
gp.add_argument("-r", "--runtype", type=int, default=0,
choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
help="choices of runtype. 0->static_run; 1->optimization; 2->cubic-cell; 3->hexagonal-cell; 4->tetragonal-cell; 5->neb; 6->vasp-phonon; 7->phonopy; 8->surf pes; 9->abc; 10->AIMD, 11->custom")
# run option
gp.add_argument("--runopt", type=str, default="gen",
choices=["gen", "run", "genrun"],
help="Generate or run or both at the same time.")
gp.add_argument("--auto", type=int, default=3,
choices=[0, 1, 2, 3],
help="auto:0 nothing, 1: copying files to server, 2: copying and executing, 3: pymatflow run inserver with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|llhpc].conf")
gp.add_argument("--mpi", type=str, default="",
help="MPI command, used in single node running, namely --auto 0 --runopt genrun")
gp.add_argument("--server", type=str, default="pbs",
choices=["pbs", "llhpc", "lsf_sz", "tianhe2", "lsf_sustc", "cdcloud"],
help="type of remote server, can be pbs or llhpc or lsf_sz or lsf_sustc")
gp.add_argument("--jobname", type=str, default="matflow-running",
help="jobname on the pbs server")
gp.add_argument("--nodes", type=int, default=1,
help="Nodes used in server")
gp.add_argument("--ppn", type=int, default=32,
help="ppn of the server")
gp.add_argument("--queue", type=str, default=None,
help="the queue to submit to job, default is not set")
# llhpc
gp.add_argument("--partition", type=str, default="free",
help="choose partition to submit job, now only apply for llhpc")
gp.add_argument("--ntask", type=int, default=24,
help="choose task number, now only apply for llhpc")
gp.add_argument("--stdout", type=str, default="slurm.out",
help="set standard out, now only apply for llhpc")
gp.add_argument("--stderr", type=str, default="slurm.err",
help="set standard err, now only apply for llhpc")
# actually this can be put in the main subparser, but it will make the command not like git sub-cmmand
# so we put them in every subsubparser
gp = subparser.add_mutually_exclusive_group(required=True) # at leaset one of cif and xyz is provided
# argparse will make sure only one of argument in structfile(xyz, cif) appear on command line
gp.add_argument("--xyz", type=str, default=None,
help="The xyz structure file with the second line specifying the cell parameter")
gp.add_argument("--cif", type=str, default=None,
help="The cif structure file")
gp.add_argument("--xsd", type=str, default=None,
help="The xsd structure file")
gp.add_argument("--xsf", type=str, default=None,
help="The xsf structure file")
gp.add_argument("--images", type=str, nargs="+",
help="the image stucture file(--images first.cif final.xsd), can only be cif, xsd, xsd, or xyz(second line is cell parameter) format")
# potential file
gp = subparser.add_argument_group(title="pseudopotential")
gp.add_argument("--pot", type=str, default="./",
help="specify the path to the POTCAR, default is ./. if you pass 'auto' to it, matflow will build the POTCAR foryou(need simple configuration, see manual)")
gp.add_argument("--pot-type", type=str, default="PAW_PBE",
choices=["PAW_PBE", "PAW_LDA", "PAW_PW91", "paw_pbe", "paw_lda", "paw_pw91"],
help="choose type of POT for POTCAR")
# --------------------------------------------------------
# INCAR PARAMETERS
# --------------------------------------------------------
# allow manual setting of any INCAR paramaters by cmd line
gp = subparser.add_argument_group(title="incar->any",
description="manual setting of any INCAR parameters")
gp.add_argument("--incar-manual", type=str, default=None,
help="manual setting of INCAR like this: --incar-manual \'ENCUT=100; PREC=A;\'")
# incar->start parameters
gp = subparser.add_argument_group(title="incar->start parameters",
description="start parameters to be set in INCAR")
gp.add_argument("--nwrite", type=int, default=None,
help=" This flag determines how much will be written to the file OUTCAR (verbosity flag)")
gp.add_argument("--prec", type=str, default=None,
choices=["Normal", "Accurate", "A", "N", "Low", "L","" "Single"],
help="PREC, default value: Normal")
gp.add_argument("--ncore", type=int, default=None,
help="NCORE determines the number of compute cores that work on an individual orbital.")
gp.add_argument("--npar", type=int, default=None,
help="NPAR determines the number of bands that are treated in parallel.")
gp.add_argument("--kpar", type=int, default=None,
help="KPAR determines the number of k-points that are to be treated in parallel")
gp.add_argument("--lplane", type=str, default=None,
choices=[".TRUE.", ".FALSE.", "T", "F"],
help="LPLANE switches on the plane-wise data distribution in real space. default is .TRUE.")
gp.add_argument("--nsim", type=int, default=None,
help="NSIM sets the number of bands that are optimized simultaneously by the RMM-DIIS algorithm. default NSIM=4")
# incar->electrons
gp = subparser.add_argument_group(title="incar->electron",
description="electrons calculation related parameters")
gp.add_argument("--encut", type=int, default=None,
help="ENCUT, default value: 300 eV")
gp.add_argument("--ediff", type=float, default=None,
help="EDIFF, default value: 1.0e-4")
gp.add_argument("--nelm", type=int, default=None,
help="NELM sets the maximum number of electronic SC (selfconsistency) steps which may be performed")
gp.add_argument("--nfree", type=int, default=None,
help="NFREE specifies the number of remembered steps in the history of ionic convergence runs, or the number of ionic displacements in frozen phonon calculations")
gp.add_argument("--kpoints-mp", type=int, nargs=6,
default=[1, 1, 1, 0, 0, 0],
help="set kpoints like -k 1 1 1 0 0 0")
#gp.add_argument("--kpoints-mp-scf", type=int, nargs="+",
# default=[1, 1, 1, 0, 0, 0],
# help="set kpoints like -k 1 1 1 0 0 0")
gp.add_argument("--kpoints-mp-nscf", type=int, nargs="+",
default=None,
help="set kpoints like -k 1 1 1 0 0 0")
gp.add_argument("--kpath-manual", type=str, nargs="+", default=None,
help="set kpoints for band structure calculation manually")
gp.add_argument("--kpath-file", type=str, default="kpath.txt",
help="set kpoints for band structure calculation manually from file")
gp.add_argument("--kpath-intersections", type=int, default=15,
help="intersection of the line mode kpoint for band calculation")
gp.add_argument("--ismear", type=int, default=None,
help="smearing type(methfessel-paxton(>0), gaussian(0), fermi-dirac(-1), tetra(-4), tetra-bloch-dorrected(-5)), default: 0")
gp.add_argument("--sigma", type=float, default=None,
help="determines the width of the smearing in eV.")
gp.add_argument("--ivdw", type=int, default=None,
choices=[0, 11, 12, 21, 202, 4],
help="IVDW = 0(no correction), 1(dft-d2), 11(dft-d3 Grimme), 12(dft-d3 Becke-Jonson), default: None which means 0, no correction")
# -----------------------------------------------------------------
gp.add_argument("--lorbit", type=int, default=None,
choices=[0, 1, 2, 5, 10, 11, 12],
help="together with an appropriate RWIGS, determines whether the PROCAR or PROOUT files are written")
# optics related
gp.add_argument("--loptics", type=str, default=None,
choices=["TRUE", "FALSE"],
help="calculates the frequency dependent dielectric matrix after the electronic ground state has been determined.")
gp.add_argument("--cshift", type=float, default=None,
help="CSHIFT sets the (small) complex shift η in the Kramers-Kronig transformation")
gp.add_argument("--nedos", type=int, default=None,
help="NEDOS specifies number of gridpoints on which the DOS is evaluated")
# magnetic related
gp.add_argument("--ispin", type=int, default=None,
choices=[1, 2],
help="specifies spin polarization: 1->no spin polarized, 2->spin polarized(collinear). combine SIPIN with MAGMOM to study collinear magnetism.")
gp.add_argument("--magmom", type=float, nargs="+", default=None,
help="Specifies the initial magnetic moment for each atom, if and only if ICHARG=2, or if ICHARG=1 and the CHGCAR file contains no magnetisation density.")
gp.add_argument("--lnoncollinear", type=str, default=None,
choices=["T", "F", ".TRUE.", ".FALSE."],
help="specifies whether fully non-collinear magnetic calculations are performed")
gp.add_argument("--lsorbit", type=str, default=None,
choices=["T", "F", ".TRUE.", ".FALSE."],
help="specifies whether spin-orbit coupling is taken into account.")
gp.add_argument("--saxis", type=float, nargs=3, default=None,
help="SAXIS specifies the quantisation axis for noncollinear spins")
gp.add_argument("--lmaxmix", type=int, default=None,
help="LMAXMIX controls up to which l-quantum number the one-center PAW charge densities are passed through the charge density mixer and written to the CHGCAR file.")
# hybrid functional
gp = subparser.add_argument_group(title="incar->Exchange correlation")
gp.add_argument("--lhfcalc", type=str, default=None,
choices=["T", "F", ".TRUE.", ".FALSE."],
help=" specifies whether Hartree-Fock/DFT hybrid functional type calculations are performed")
gp.add_argument("--hfscreen", type=float, default=None,
choices=[0.3, 0.2],
help=" specifies the range-separation parameter in range separated hybrid functionals: HSE03->0.3, HSE06->0.2, must also set LHFCALC=.TRUE.")
gp.add_argument("--aexx", type=float, default=None,
help="AEXX specifies the fraction of exact exchange in a Hartree-Fock/DFT hybrid functional type calculation")
gp.add_argument("--lsubrot", type=str, default=None,
choices=["T", "F", ".TRUE.", ".FALSE."],
help="This flag can be set for hybrid functionals (HF-type calculations).")
gp.add_argument("--nsw", type=int, default=None,
help="NSW sets the maximum number of ionic steps")
gp.add_argument("--ediffg", type=float, default=None,
help="EDIFFG, default value: 10*EDIFF")
gp = subparser.add_argument_group(title="incar->ions",
description="setting ions related parameters")
gp.add_argument("--ibrion", type=int, default=None,
choices=[-1, 0, 1, 2, 3, 5, 6, 7, 8, 44],
help="IBRION = refer to https://cms.mpi.univie.ac.at/wiki/index.php/IBRION for how to set the algorithm of optimization you need!")
gp.add_argument("--isif", type=int, default=None,
choices=[0, 1, 2, 3, 4, 5, 6, 7],
help="ISIF = 0-7: refer to https://cms.mpi.univie.ac.at/wiki/index.php/ISIF for how to set the type of Geometri Optimization you need!")
gp.add_argument("--potim", type=float, default=None,
help="step width scaling (ionic relaxations), default: None = 0.015 in phonon calculation")
gp.add_argument("--selective-dynamics", type=str, default="False",
choices=["True", "False", "T", "F"],
help="whether use selective dyanmics")
gp = subparser.add_argument_group(title="molecular dynamics",
description="molecular dynamics related")
gp.add_argument("--smass", type=int, default=None,
help="controls the velocities during an ab-initio molecular dynamics run.")
gp.add_argument("--mdalgo", type=int, default=None,
choices=[0, 1, 2, 3, 11, 21, 13],
help="specifies the molecular dynamics simulation protocol (in case IBRION=0 and VASP was compiled with -Dtbdyn).")
gp.add_argument("--anderson-prob", type=float, default=None,
help=" sets the collision probability for the Anderson thermostat (in case VASP was compiled with -Dtbdyn).")
gp.add_argument("--tebeg", type=float, default=None,
help=" sets the start temperature for an ab-initio molecular dynamics run (IBRION=0) and other routines (e.g. Electron-phonon interactions from Monte-Carlo sampling).")
gp.add_argument("--teend", type=float, default=None,
help="sets the final temperature for an ab-initio molecular dynamics run (IBRION=0; SMASS=−1).")
# incar-miscellaneous
gp = subparser.add_argument_group(title="incar-miscellaneous",
description="miscellaneous input parameters")
gp.add_argument("--algo", type=str, default=None,
choices=["N", "D", "V", "F", "VeryFast"], #"Exact", "G0W0", "GW0", "GW"],
help=" a convenient option to specify the electronic minimisation algorithm (as of VASP.4.5) and/or to select the type of GW calculations")
gp.add_argument("--ialgo", type=int, default=None,
choices=[5, 6, 7, 8, 38, 44, 46, 48],
help="IALGO selects the algorithm used to optimize the orbitals.Mind: We strongly urge the users to select the algorithms via ALGO. Algorithms other than those available via ALGO are subject to instabilities.")
gp.add_argument("--addgrid", type=str, default=None,
choices=[".TRUE.", ".FALSE.", "T", "F"],
help="ADDGRID determines whether an additional support grid is used for the evaluation of the augmentation charges.")
gp.add_argument("--isym", type=int, default=None,
choices=[-1, 0, 1, 2, 3],
help=" ISYM determines the way VASP treats symmetry.")
gp.add_argument('--lreal', type=str, default=None,
choices=["T", "F", ".TRUE.", ".FALSE.", "O", "On", "A", "Auto"],
help="LREAL determines whether the projection operators are evaluated in real-space or in reciprocal space.")
gp.add_argument("--pstress", type=float, default=None,
help="controls whether Pulay corrections are added to the stress tensor or not.")
gp.add_argument("--lepsilon", type=str, default=None,
choices=["T", "F", ".TRUE.", ".FALSE."],
help="determines the static dielectric matrix, ion-clamped piezoelectric tensor and the Born effective charges using density functional perturbation theory.")
gp.add_argument("--lpead", type=str, default=None,
choices=["T", "F", ".TRUE.", ".FALSE."],
help="the derivative of the cell-periodic part of the orbitals w.r.t. k, |∇kunk〉, is calculated using finite differences.")
gp.add_argument("--lrpa", type=str, default=None,
choices=["T", "F", ".TRUE.", ".FALSE."],
help="includes local field effect on the Hartree level only.")
# properties parameters
gp.add_argument("--lelf", type=str, default=None,
choices=["T", "F", ".TRUE.", ".FALSE."],
help="LELF determines whether to create an ELFCAR file or not.")
# write PARAMETERS
gp = subparser.add_argument_group(title="incar->write parameters",
description="set writing parameters")
gp.add_argument("--lwave", type=str, default=None,
choices=['T', 'F', ".TRUE.", '.FALSE.'],
help="LWAVE determines whether the wavefunctions are written to the WAVECAR file at the end of a run.")
gp.add_argument("--lcharg", type=str, default=None,
choices=['T', 'F', ".TRUE.", '.FALSE.'],
help="LCHARG determines whether the charge densities (files CHGCAR and CHG) are written.")
# neb related PARAMETERS
# --------------------------------------------------------------------------
gp = subparser.add_argument_group(title="incar->neb",
description="nudged elastic band related setting")
gp.add_argument("--iopt", type=int, default=None,
choices=[0, 1, 2, 3],
help="chioce for optimizer: 0->vasp, 1, 2->vtst")
gp.add_argument("--lclimb", type=str, default=None,
choices=["T", "F"],
help="whether use climbing image")
gp.add_argument("--lnebcell", type=str, default=None,
choices=["T", "F"],
help="flag to turn on SS-NEB, used with ISIF=3 and IOPT=3")
gp.add_argument("--spring", type=int, default=None,
help="gives the spring constant between the images as used in the elastic band method")
gp.add_argument("--maxmove", type=float, default=None,
help="maximum allowed step size for translation, default is None which means 0.2")
gp.add_argument("--lglobal", type=str, default=None,
choices=["T", "F"],
help="optimize the NEB globally instead of image-by-image, default is None which means .TRUE.")
gp.add_argument("--lautoscale", type=str, default=None,
choices=["T", "F"],
help="automatically determines INVCURV, default is T")
gp.add_argument("--invcurv", type=float, default=None,
help="initial inverse curvature, used to construct the inverse Hessian matrix. default is None which means 0.01")
gp.add_argument("--llineopt", type=str, default=None,
choices=["T", "F"],
help="use a force based line minimizer for translation. default is None(means F)")
gp.add_argument("--fdstep", type=float, default=None,
help="finite difference step size for line optimizer, default is None(5E-3)")
gp.add_argument("--nimage", type=int, default=None,
help="number of image to interpolate. total image will be nimage+2.")
gp.add_argument("--outcars", type=str, nargs="+",
help="OUTCAR for the initial and final structure in neb calc")
gp.add_argument("--nebmake", type=int, default=0,
choices=[0, 1],
help="0(default): use nebmake.pl, 1: use nebmake.py")
gp.add_argument("--moving-atom", type=int, nargs="+",
help="spedifying moving atoms, only needed when --nebmake=1 using nebmake.py. index start from 0")
# PHONOPY parameters
# ----------------------------------------
gp = subparser.add_argument_group(title="phonopy parameters",
description="setting of parameters needed by phonopy")
gp.add_argument("--supercell-n", type=int, nargs="+",
default=[1, 1, 1],
help="supercell for phonopy, like [2, 2, 2]")
# range_a range_b range_c
# ----------------------------------------------
gp = subparser.add_argument_group(title="cell optimization",
description="cubic, hexagonal, tetragonal cell or general abc optimization parameters")
gp.add_argument("--range-a", type=float, nargs=3,
default=[-0.1, 0.1, 0.01],
help="test range for a")
gp.add_argument("--range-b", type=float, nargs=3,
default=[-0.1, 0.1, 0.01],
help="test range for b")
gp.add_argument("--range-c", type=float, nargs=3,
default=[-0.1, 0.1, 0.01],
help="test range for c")
gp.add_argument("--batch-a", type=int,
default=None,
help="number of structure each batch a")
gp.add_argument("--batch-b", type=int,
default=None,
help="number of structure each batch b")
gp.add_argument("--batch-c", type=int,
default=None,
help="number of structure each batch c")
# incar template
gp = subparser.add_argument_group(title="template",
description="read in INCAR template")
gp.add_argument("--incar", type=str, default=None,
help="specify incar template to set parameters")
# surf pes
gp = subparser.add_argument_group(title="surf pes",
description="surf pes")
gp.add_argument("--move-atom", type=int, nargs="+",
default=[-1],
help="specify the atoms to move, index starts from 0")
gp.add_argument("--xrange", type=float, nargs="+",
default=[1, 3, 0.5],
help="x range for moving the specified moving atoms.")
gp.add_argument("--yrange", type=float, nargs="+",
default=[3, 5, 0.5],
help="y range for moving the specified moving atoms.")
gp.add_argument("--zshift", type=float,
default=0.0,
help="z shift for the moving atoms, will shift the z of specified moving atoms by value of zshift")
gp.add_argument("--fix-z", type=int, default=1,
choices=[0, 1, 2],
help="0 -> do not fix any z of the atoms, 1: only fix z of the buttom atoms, 2: fix z of both the buttom and the moving atoms. default is 1")
gp.add_argument("--fix-y", type=int, default=2,
choices=[0, 1, 2],
help="0 -> do not fix any z of the atoms, 1: only fix z of the buttom atoms, 2: fix z of both the buttom and the moving atoms. default is 2")
gp.add_argument("--fix-x", type=int, default=2,
choices=[0, 1, 2],
help="0 -> do not fix any z of the atoms, 1: only fix z of the buttom atoms, 2: fix z of both the buttom and the moving atoms. default is 2")
gp.add_argument("--batch-x-y", type=int, nargs=2, default=None,
help="number of structures to calculate each batch x and y, default is all in one batch")
# fix atoms
gp = subparser.add_argument_group(title="fix atoms",
description="specify atoms to fix in optimization, only used when --runtype=1")
gp.add_argument("--fix", help="list of fixed atoms, index start from 1, have privilege over --fix-around-z", nargs='+', type=int)
gp.add_argument("--fix-around-z", type=float, nargs=3, default=None,
help="select atoms around specified z in Angstrom with tolerance, like this --fix-around-z 10 -0.5 0.5")
gp.add_argument("--color-fix", type=str, default="white",
choices=["red", "green", "blue", "white"],
help="select color to color the fixed atoms in xsd file, can be: red green blue and white")
# static calc related setting
gp = subparser.add_argument_group(title="static calc",
description="setting type of static calculation when -r 0")
gp.add_argument("--static", type=str, default="band",
choices=["scf", "band", "dos", "optics", "bse", "parchg","stm"],
help="in case of band(default), run scf, nscf(bands) in a single run; in case of scf, run scf only, in case of optics, run scf and optics calc in a single run")
gp.add_argument("--hse-in-scf", type=str, default="false",
choices=["true", "false", "True", "False"],
help="choose whether to use HSE in both scf and nscf or only in nscf, when calc band structure")
gp.add_argument("--bse-level", type=int, default=0,
choices=[0, 1, 2],
help="0 -> bse on standard DFT; 1 -> bse on hybrid functional; 2 -> bse on GW")
gp.add_argument("--algo-gw", type=str, default="EVGW",
choices=["EVGW", "GW", "GW0", "QPGW0", "QPGW"],
help="ALGO used for GW")
# VASP PARCHG STM
gp = subparser.add_argument_group(title="PARCHG(STM) related",
description="PARCHG(STM) calc related parameters")
gp.add_argument("--lpard", type=str, default=None,
choices=[".TRUE.", "T", "F", ".FALSE."],
help="Determines whether partial (band or k-point decomposed) charge densities are evaluated.")
gp.add_argument("--lsepk", type=str, default=None,
choices=[".TRUE.", "T", "F", ".FALSE."],
help="Specifies whether the charge density of every k-point is write to the files PARCHG.*.nk (LSEPK=.TRUE.) or whether it is merged to a single file.")
gp.add_argument("--lsepb", type=str, default=None,
choices=[".TRUE.", "T", "F", ".FALSE."],
help="Specifies whether the charge density is calculated for every band separately and written to a file PARCHG.nb.* (LSEPB=.TRUE.) or whether charge density is merged for all selected bands and written to the files PARCHG.ALLB.* or PARCHG.")
gp.add_argument("--nbmod", type=int, default=None,
help="Controls which bands are used in the calculation of Band decomposed charge densities.")
gp.add_argument("--eint", type=float, nargs=2,
help="Specifies the energy range of the bands that are used for the evaluation of the partial charge density needed in Band decomposed charge densities.")
# miscellaneous
gp = subparser.add_argument_group(title="miscellaneous",
description="miscallaneous setting")
gp.add_argument("--symprec", type=float, default=None,
help="determines how accurately the positions in the POSCAR file must be specified. The default, SYMPREC=10-5, is usually large enough, even if the POSCAR file has been generated with single precision accuracy. Increasing SYMPREC means that the positions in the POSCAR file can be specified with less accuracy (increasing fuzziness).")
gp.add_argument("--amix", type=float, default=None,
help="specifies the linear mixing parameter.")
gp.add_argument("--bmix", type=float, default=None,
help="sets the cutoff wave vector for Kerker mixing scheme")
gp.add_argument("--nelect", type=int, default=None,
help="sets the number of electrons")
gp.add_argument("--laechg", type=str, default=None,
choices=[".TRUE.", ".FALSE.", "T", "F"],
help="for bader analysis. when LAECHG=.TRUE. the all-electron charge density will be reconstructed explicitly and written out to file.")
gp.add_argument("--lvhar", type=str, default=None,
choices=[".TRUE.", ".FALSE.", "T", "F"],
help="This tag determines whether the total local potential (saved in the file LOCPOT) contains the entire local potential (ionic + Hartree + exchange correlation) or the electrostatic contributions only (ionic + Hartree).")
def vaspDriver(args):
from pymatflow.cmd.matflow import getXyzFile
xyzfile, images = getXyzFile(args)
# server
# xxx.set_run can only deal with pbs, llhpc, lsf_sz server now
# however both guangzhou chaosuan llhpc are build on tianhe2, so they can use the same job system(yhbatch...)
# we add tianhe2 option to args.server which cannot be handled by xxx.set_run. so we convert it to llhpc if tianhe2 is chosen
server = args.server if args.server != "tianhe2" else "llhpc"
params = {}
# deal with INCAR template specified by --incar
if args.incar == None:
pass
else:
if not os.path.exists(args.incar):
print("====================================================\n")
print(" Warning !!!!\n")
print("----------------------------------------------------\n")
print("matflow vasp:\n")
print("the specified incar file by --incar doesn't exist\n")
print("go and check it\n")
sys.exit(1)
with open(args.incar, 'r') as fin:
incar = fin.readlines()
for line in incar:
if len(line.split()) == 0:
continue
if line[0] == "#":
continue
if len(line.split("\n")[0].split("#")[0].split("=")) == 2:
# in case of single value INCAR variable
params[line.split("=")[0].split()[0].upper()] = line.split("\n")[0].split("#")[0].split("=")[1].split()[0]
else:
params[line.split("=")[0].split()[0].upper()] = line.split("\n")[0].split("#")[0].split("=")[1].split()
# deal with INCAR cmd line setting specified by --incar-manual
if args.incar_manual != None:
for pair in args.incar_manual.split(";"):
if pair == "":
continue
if len(pair.split("=")) == 2:
# in case of single value INCAR varialbe
params[pair.split("=")[0].split()[0].upper()] = pair.split("=")[1].split()[0]
else:
params[pair.split("=")[0].split()[0].upper()] = pair.split("=")[1].split()
#
# if xxx is alraedy in params(set from --incar) and args.xxx is None
# params[xxx] will not be control by args.xxx
params["NWRITE"] = args.nwrite if "NWRITE" not in params or args.nwrite != None else params["NWRITE"]
params["PREC"] = args.prec if "PREC" not in params or args.prec != None else params["PREC"]
params["NCORE"] = args.ncore if "NCORE" not in params or args.ncore != None else params["NCORE"]
params["NPAR"] = args.npar if "NPAR" not in params or args.npar != None else params["NPAR"]
params["KPAR"] = args.kpar if "KPAR" not in params or args.kpar != None else params["KPAR"]
params["LPLANE"] = args.lplane if "LPLANE" not in params or args.lplane != None else params["LPLANE"]
params["NSIM"] = args.nsim if "NSIM" not in params or args.nsim != None else params["NSIM"]
params["ENCUT"] = args.encut if "ENCUT" not in params or args.encut != None else params["ENCUT"]
params["EDIFF"] = args.ediff if "EDIFF" not in params or args.ediff != None else params["EDIFF"]
params["NELM"] = args.nelm if "NELM" not in params or args.nelm != None else params["NELM"]
params["NFREE"] = args.nfree if "NFREE" not in params or args.nfree != None else params["NFREE"]
params["ISMEAR"] = args.ismear if "ISMEAR" not in params or args.ismear != None else params["ISMEAR"]
params["SIGMA"] = args.sigma if "SIGMA" not in params or args.sigma != None else params["SIGMA"]
params["IVDW"] = args.ivdw if "IVDW" not in params or args.ivdw != None else params["IVDW"]
params["EDIFFG"] = args.ediffg if "EDIFFG" not in params or args.ediffg != None else params["EDIFFG"]
params["NSW"] = args.nsw if "NSW" not in params or args.nsw != None else params["NSW"]
params["IBRION"] = args.ibrion if "IBRION" not in params or args.ibrion != None else params["IBRION"]
params["ISIF"] = args.isif if "ISIF" not in params or args.isif != None else params["ISIF"]
params["POTIM"] = args.potim if "POTIM" not in params or args.potim != None else params["POTIM"]
params["LORBIT"] = args.lorbit if "LORBIT" not in params or args.potim != None else params["LORBIT"]
params["LOPTICS"] = args.loptics if "LOPTICS" not in params or args.loptics != None else params["LOPTICS"]
params["CSHIFT"] = args.cshift if "CSHIFT" not in params or args.cshift != None else params["CSHIFT"]
params["NEDOS"] = args.nedos if "NEDOS" not in params or args.nedos != None else params["NEDOS"]
params["LSUBROT"] = args.lsubrot if "LSUBROT" not in params or args.lsubrot != None else params["LSUBROT"]
params["SAXIS"] = args.saxis if "SAXIS" not in params or args.saxis != None else params["SAXIS"]
params["LMAXMIX"] = args.lmaxmix if "LMAXMIX" not in params or args.lmaxmix != None else params["LMAXMIX"]
params["MDALGO"] = args.mdalgo if "MDALGO" not in params or args.mdalgo != None else params["MDALGO"]
params["SMASS"] = args.smass if "SMASS" not in params or args.smass != None else params["SMASS"]
params["ANDERSON_PROB"] = args.anderson_prob if "ANDERSON_PROB" not in params or args.anderson_prob != None else params["ANDERSON_PROB"]
params["TEBEG"] = args.tebeg if "TEBEG" not in params or args.tebeg != None else params["TEBEG"]
params["TEEND"] = args.teend if "TEEND" not in params or args.teend != None else params["TEEND"]
params["ALGO"] = args.algo if "ALGO" not in params or args.algo != None else params["ALGO"]
params["IALGO"] = args.ialgo if "IALGO" not in params or args.ialgo != None else params["IALGO"]
params["ADDGRID"] = args.addgrid if "ADDGRID" not in params or args.addgrid != None else params["ADDGRID"]
params["ISYM"] = args.isym if "ISYM" not in params or args.isym != None else params["ISYM"]
params["LREAL"] = args.lreal if "LREAL" not in params or args.lreal != None else params["LREAL"]
params["PSTRESS"] = args.pstress if "PSTRESS" not in params or args.pstress != None else params["PSTRESS"]
params["LWAVE"] = args.lwave if "LWAVE" not in params or args.lwave != None else params["LWAVE"]
params["LCHARG"] = args.lcharg if "LCHARG" not in params or args.lcharg != None else params["LCHARG"]
params["ISPIN"] = args.ispin if "ISPIN" not in params or args.ispin != None else params["ISPIN"]
params["MAGMOM"] = args.magmom if "MAGMOM" not in params or args.magmom != None else params["MAGMOM"] # magmom can be a list that can be automatically dealt with by base.incar.to_incar()
params["LNONCOLLINEAR"] = args.lnoncollinear if "LNONCOLLINEAR" not in params or args.lnoncollinear != None else params["LNONCOLLINEAR"]
params["LSORBIT"] = args.lsorbit if "LSORBIT" not in params or args.lsorbit != None else params["LSORBIT"]
params["ALGO"] = args.algo if "ALGO" not in params or args.algo != None else params["ALGO"]
params["LHFCALC"] = args.lhfcalc if "LHFCALC" not in params or args.lhfcalc != None else params["LHFCALC"]
params["HFSCREEN"] = args.hfscreen if "HFSCREEN" not in params or args.hfscreen != None else params["HFSCREEN"]
params["AEXX"] = args.aexx if "AEXX" not in params or args.aexx != None else params["AEXX"]
params["LELF"] = args.lelf if "LELF" not in params or args.lelf != None else params["LELF"]
params["IOPT"] = args.iopt if "IOPT" not in params or args.iopt != None else params["IOPT"]
params["LCLIMB"] = args.lclimb if "LCLIMB" not in params or args.lclimb != None else params["LCLIMB"]
params["LNEBCELL"] = args.lnebcell if "LNEBCELL" not in params or args.lnebcell != None else params["LNEBCELL"]
params["SPRING"] = args.spring if "SPRING" not in params or args.spring != None else params["SPRING"]
params["MAXMOVE"] = args.maxmove if "MAXMOVE" not in params or args.maxmove != None else params["MAXMOVE"]
params["LGLOBAL"] = args.lglobal if "LGLOBAL" not in params or args.lglobal != None else params["LGLOBAL"]
params["LAUTOSCALE"] = args.lautoscale if "LAUTOSCALE" not in params or args.lautoscale != None else params["LAUTOSCALE"]
params["INVCURV"] = args.invcurv if "INVCURV" not in params or args.invcurv != None else params["INVCURV"]
params["IMAGES"] = args.nimage if "IMAGES" not in params or args.images != None else params["IMAGES"]
params["LLINEOPT"] = args.llineopt if "LLINEOPT" not in params or args.llineopt != None else params["LLINEOPT"]
params["FDSTEP"] = args.fdstep if "FDSTEP" not in params or args.fdstep != None else params["FDSTEP"]
params["SYMPREC"] = args.symprec if "SYMPREC" not in params or args.symprec != None else params["SYMPREC"]
params["AMIX"] = args.amix if "AMIX" not in params or args.amix != None else params["AMIX"]
params["BMIX"] = args.bmix if "BMIX" not in params or args.bmix != None else params["BMIX"]
params["NELECT"] = args.nelect if "NELECT" not in params or args.nelect != None else params["NELECT"]
params["LAECHG"] = args.laechg if "LAECHG" not in params or args.laechg != None else params["LAECHG"]
params["LPARD"] = args.lpard if "LPARD" not in params or args.lpard != None else params["LPARD"]
params["LSEPK"] = args.lsepk if "LSEPK" not in params or args.lsepk != None else params["LSEPK"]
params["LSEPB"] = args.lsepb if "LSEPB" not in params or args.lsepb != None else params["LSEPB"]
params["NBMOD"] = args.nbmod if "NBMOD" not in params or args.nbmod != None else params["NBMOD"]
params["EINT"] = args.eint if "EINT" not in params or args.eint != None else params["EINT"]
params["LVHAR"] = args.lvhar if "LVHAR" not in params or args.lvhar != None else params["LVHAR"]
params["LEPSILON"] = args.lepsilon if "LEPSILON" not in params or args.lepsilon != None else params["LEPSILON"]
params["LPEAD"] = args.lpead if "LPEAD" not in params or args.lpead != None else params["LPEAD"]
params["LRPA"] = args.lrpa if "LRPA" not in params or args.lrpa != None else params["LRPA"]
if args.runtype == 0:
# static
from pymatflow.cmd.matflow import get_kpath
from pymatflow.vasp.static import StaticRun
task = StaticRun()
task.get_xyz(xyzfile)
task.set_params(params, runtype="static")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
if params["LNONCOLLINEAR"] != None:
if params["LNONCOLLINEAR"].upper() == ".TRUE." or params["LNONCOLLINEAR"].upper() == "T":
task.magnetic_status = "non-collinear"
if args.static == "scf":
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.scf(directory=args.directory, runopt=args.runopt, auto=args.auto)
elif args.static == "band":
if args.hse_in_scf.lower() == "true":
hse_in_scf = True
elif args.hse_in_scf.lower() == "false":
hse_in_scf = False
task.band(directory=args.directory, runopt=args.runopt, auto=args.auto, kpath=get_kpath(args.kpath_manual, args.kpath_file), hse_in_scf=hse_in_scf)
elif args.static == "dos":
if args.hse_in_scf.lower() == "true":
hse_in_scf = True
elif args.hse_in_scf.lower() == "false":
hse_in_scf = False
if args.kpoints_mp_nscf == None:
kpoints_mp_nscf = args.kpoints_mp #[2*k for k in args.kpoints_mp]
else:
kpoints_mp_nscf = args.kpoints_mp_nscf
task.dos(directory=args.directory, runopt=args.runopt, auto=args.auto, hse_in_scf=hse_in_scf, kpoints_mp_nscf=kpoints_mp_nscf)
elif args.static == "optics":
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.optics(directory=args.directory, runopt=args.runopt, auto=args.auto)
elif args.static == "bse":
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.bse(directory=args.directory, runopt=args.runopt, auto=args.auto, bse_level=args.bse_level, algo_gw=args.algo_gw)
elif args.static == "parchg" or args.static == "stm":
if args.hse_in_scf.lower() == "true":
hse_in_scf = True
elif args.hse_in_scf.lower() == "false":
hse_in_scf = False
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.parchg_stm(directory=args.directory, runopt=args.runopt, auto=args.auto, hse_in_scf=hse_in_scf)
elif args.runtype == 1:
# optimization
from pymatflow.vasp.opt import OptRun
#
if args.fix != None or args.fix_around_z != None:
# can only write xyz and poscar file
from pymatflow.cmd.structflow import read_structure
a = read_structure(filepath=xyzfile)
if args.fix != None:
fix = args.fix
elif args.fix_around_z != None:
atoms_index_from_1 = []
for i in range(len(a.atoms)):
if a.atoms[i].z > (args.fix_around_z[0] + args.fix_around_z[1]) and a.atoms[i].z < (args.fix_around_z[0] + args.fix_around_z[2]):
atoms_index_from_1.append(i+1)
fix = atoms_index_from_1
else:
fix = []
fix_str = ""
for i in fix:
fix_str += "%d " % i
os.system("xyz-fix-atoms.py -i %s -o %s --fix %s" % (xyzfile, xyzfile, fix_str))
args.selective_dynamics = "T"
# output an xsd file with fixed atoms colored specifically so that user can check the atoms fixed
from xml.etree.ElementTree import parse
from pymatflow.cmd.structflow import write_structure
os.system("mkdir -p /tmp/structflow/fix")
write_structure(a, filepath="/tmp/structflow/fix/tmp.xsd")
# read xsd file
xsd = parse("/tmp/structflow/fix/tmp.xsd")
# ID of Atom3D in xsd file start from 4
imap = xsd.getroot().find("AtomisticTreeRoot").find("SymmetrySystem").find("MappingSet").find("MappingFamily").find("IdentityMapping")
atoms = imap.findall("Atom3d")
if args.color_fix == "white":
RGB = [255, 255, 255]
elif args.color_fix == "red":
RGB = [255, 0, 0]
elif args.color_fix == "green":
RGB = [0, 255, 0]
elif args.color_fix == "blue":
RGB = [0, 0, 255]
else:
RGB = [255, 255, 255] # default
for i in fix:
atoms[i-1].set("Color", "%f, %f, %f, %f" % (RGB[0], RGB[1], RGB[2], 1))
# write xsd file
xsd.write(xyzfile+".coloring.atoms.fixed.xsd")
#
task = OptRun()
task.get_xyz(xyzfile)
task.set_params(params=params, runtype="opt")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.poscar.selective_dynamics = True if args.selective_dynamics.upper()[0] == "T" else False
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.set_cdcloud(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.optimize(directory=args.directory, runopt=args.runopt, auto=args.auto)
elif args.runtype == 2:
# cubic cell
from pymatflow.vasp.opt import OptRun
# some must set parameters
if params["IBRION"] == None:
params["IBRION"] = 2
params["ISIF"] = 2
if params["NSW"] == None:
params["NSW"] = 100
task = OptRun()
task.get_xyz(xyzfile)
task.set_params(params=params, runtype="opt")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.set_cdcloud(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.batch_a = args.batch_a
task.cubic(directory=args.directory, runopt=args.runopt, auto=args.auto, range_a=args.range_a)
elif args.runtype == 3:
# hexagonal cell
from pymatflow.vasp.opt import OptRun
task = OptRun()
task.get_xyz(xyzfile)
task.set_params(params=params, runtype="opt")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.set_cdcloud(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.batch_a = args.batch_a
task.batch_c = args.batch_c
task.hexagonal(directory=args.directory, runopt=args.runopt, auto=args.auto, range_a=args.range_a, range_c=args.range_c)
elif args.runtype == 4:
# tetragonal cell
from pymatflow.vasp.opt import OptRun
task = OptRun()
task.get_xyz(xyzfile)
task.set_params(params=params, runtype="opt")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.set_cdcloud(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.batch_a = args.batch_a
task.batch_c = args.batch_c
task.tetragonal(directory=args.directory, runopt=args.runopt, auto=args.auto, range_a=args.range_a, range_c=args.range_c)
elif args.runtype == 5:
# neb
# we better set NSW manually in VTST neb calc.
# if not set, pymatflow.vasp.neb will set it to 100 automatically
from pymatflow.vasp.neb import NebRun
task = NebRun()
task.get_images(images)
task.set_params(params=params, runtype="neb")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.nimage = args.nimage
if args.nebmake == 1 and args.moving_atom == None:
print("============================================\n")
print("when using nebmake.py to generate inter image\n")
print("you have to specify the moving atoms.\n")
print("index start from 0\n")
sys.exit(1)
task.nebmake = "nebmake.pl" if args.nebmake == 0 else "nebmake.py"
task.moving_atom = args.moving_atom
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.neb(directory=args.directory, runopt=args.runopt, auto=args.auto)
# move the OUTCAR for initial stucture and final structure to the corresponding dir
# if they are specified
if args.outcars != None and len(args.outcars) > 0:
os.system("cp %s %s" % (args.outcars[0], os.path.join(args.directory, "00/")))
os.system("cp %s %s" % (args.outcars[-1], os.path.join(args.directory, "%.2d/" % (args.nimage+1))))
elif args.runtype == 6:
# vasp phonon
from pymatflow.vasp.phonon import PhononRun
task = PhononRun()
task.get_xyz(xyzfile)
task.set_params(params=params, runtype="phonon")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.supercell_n = args.supercell_n
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.phonon(directory=args.directory, runopt=args.runopt, auto=args.auto)
elif args.runtype == 7:
# phonopy
from pymatflow.vasp.phonopy import PhonopyRun
task = PhonopyRun()
task.get_xyz(xyzfile)
task.set_params(params=params, runtype="phonopy")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.supercell_n = args.supercell_n
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.phonopy(directory=args.directory, runopt=args.runopt, auto=args.auto)
elif args.runtype == 8:
# sur pes
from pymatflow.flow.surface_pes import VaspRun
task = VaspRun()
task.get_xyz(xyzfile)
task.set_params(params=params, runtype="opt")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
#task.poscar.selective_dynamics = True if args.selective_dynamics.upper()[0] == "T" else False
task.poscar.selective_dynamics = True # always use selective_dynamics
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
#task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.batch_x_y = args.batch_x_y
task.set_pes(move_atom=args.move_atom, xrange=args.xrange, yrange=args.yrange, zshift=args.zshift, fix_z=args.fix_z, fix_y=args.fix_y, fix_x=args.fix_x)
task.run(directory=args.directory, runopt=args.runopt, auto=args.auto)
elif args.runtype == 9:
# abc cell
from pymatflow.vasp.opt import OptRun
task = OptRun()
task.get_xyz(xyzfile)
task.set_params(params=params, runtype="opt")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.batch_a = args.batch_a
task.batch_b = args.batch_b
task.batch_c = args.batch_c
task.abc(directory=args.directory, runopt=args.runopt, auto=args.auto, range_a=args.range_a, range_b=args.range_b, range_c=args.range_c)
elif args.runtype == 10:
# AIMD
from pymatflow.vasp.md import MdRun
task = MdRun()
task.get_xyz(xyzfile)
task.set_params(params=params, runtype="md")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
"""
ensemble:
0: NVE
1: NVT
2: NPT
3: NPH
thermostat:
0: Anderson
1: Nose-Hoover
2: Langevin
3: Multiple Anderson
"""
#task.incar.set_md(ensemble=ensemble, thermostat=thermostat)
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.md(directory=args.directory, runopt=args.runopt, auto=args.auto)
elif args.runtype == 11:
# vasp custom
from pymatflow.vasp.custom import CustomRun
task = CustomRun()
task.get_xyz(xyzfile)
task.set_params(params=params, runtype="custom")
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.set_run(mpi=args.mpi, server=server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn, queue=args.queue)
task.set_llhpc(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.set_cdcloud(partition=args.partition, nodes=args.nodes, ntask=args.ntask, jobname=args.jobname, stdout=args.stdout, stderr=args.stderr)
task.custom(directory=args.directory, runopt=args.runopt, auto=args.auto)
else:
pass | StarcoderdataPython |
1663051 | # usage - $python generate_anagrams.py foo bar
import sys
from random import shuffle
# For each word given on the command line, print 10 random shufflings of
# its letters (they are not guaranteed to differ from each other or from
# the original word).
for word in sys.argv[1:]:
    letters = list(word)
    anagrams = []
    for _ in range(10):
        shuffle(letters)
        anagrams.append(''.join(letters))
    # A parenthesized print of a single string behaves identically on
    # Python 2 and Python 3; the original bare print statement is a
    # syntax error on Python 3.
    print(' '.join(anagrams))
| StarcoderdataPython |
154425 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Sistem Koperasi
import frappe
from frappe.utils import today, flt
@frappe.whitelist()
def dkh_get_permission_query_conditions(user=None):
    """Permission-query hook for the DKH DocType.

    Returns a SQL condition restricting list views to records whose
    parent_sales_executive is the current user; Administrator is not
    restricted.  (The original version returned the condition
    unconditionally on its first line, leaving the Administrator/role
    checks below it as unreachable dead code.)
    """
    if not user:
        user = frappe.session.user
    # Administrator sees everything -- no filter clause.
    if user == "Administrator":
        return
    # Everyone else (including the "SC" role) only sees their own records.
    return """(`tabDKH`.parent_sales_executive = '{}')""".format(user)
@frappe.whitelist()
def getTerritory(territory):
    """Return the given Territory node together with all of its descendants."""
    doctype = "Territory"
    return get_child_nodes(doctype, territory)
def get_child_nodes(group_type, root):
    """Return `root` and all of its descendants from the nested-set tree
    of DocType `group_type`, ordered by lft (depth-first).

    Each returned row is a dict with keys: name, lft, rgt.
    """
    # Nested-set model: every descendant's [lft, rgt] interval lies inside
    # the parent's [lft, rgt] interval.
    lft, rgt = frappe.db.get_value(group_type, root, ["lft", "rgt"])
    # NOTE(review): group_type is interpolated into the SQL as a table name,
    # so callers must only pass trusted DocType names (here the literal
    # "Territory"); lft/rgt come from the database as integers -- confirm
    # no untrusted caller can reach this with an arbitrary group_type.
    return frappe.db.sql(""" Select name, lft, rgt from `tab{tab}` where
        lft >= {lft} and rgt <= {rgt} order by lft""".format(tab=group_type, lft=lft, rgt=rgt), as_dict=1)
1654318 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import time
import re
import boto3
def get_client(region=None):
    """Builds a client to the AWS Athena API."""
    return boto3.client('athena', region_name=region)
def query(client, query, database, output):
    """Run a SQL statement on Athena and wait for it to finish.

    Args:
        client: boto3 Athena client.
        query: SQL statement to execute.
        database: Athena database to run the statement against.
        output: S3 location (e.g. s3://bucket/prefix/) for query results.

    Returns:
        Dict with 'total_bytes_processed' (bytes scanned by Athena) and
        'filename' (name of the result object inside `output`).

    Raises:
        Exception: if the query fails, is cancelled, or does not finish
            within the polling budget.
    """
    response = client.start_query_execution(
        QueryString=query,
        QueryExecutionContext={
            'Database': database
        },
        ResultConfiguration={
            'OutputLocation': output,
        }
    )
    execution_id = response['QueryExecutionId']
    logging.info('Execution ID: %s', execution_id)
    # Athena query execution is an async call; poll until the query reaches
    # a terminal state or the polling budget is exhausted.
    state = 'RUNNING'
    max_execution = 5  # TODO: this should be an optional parameter from users. or use timeout
    filename = None
    # Queries sit in QUEUED before RUNNING, so keep polling through both
    # states (the original only recognized RUNNING and would give up on a
    # still-queued query).
    while max_execution > 0 and state in ('QUEUED', 'RUNNING'):
        max_execution = max_execution - 1
        response = client.get_query_execution(QueryExecutionId=execution_id)
        if 'QueryExecution' in response and \
                'Status' in response['QueryExecution'] and \
                'State' in response['QueryExecution']['Status']:
            state = response['QueryExecution']['Status']['State']
            if state == 'FAILED':
                raise Exception('Athena Query Failed')
            elif state == 'SUCCEEDED':
                s3_path = response['QueryExecution']['ResultConfiguration']['OutputLocation']
                # could be multiple files?  Take the object name after the
                # last '/' (raw string fixes the invalid '\/' escape).
                filename = re.findall(r'.*/(.*)', s3_path)[0]
                logging.info("S3 output file name %s", filename)
                break
        time.sleep(5)
    if filename is None:
        # CANCELLED, or still QUEUED/RUNNING after the budget ran out; the
        # original code crashed here with a NameError on `filename`.
        raise Exception('Athena query did not succeed (last state: %s)' % state)
    # TODO:(@Jeffwan) Add more details.
    result = {
        'total_bytes_processed': response['QueryExecution']['Statistics']['DataScannedInBytes'],
        'filename': filename
    }
    return result
def main():
    """Parse CLI flags, run the Athena query and dump the result to /output.txt."""
    logging.getLogger().setLevel(logging.INFO)

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--region', type=str, help='Athena region.')
    arg_parser.add_argument('--database', type=str, required=True, help='The name of the database.')
    arg_parser.add_argument('--query', type=str, required=True, help='The SQL query statements to be executed in Athena.')
    arg_parser.add_argument(
        '--output', type=str, required=False,
        help='The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/')
    opts = arg_parser.parse_args()

    athena_client = get_client(opts.region)
    query_result = query(athena_client, opts.query, opts.database, opts.output)
    query_result['output'] = opts.output
    logging.info('Athena results: %s', query_result)

    # Persist the result where downstream pipeline steps expect to read it.
    with open('/output.txt', 'w+') as f:
        json.dump(query_result, f)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1687693 | from .. import VidStreamer
import cv2
import sys
if __name__ == '__main__':
    # Manual smoke test for VidStreamer: connect to a hard-coded partner,
    # start streaming, and display incoming frames until ESC is pressed or
    # the client thread reports an error.
    streamer = VidStreamer.VidStreamer(verbose=True, _diffmin=0)
    streamer.set_partner(("10.50.3.181", 5000))  # NOTE(review): hard-coded peer address
    streamer.initCam()
    streamer.cam.set_res(640, 480)
    if not streamer.connectPartner():
        print("connectPartner failed")
        # Exit with a non-zero status on failure (the original exited with
        # 0, which signals success to the calling shell).
        sys.exit(1)
    streamer.init_infoExchange()
    streamer.initComps()
    streamer.beginStreaming()
    print("test1")  # leftover debug marker
    cv2.namedWindow("feed", cv2.WINDOW_NORMAL)
    while True:
        # Surface any error reported by the client thread and stop cleanly.
        if not streamer.errorQueue_c.empty():
            print("\nclientThread closed on error: {}\n".format(streamer.errorQueue_c.get(block=True, timeout=2)))
            streamer.close(destroy=True)
            break
        if cv2.waitKey(1) == 27:
            streamer.close(destroy=True)
            break  # esc to quit
        img = streamer.getCurrFrame()
        cv2.imshow("feed", img)
| StarcoderdataPython |
3314226 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import logging
from django.db.utils import IntegrityError
from networkapi.admin_permission import AdminPermission
from networkapi.ambiente.models import Ambiente
from networkapi.ambiente.models import AmbienteNotFoundError
from networkapi.auth import has_perm
from networkapi.distributedlock import distributedlock
from networkapi.distributedlock import LOCK_IP_EQUIPMENT
from networkapi.equipamento.models import Equipamento
from networkapi.equipamento.models import EquipamentoError
from networkapi.equipamento.models import EquipamentoNotFoundError
from networkapi.exception import InvalidValueError
from networkapi.grupo.models import GrupoError
from networkapi.infrastructure.ipaddr import IPAddress
from networkapi.infrastructure.xml_utils import dumps_networkapi
from networkapi.infrastructure.xml_utils import loads
from networkapi.infrastructure.xml_utils import XMLError
from networkapi.ip.models import Ip
from networkapi.ip.models import IpCantBeRemovedFromVip
from networkapi.ip.models import IpCantRemoveFromServerPool
from networkapi.ip.models import IpEquipamento
from networkapi.ip.models import IpEquipamentoDuplicatedError
from networkapi.ip.models import IpEquipCantDissociateFromVip
from networkapi.ip.models import IpEquipmentNotFoundError
from networkapi.ip.models import IpError
from networkapi.ip.models import IpNotAvailableError
from networkapi.ip.models import IpNotFoundError
from networkapi.ip.models import NetworkIPv4NotFoundError
from networkapi.requisicaovips.models import ServerPoolMember
from networkapi.rest import RestResource
from networkapi.rest import UserNotAuthorizedError
from networkapi.util import destroy_cache_function
from networkapi.util import is_valid_int_greater_zero_param
from networkapi.util import is_valid_ipv4
from networkapi.util import is_valid_string_maxsize
from networkapi.util import is_valid_string_minsize
from networkapi.util import mount_ipv4_string
from networkapi.vlan.models import VlanError
from networkapi.vlan.models import VlanNotFoundError
def insert_ip(ip_map, user):
    """Insert an IP and the relationship between the IP and the equipment.
    @param ip_map: Map with the keys: id_equipamento, id_vlan and descricao
    @param user: User authenticated in the API.
    @return On failure returns the tuple: (error message code, argument01, argument02, ...)
            On success returns the tuple: (0, <map with the IP data>)
    @raise VlanNotFoundError: VLAN not registered.
    @raise VlanError: Failure while searching for the VLAN.
    @raise EquipamentoNotFoundError: Equipment not registered.
    @raise EquipamentoError: Failure while searching for the equipment.
    @raise IpNotAvailableError: There is no IP available for the VLAN.
    @raise IpError: Failure while inserting into the database.
    @raise UserNotAuthorizedError: User has no permission to perform the operation.
    """
    log = logging.getLogger('insert_ip')
    # The equipment id must be a positive integer.
    equip_id = ip_map.get('id_equipamento')
    if not is_valid_int_greater_zero_param(equip_id):
        log.error(
            u'The equip_id parameter is not a valid value: %s.', equip_id)
        raise InvalidValueError(None, 'equip_id', equip_id)
    else:
        equip_id = int(equip_id)
    # The user needs write permission on IPs and on this equipment.
    if not has_perm(user,
                    AdminPermission.IPS,
                    AdminPermission.WRITE_OPERATION,
                    None,
                    equip_id,
                    AdminPermission.EQUIP_WRITE_OPERATION):
        raise UserNotAuthorizedError(
            None, u'Usuário não tem permissão para executar a operação.')
    # The VLAN id must be a positive integer.
    vlan_id = ip_map.get('id_vlan')
    if not is_valid_int_greater_zero_param(vlan_id):
        log.error(u'The vlan_id parameter is not a valid value: %s.', vlan_id)
        raise InvalidValueError(None, 'vlan_id', vlan_id)
    else:
        vlan_id = int(vlan_id)
    # Optional description: when present it must be 3-100 characters long.
    desc_ip = ip_map.get('descricao')
    if desc_ip is not None:
        if not is_valid_string_maxsize(desc_ip, 100) or not is_valid_string_minsize(desc_ip, 3):
            log.error(u'Parameter desc_ip is invalid. Value: %s.', desc_ip)
            raise InvalidValueError(None, 'desc_ip', desc_ip)
    # Create the IP in the VLAN and associate it with the equipment
    # (raises IpNotAvailableError when the VLAN has no free IP).
    ip = Ip()
    ip.descricao = desc_ip
    ip.create(user, equip_id, vlan_id, False)
    # Build the response map describing the newly created IP.
    ip_map = dict()
    ip_map['id'] = ip.id
    ip_map['id_redeipv4'] = ip.networkipv4.id
    ip_map['oct4'] = ip.oct4
    ip_map['oct3'] = ip.oct3
    ip_map['oct2'] = ip.oct2
    ip_map['oct1'] = ip.oct1
    ip_map['descricao'] = ip.descricao
    return 0, ip_map
def insert_ip_equipment(ip_id, equip_id, user):
    """Insert the relationship between the IP and the equipment.
    @param ip_id: IP identifier.
    @param equip_id: Equipment identifier.
    @param user: Authenticated user.
    @return: The created ip_equipamento relationship.
    @raise IpError: Failure while inserting.
    @raise EquipamentoNotFoundError: Equipment not registered.
    @raise IpNotFoundError: IP not registered.
    @raise IpEquipamentoDuplicatedError: IP already registered for the equipment.
    @raise EquipamentoError: Failure while searching for the equipment.
    @raise UserNotAuthorizedError: User has no permission to perform the operation.
    """
    # The user needs write permission on IPs and on this equipment.
    if not has_perm(user,
                    AdminPermission.IPS,
                    AdminPermission.WRITE_OPERATION,
                    None,
                    equip_id,
                    AdminPermission.EQUIP_WRITE_OPERATION):
        raise UserNotAuthorizedError(
            None, u'Usuário não tem permissão para executar a operação.')
    ip_equipment = IpEquipamento()
    ip_equipment.create(user, ip_id, equip_id)
    return ip_equipment
def remove_ip_equipment(ip_id, equipment_id, user):
    """ Remove the relationship between an IP and an equipment.
    @param ip_id: IP identifier.
    @param equipment_id: Equipment identifier.
    @param user: Authenticated user.
    @return: Nothing.
    @raise IpEquipmentNotFoundError: Relationship not registered.
    @raise IpEquipCantDissociateFromVip: Equip is the last balancer in a created Vip Request, the relationship cannot be removed.
    @raise EquipamentoNotFoundError: Equipment not registered.
    @raise IpError, GrupoError: Failure while searching the data or while removing.
    @raise UserNotAuthorizedError: User has no permission to perform the operation.
    """
    # The user needs write permission on IPs and on this equipment.
    if not has_perm(user,
                    AdminPermission.IPS,
                    AdminPermission.WRITE_OPERATION,
                    None,
                    equipment_id,
                    AdminPermission.EQUIP_WRITE_OPERATION):
        raise UserNotAuthorizedError(
            None, u'Usuário não tem permissão para executar a operação.')
    IpEquipamento().remove(user, ip_id, equipment_id)
    return
class IpResource(RestResource):
    """REST resource exposing IPv4/equipment operations.

    PUT    ip/<id_ip>/equipamento/<id_equipamento>/  - associate IP and equipment
    POST   ip/                                       - create an IP and associate it
    DELETE ip/<id_ip>/equipamento/<id_equipamento>/  - dissociate IP from equipment
    GET    /ip/<ip>/ambiente/<id_amb>                - check the IP belongs to an environment
    """
    # Class-level logger shared by all handlers.
    log = logging.getLogger('IpResource')
    def handle_put(self, request, user, *args, **kwargs):
        """Handle PUT requests to create the relationship between an IP and an equipment.
        URL: ip/<id_ip>/equipamento/<id_equipamento>/$
        """
        try:
            ip_id = kwargs.get('id_ip')
            equip_id = kwargs.get('id_equipamento')
            # Both URL parameters must be positive integers.
            if not is_valid_int_greater_zero_param(ip_id):
                self.log.error(
                    u'The ip_id parameter is not a valid value: %s.', ip_id)
                raise InvalidValueError(None, 'ip_id', ip_id)
            if not is_valid_int_greater_zero_param(equip_id):
                self.log.error(
                    u'The equip_id parameter is not a valid value: %s.', equip_id)
                raise InvalidValueError(None, 'equip_id', equip_id)
            # Raises IpNotFoundError when the IP does not exist.
            Ip.get_by_pk(ip_id)
            # Serialize concurrent changes on this (ip, equipment) pair.
            with distributedlock(LOCK_IP_EQUIPMENT % (ip_id, equip_id)):
                ip_equipment = insert_ip_equipment(ip_id, equip_id, user)
                ipequipamento_map = dict()
                ipequipamento_map['id'] = ip_equipment.id
                # NOTE(review): 'map' shadows the builtin of the same name.
                map = dict()
                map['ip_equipamento'] = ipequipamento_map
                return self.response(dumps_networkapi(map))
        except InvalidValueError, e:
            return self.response_error(269, e.param, e.value)
        except IpNotFoundError:
            return self.response_error(119)
        except EquipamentoNotFoundError:
            return self.response_error(117, equip_id)
        except IpEquipamentoDuplicatedError:
            return self.response_error(120)
        except UserNotAuthorizedError:
            return self.not_authorized()
        except (IpError, EquipamentoError, GrupoError):
            return self.response_error(1)
    def handle_post(self, request, user, *args, **kwargs):
        """Handle POST requests to create an IP and associate it with an equipment.
        URL: ip/
        """
        # Parse the XML request body.
        try:
            xml_map, attrs_map = loads(request.raw_post_data)
        except XMLError, x:
            self.log.error(u'Erro ao ler o XML da requisição.')
            return self.response_error(3, x)
        # The payload must contain <networkapi><ip>...</ip></networkapi>.
        networkapi_map = xml_map.get('networkapi')
        if networkapi_map is None:
            return self.response_error(3, u'Não existe valor para a tag networkapi do XML de requisição.')
        ip_map = networkapi_map.get('ip')
        if ip_map is None:
            return self.response_error(3, u'Não existe valor para a tag ip do XML de requisição.')
        try:
            # insert_ip returns (0, ip_data) on success or (error_code,) on failure.
            response = insert_ip(ip_map, user)
            if response[0] == 0:
                return self.response(dumps_networkapi({'ip': response[1]}))
            else:
                return self.response_error(response[0])
        except InvalidValueError, e:
            return self.response_error(269, e.param, e.value)
        except VlanNotFoundError:
            return self.response_error(116)
        except NetworkIPv4NotFoundError, e:
            return self.response_error(281)
        except EquipamentoNotFoundError:
            return self.response_error(117, ip_map.get('id_equipamento'))
        except IpNotAvailableError, e:
            return self.response_error(150, e.message)
        except UserNotAuthorizedError:
            return self.not_authorized()
        except (IpError, VlanError, EquipamentoError, GrupoError), e:
            return self.response_error(1, e)
        except Exception, e:
            return self.response_error(1, e)
    def handle_delete(self, request, user, *args, **kwargs):
        """Treat DELETE requests to remove IP and Equipment relationship.
        URL: ip/<id_ip>/equipamento/<id_equipamento>/$
        """
        try:
            ip_id = kwargs.get('id_ip')
            equip_id = kwargs.get('id_equipamento')
            # Both URL parameters must be positive integers.
            if not is_valid_int_greater_zero_param(ip_id):
                self.log.error(
                    u'The ip_id parameter is not a valid value: %s.', ip_id)
                raise InvalidValueError(None, 'ip_id', ip_id)
            if not is_valid_int_greater_zero_param(equip_id):
                self.log.error(
                    u'The equip_id parameter is not a valid value: %s.', equip_id)
                raise InvalidValueError(None, 'equip_id', equip_id)
            # Both entities must exist (raise *NotFoundError otherwise).
            Ip.get_by_pk(ip_id)
            Equipamento.get_by_pk(equip_id)
            # Serialize concurrent changes on this (ip, equipment) pair.
            with distributedlock(LOCK_IP_EQUIPMENT % (ip_id, equip_id)):
                ipv4 = Ip.get_by_pk(ip_id)
                equipament = Equipamento.get_by_pk(equip_id)
                # Delete vlan's cache
                destroy_cache_function([ipv4])
                # delete equipment's cache
                destroy_cache_function([equip_id], True)
                server_pool_member_list = ServerPoolMember.objects.filter(
                    ip=ipv4)
                if server_pool_member_list.count() != 0:
                    # IP associated with Server Pool: refuse removal and list
                    # the pools ("id: identifier") that still reference it.
                    server_pool_name_list = set()
                    for member in server_pool_member_list:
                        item = '{}: {}'.format(
                            member.server_pool.id, member.server_pool.identifier)
                        server_pool_name_list.add(item)
                    server_pool_name_list = list(server_pool_name_list)
                    server_pool_identifiers = ', '.join(server_pool_name_list)
                    raise IpCantRemoveFromServerPool({'ip': mount_ipv4_string(ipv4), 'equip_name': equipament.nome, 'server_pool_identifiers': server_pool_identifiers},
                                                     'Ipv4 não pode ser disassociado do equipamento %s porque ele está sendo utilizando nos Server Pools (id:identifier) %s' % (equipament.nome, server_pool_identifiers))
                remove_ip_equipment(ip_id, equip_id, user)
                return self.response(dumps_networkapi({}))
        except IpCantRemoveFromServerPool, e:
            return self.response_error(385, e.cause.get('ip'), e.cause.get('equip_name'), e.cause.get('server_pool_identifiers'))
        except InvalidValueError, e:
            return self.response_error(269, e.param, e.value)
        except EquipamentoNotFoundError, e:
            return self.response_error(117, e.message)
        except IpEquipmentNotFoundError:
            return self.response_error(118, ip_id, equip_id)
        except IpNotFoundError:
            return self.response_error(119)
        except IpCantBeRemovedFromVip, e:
            return self.response_error(319, 'ip', 'ipv4', ip_id)
        except IpEquipCantDissociateFromVip, e:
            return self.response_error(352, e.cause['ip'], e.cause['equip_name'], e.cause['vip_id'])
        except UserNotAuthorizedError:
            return self.not_authorized()
        except (IpError, GrupoError, EquipamentoError, IntegrityError), e:
            if isinstance(e.cause, IntegrityError):
                # IP associated VIP
                self.log.error(u'Failed to update the request vip.')
                return self.response_error(354, ip_id)
            else:
                return self.response_error(1)
    def handle_get(self, request, user, *args, **kwargs):
        """Treat requests GET to verify that the IP belongs to environment.
        URLs: /ip/x1.x2.x3.x4/ambiente/<id_amb>
        URLs: /ip/<ip>/ambiente/<id_amb>
        """
        self.log.info('GET to verify that the IP belongs to environment')
        try:
            # User permission
            if not has_perm(user, AdminPermission.IPS, AdminPermission.READ_OPERATION):
                self.log.error(
                    u'User does not have permission to perform the operation.')
                return self.not_authorized()
            environment_id = kwargs.get('id_amb')
            # Valid Environment ID
            if not is_valid_int_greater_zero_param(environment_id):
                self.log.error(
                    u'The id_environment parameter is not a valid value: %s.', environment_id)
                raise InvalidValueError(None, 'id_environment', environment_id)
            ip = kwargs.get('ip')
            # Valid IP
            if not is_valid_ipv4(ip):
                self.log.error(u'Parameter ip is invalid. Value: %s.', ip)
                raise InvalidValueError(None, 'ip', ip)
            # Find Environment by ID to check if it exist
            Ambiente.get_by_pk(environment_id)
            # Existing IP: normalize the dotted quad and look it up in the
            # given environment (raises IpNotFoundError when absent).
            octs = str(IPAddress(ip, 4).exploded).split('.')
            ip = Ip.get_by_octs_and_environment(
                octs[0], octs[1], octs[2], octs[3], environment_id)
            # Build dictionary return
            ip_map = dict()
            ip_map['id'] = ip.id
            ip_map['id_vlan'] = ip.networkipv4.vlan.id
            ip_map['oct4'] = ip.oct4
            ip_map['oct3'] = ip.oct3
            ip_map['oct2'] = ip.oct2
            ip_map['oct1'] = ip.oct1
            ip_map['descricao'] = ip.descricao
            return self.response(dumps_networkapi({'ip': ip_map}))
        except InvalidValueError, e:
            return self.response_error(269, e.param, e.value)
        except IpNotFoundError:
            return self.response_error(119)
        except AmbienteNotFoundError:
            return self.response_error(112)
        except (IpError, GrupoError):
            return self.response_error(1)
| StarcoderdataPython |
1687458 | import argparse
from utils.config import Config
from trainer.stage2trainer import Trainer as Stage2Trainer
class Solver():
    """Wires the experiment configuration into the stage-2 trainer."""
    def __init__(self, args):
        # Positional arguments forwarded to the robot setup inside the trainer.
        self.robot_args = [args.obj_mesh_dir, args.num_obj, args.workspace_limits, args.heightmap_resolution]
        # Keyword arguments for the logger.  The original dict literal
        # listed 'continue_logging' twice; duplicate keys in a dict literal
        # are silently collapsed, so only one entry is kept here.
        self.logger_args = {
            'continue_logging': args.continue_logging,
            'logging_directory': args.logging_directory,
            'workspace_limits': args.workspace_limits,
            'heightmap_resolution': args.heightmap_resolution,
        }
        self.stage_2_trainer = Stage2Trainer(self.robot_args, self.logger_args)
    def main(self):
        """Run the stage-2 training loop."""
        self.stage_2_trainer.main()
if __name__ == "__main__":
    # Command-line entry point: load the config file named by -f/--file
    # and hand it to the solver.
    cli = argparse.ArgumentParser(
        description='Train robotic agents to learn visual language grasp.'
    )
    # Run main program with specified config file
    cli.add_argument('-f', '--file', dest='file')
    parsed = cli.parse_args()
    Solver(Config(parsed.file)).main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.