id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5726 | import uuid
import pickle
import pytest
import argparse
from collections import namedtuple
from six import text_type
from allure.common import AllureImpl, StepContext
from allure.constants import Status, AttachmentType, Severity, \
FAILED_STATUSES, Label, SKIPPED_STATUSES
from allure.utils import parent_module, parent_down_from_module, labels_of, \
all_of, get_exception_message, now, mangle_testnames
from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel
def pytest_addoption(parser):
    """Register the allure reporting and label-filtering command line options.

    Adds ``--alluredir`` (where to write the report) plus the three label
    filters ``--allure_severities``, ``--allure_features`` and
    ``--allure_stories``, whose parsed value is a set of ``(name, value)``
    tuples consumed by :func:`pytest_runtest_setup`.
    """
    parser.getgroup("reporting").addoption('--alluredir',
                                           action="store",
                                           dest="allurereportdir",
                                           metavar="DIR",
                                           default=None,
                                           help="Generate Allure report in the specified directory (may not exist)")

    severities = [v for (_, v) in all_of(Severity)]

    def label_type(name, legal_values=frozenset()):
        """
        argparse-type factory for labelish things.
        processed value is set of tuples (name, value).

        :param name: of label type (for future TestLabel things)
        :param legal_values: a set of values that are legal for this label, if any limit whatsoever
        :raises ArgumentTypeError: if `legal_values` are given and there are values that fall out of that
        """
        def a_label_type(string):
            atoms = set(string.split(','))
            # BUG FIX: the original used a strict-subset test (`atoms < legal_values`),
            # which wrongly rejected the valid case of selecting *every* legal value.
            if legal_values and not atoms <= legal_values:
                raise argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are allowed'.format(name, ', '.join(atoms - legal_values), ', '.join(legal_values)))
            return set((name, v) for v in atoms)
        return a_label_type

    # NOTE: defaults were `{}` (an empty dict) in the original; an empty set
    # matches the declared type and behaves identically in the later
    # `set().union(...)` call in pytest_runtest_setup.
    parser.getgroup("general").addoption('--allure_severities',
                                         action="store",
                                         dest="allureseverities",
                                         metavar="SEVERITIES_SET",
                                         default=set(),
                                         type=label_type(name=Label.SEVERITY, legal_values=set(severities)),
                                         help="""Comma-separated list of severity names.
Tests only with these severities will be run.
Possible values are:%s.""" % ', '.join(severities))

    parser.getgroup("general").addoption('--allure_features',
                                         action="store",
                                         dest="allurefeatures",
                                         metavar="FEATURES_SET",
                                         default=set(),
                                         type=label_type(name=Label.FEATURE),
                                         help="""Comma-separated list of feature names.
Run tests that have at least one of the specified feature labels.""")

    parser.getgroup("general").addoption('--allure_stories',
                                         action="store",
                                         dest="allurestories",
                                         metavar="STORIES_SET",
                                         default=set(),
                                         type=label_type(name=Label.STORY),
                                         help="""Comma-separated list of story names.
Run tests that have at least one of the specified story labels.""")
def pytest_configure(config):
    """Wire up the allure listeners, but only when --alluredir was supplied."""
    report_dir = config.option.allurereportdir
    if not report_dir:
        return  # reporting disabled -- nothing to register

    impl = AllureImpl(report_dir)
    test_listener = AllureTestListener(config)
    pytest.allure._allurelistener = test_listener
    config.pluginmanager.register(test_listener)

    # Only the xdist master node (no 'slaveinput') aggregates and writes files.
    if not hasattr(config, 'slaveinput'):
        config.pluginmanager.register(AllureAgregatingListener(impl, config))
        config.pluginmanager.register(AllureCollectionListener(impl))
class AllureTestListener(object):
    """
    Per-test listener.

    Is responsible for recording in-test data and for attaching it to the test report thing.

    The per-test reports are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook.
    """
    def __init__(self, config):
        # pytest Config object, queried for hooks and options below
        self.config = config
        # name -> value pairs, merged into the allure environment on the aggregating side
        self.environment = {}
        # the TestCase currently being recorded; None outside of a test run
        self.test = None

        # FIXME: that flag makes us pre-report failures in the makereport hook.
        # it is here to cope with xdist's behavior regarding -x.
        # see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish
        self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue("maxfail")

    @pytest.mark.hookwrapper
    def pytest_runtest_protocol(self, item, nextitem):
        """Open a fresh TestCase before the test runs; reset state afterwards."""
        try:
            # for common items
            description = item.function.__doc__
        except AttributeError:
            # for doctests that has no `function` attribute
            description = item.reportinfo()[2]

        self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])),
                             description=description,
                             start=now(),
                             attachments=[],
                             labels=labels_of(item),
                             status=None,
                             steps=[],
                             id=str(uuid.uuid4()))  # for later resolution in AllureAgregatingListener.pytest_sessionfinish

        # stack of the test plus any open steps; attachments go to its top
        self.stack = [self.test]

        yield

        self.test = None
        self.stack = []

    def attach(self, title, contents, attach_type):
        """
        Store attachment object in current state for later actual write in the `AllureAgregatingListener.write_attach`
        """
        attach = Attach(source=contents,  # we later re-save those, oh my...
                        title=title,
                        type=attach_type)
        # attach to the innermost open step (or the test itself)
        self.stack[-1].attachments.append(attach)

    def dynamic_issue(self, *issues):
        """
        Attaches ``issues`` to the current active case
        """
        if self.test:
            self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues])

    def description(self, description):
        """
        Sets description for the test
        """
        if self.test:
            self.test.description = description

    def start_step(self, name):
        """
        Starts an new :py:class:`allure.structure.TestStep` with given ``name``,
        pushes it to the ``self.stack`` and returns the step.
        """
        step = TestStep(name=name,
                        title=name,
                        start=now(),
                        attachments=[],
                        steps=[])
        # nest the new step under whatever is currently open
        self.stack[-1].steps.append(step)
        self.stack.append(step)
        return step

    def stop_step(self):
        """
        Stops the step at the top of ``self.stack``
        """
        step = self.stack.pop()
        step.stop = now()

    def _fill_case(self, report, call, pyteststatus, status):
        """
        Finalizes with important data
        :param report: py.test's `TestReport`
        :param call: py.test's `CallInfo`
        :param pyteststatus: the failed/xfailed/xpassed thing
        :param status: a :py:class:`allure.constants.Status` entry
        """
        # every captured section (stdout/stderr/...) becomes a text attachment
        [self.attach(name, contents, AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()]

        self.test.stop = now()
        self.test.status = status

        if status in FAILED_STATUSES:
            self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report),
                                        trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail)
        elif status in SKIPPED_STATUSES:
            # skips carry a (path, lineno, message) longrepr tuple; xfails use wasxfail
            skip_message = type(report.longrepr) == tuple and report.longrepr[2] or report.wasxfail
            trim_msg_len = 89
            short_message = skip_message.split('\n')[0][:trim_msg_len]

            # FIXME: see pytest.runner.pytest_runtest_makereport
            self.test.failure = Failure(message=(short_message + '...' * (len(skip_message) > trim_msg_len)),
                                        trace=status == Status.PENDING and report.longrepr or short_message != skip_message and skip_message or '')

    def report_case(self, item, report):
        """
        Adds `self.test` to the `report` in a `AllureAggegatingListener`-understood way
        """
        parent = parent_module(item)
        # we attach a four-tuple: (test module ID, test module name, test module doc, environment, TestCase)
        report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid,
                                                            parent.module.__name__,
                                                            parent.module.__doc__ or '',
                                                            self.environment,
                                                            self.test)))

    @pytest.mark.hookwrapper
    def pytest_runtest_makereport(self, item, call):
        """
        Decides when to actually report things.

        pytest runs this (naturally) three times -- with report.when being:
          setup     <--- fixtures are to be initialized in this one
          call      <--- when this finishes the main code has finished
          teardown  <--- tears down fixtures (that still possess important info)

        `setup` and `teardown` are always called, but `call` is called only if `setup` passes.
        See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas.

        The "other side" (AllureAggregatingListener) expects us to send EXACTLY ONE test report (it wont break, but it will duplicate cases in the report -- which is bad.
        So we work hard to decide exact moment when we call `_stop_case` to do that. This method may benefit from FSM (we keep track of what has already happened via self.test.status)

        Expected behavior is:
          FAILED when call fails and others OK
          BROKEN when either setup OR teardown are broken (and call may be anything)
          PENDING if skipped and xfailed
          SKIPPED if skipped and not xfailed
        """
        report = (yield).get_result()

        status = self.config.hook.pytest_report_teststatus(report=report)
        status = status and status[0]

        if report.when == 'call':
            if report.passed:
                self._fill_case(report, call, status, Status.PASSED)
            elif report.failed:
                self._fill_case(report, call, status, Status.FAILED)
                # FIXME: this is here only to work around xdist's stupid -x thing when in exits BEFORE THE TEARDOWN test log. Meh, i should file an issue to xdist
                if self._magicaldoublereport:
                    # to minimize ze impact
                    self.report_case(item, report)
            elif report.skipped:
                if hasattr(report, 'wasxfail'):
                    self._fill_case(report, call, status, Status.PENDING)
                else:
                    self._fill_case(report, call, status, Status.CANCELED)
        elif report.when == 'setup':  # setup / teardown
            if report.failed:
                self._fill_case(report, call, status, Status.BROKEN)
            elif report.skipped:
                if hasattr(report, 'wasxfail'):
                    self._fill_case(report, call, status, Status.PENDING)
                else:
                    self._fill_case(report, call, status, Status.CANCELED)
        elif report.when == 'teardown':
            # as teardown is always called for testitem -- report our status here
            if not report.passed:
                if self.test.status not in FAILED_STATUSES:
                    # if test was OK but failed at teardown => broken
                    self._fill_case(report, call, status, Status.BROKEN)
                else:
                    # mark it broken so, well, someone has idea of teardown failure
                    # still, that's no big deal -- test has already failed
                    # TODO: think about that once again
                    self.test.status = Status.BROKEN

            # if a test isn't marked as "unreported" or it has failed, add it to the report.
            if not item.get_marker("unreported") or self.test.status in FAILED_STATUSES:
                self.report_case(item, report)
def pytest_runtest_setup(item):
    """Skip the test unless it carries at least one label selected on the CLI."""
    present = {(label.name, label.value) for label in labels_of(item)}  # see label_type
    requested = set().union(item.config.option.allurefeatures,
                            item.config.option.allurestories,
                            item.config.option.allureseverities)
    if requested and not (present & requested):
        pytest.skip('Not suitable with selected labels: %s.' % ', '.join(text_type(l) for l in sorted(requested)))
class LazyInitStepContext(StepContext):
    """
    Step context used for decorated steps.

    Step decorators may be created before pytest_configure runs -- i.e. before
    any AllureListener exists -- so the listener is resolved lazily.
    """

    def __init__(self, allure_helper, title):
        # NOTE(review): deliberately skips StepContext.__init__; the listener
        # is looked up on demand through the ``allure`` property instead.
        self.allure_helper = allure_helper
        self.title = title
        self.step = None

    @property
    def allure(self):
        """Return the active listener, or None when no test is running."""
        listener = self.allure_helper.get_listener()
        # Steps are recorded only inside a test; the presence of ``stack``
        # marks that state.  FIXME: this breaks encapsulation a lot.
        if hasattr(listener, 'stack'):
            return listener
        return None
class AllureHelper(object):
    """
    This object holds various utility methods used from ``pytest.allure`` namespace, like ``pytest.allure.attach``
    """
    def __init__(self):
        # the per-test listener; injected elsewhere (see pytest_configure)
        self._allurelistener = None  # FIXME: this gets injected elsewhere, like in the pytest_configure

    def get_listener(self):
        """Return the injected AllureTestListener (or None before configure)."""
        return self._allurelistener

    def attach(self, name, contents, type=AttachmentType.TEXT):  # @ReservedAssignment
        """
        Attaches ``contents`` to a current context with given ``name`` and ``type``.

        No-op when reporting is disabled (no listener injected).
        """
        if self._allurelistener:
            self._allurelistener.attach(name, contents, type)

    def label(self, name, *value):
        """
        A decorator factory that returns ``pytest.mark`` for a given label.
        """
        # marker name becomes e.g. "<Label.DEFAULT>.severity"; labels_of() parses it back
        allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name))
        return allure_label(*value)

    def severity(self, severity):
        """
        A decorator factory that returns ``pytest.mark`` for a given allure ``level``.
        """
        return self.label(Label.SEVERITY, severity)

    def feature(self, *features):
        """
        A decorator factory that returns ``pytest.mark`` for a given features.
        """
        return self.label(Label.FEATURE, *features)

    def story(self, *stories):
        """
        A decorator factory that returns ``pytest.mark`` for a given stories.
        """
        return self.label(Label.STORY, *stories)

    def issue(self, *issues):
        """
        A decorator factory that returns ``pytest.mark`` for a given issues.
        """
        return self.label(Label.ISSUE, *issues)

    def dynamic_issue(self, *issues):
        """
        Mark test ``issues`` from inside.
        """
        if self._allurelistener:
            self._allurelistener.dynamic_issue(*issues)

    def description(self, description):
        """
        Sets description for the test
        """
        if self._allurelistener:
            self._allurelistener.description(description)

    def testcase(self, *testcases):
        """
        A decorator factory that returns ``pytest.mark`` for a given testcases.
        """
        return self.label(Label.TESTCASE, *testcases)

    def step(self, title):
        """
        A contextmanager/decorator for steps.

        TODO: when moving to python 3, rework this with ``contextlib.ContextDecorator``.

        Usage examples::

            import pytest

            def test_foo():
                with pytest.allure.step('mystep'):
                    assert False

            @pytest.allure.step('make test data')
            def make_test_data_bar():
                raise ValueError('No data today')

            def test_bar():
                assert make_test_data_bar()

            @pytest.allure.step
            def make_test_data_baz():
                raise ValueError('No data today')

            def test_baz():
                assert make_test_data_baz()

            @pytest.fixture()
            @pytest.allure.step('test fixture')
            def steppy_fixture():
                return 1

            def test_baz(steppy_fixture):
                assert steppy_fixture
        """
        # bare @step usage (no title): ``title`` is the decorated callable itself
        if callable(title):
            return LazyInitStepContext(self, title.__name__)(title)
        else:
            return LazyInitStepContext(self, title)

    def single_step(self, text):
        """
        Writes single line to report.
        """
        if self._allurelistener:
            with self.step(text):
                pass

    def environment(self, **env_dict):
        # accumulate environment entries on the listener for the final report
        if self._allurelistener:
            self._allurelistener.environment.update(env_dict)

    @property
    def attach_type(self):
        # convenience alias: pytest.allure.attach_type.TEXT etc.
        return AttachmentType

    @property
    def severity_level(self):
        # convenience alias: pytest.allure.severity_level.CRITICAL etc.
        return Severity

    def __getattr__(self, attr):
        """
        Provides fancy shortcuts for severity::

            # these are the same
            pytest.allure.CRITICAL
            pytest.allure.severity(pytest.allure.severity_level.CRITICAL)
        """
        if attr in dir(Severity) and not attr.startswith('_'):
            return self.severity(getattr(Severity, attr))
        else:
            raise AttributeError
# Singleton helper exposed to tests as ``pytest.allure``.
MASTER_HELPER = AllureHelper()


def pytest_namespace():
    """Publish the helper into the ``pytest`` namespace as ``pytest.allure``."""
    return {'allure': MASTER_HELPER}
class AllureAgregatingListener(object):
    """
    Listens to pytest hooks to generate reports for common tests.

    Runs only on the xdist master node; receives pickled test cases from
    AllureTestListener via the ``_allure_result`` attribute on reports.
    """
    def __init__(self, impl, config):
        # AllureImpl instance that owns the report directory and file writing
        self.impl = impl

        # module's nodeid => TestSuite object
        self.suites = {}

    def pytest_sessionfinish(self):
        """
        We are done and have all the results in `self.suites`
        Lets write em down.

        But first we kinda-unify the test cases.

        We expect cases to come from AllureTestListener -- and the have ._id field to manifest their identity.

        Of all the test cases in suite.testcases we leave LAST with the same ID -- becase logreport can be sent MORE THAN ONE TIME
        (namely, if the test fails and then gets broken -- to cope with the xdist's -x behavior we have to have tests even at CALL failures)

        TODO: do it in a better, more efficient way
        """
        for s in self.suites.values():
            if s.tests:  # nobody likes empty suites
                s.stop = max(case.stop for case in s.tests)

                # deduplicate by id, keeping the LAST occurrence: walk the list
                # reversed, keep first-seen ids, then restore original order
                known_ids = set()
                refined_tests = []
                for t in s.tests[::-1]:
                    if t.id not in known_ids:
                        known_ids.add(t.id)
                        refined_tests.append(t)
                s.tests = refined_tests[::-1]

                with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f:
                    self.impl._write_xml(f, s)

        self.impl.store_environment()

    def write_attach(self, attachment):
        """
        Writes attachment object from the `AllureTestListener` to the FS, fixing it fields

        :param attachment: a :py:class:`allure.structure.Attach` object
        """
        # OMG, that is bad: mutates the attachment in place -- source becomes
        # the saved file name and type becomes its mime string
        attachment.source = self.impl._save_attach(attachment.source, attachment.type)
        attachment.type = attachment.type.mime_type

    def pytest_runtest_logreport(self, report):
        # only reports decorated by AllureTestListener.report_case carry results
        if hasattr(report, '_allure_result'):
            module_id, module_name, module_doc, environment, testcase = pickle.loads(report._allure_result)

            report._allure_result = None  # so actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98

            self.impl.environment.update(environment)

            for a in testcase.iter_attachments():
                self.write_attach(a)

            self.suites.setdefault(module_id, TestSuite(name=module_name,
                                                        description=module_doc,
                                                        tests=[],
                                                        labels=[],
                                                        start=testcase.start,  # first case starts the suite!
                                                        stop=None)).tests.append(testcase)
# Value record for a module/item that failed the pytest collection phase.
CollectFail = namedtuple('CollectFail', 'name status message trace')
class AllureCollectionListener(object):
    """
    Listens to pytest collection-related hooks and, at session end, reports
    every module that failed to collect as a dedicated test suite.
    """

    def __init__(self, impl):
        self.impl = impl
        self.fails = []

    def pytest_collectreport(self, report):
        """Remember every non-passing collection report for later reporting."""
        if report.passed:
            return
        failure_status = Status.BROKEN if report.failed else Status.CANCELED
        self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split("::"))[-1],
                                      status=failure_status,
                                      message=get_exception_message(None, None, report),
                                      trace=report.longrepr))

    def pytest_sessionfinish(self):
        """
        Creates a testsuite with collection failures if there were any.
        """
        if not self.fails:
            return
        self.impl.start_suite(name='test_collection_phase',
                              title='Collection phase',
                              description='This is the tests collection phase. Failures are modules that failed to collect.')
        for fail in self.fails:
            self.impl.start_case(name=fail.name.split(".")[-1])
            self.impl.stop_case(status=fail.status, message=fail.message, trace=fail.trace)
        self.impl.stop_suite()
| StarcoderdataPython |
5086783 | """
adjusts module path to account for virtual namespace
This is required primarily for testing.
"""
import sys
import os
import pkg_resources
VIRTUAL_NAMESPACE = 'tiddlywebplugins'
local_package = os.path.abspath(VIRTUAL_NAMESPACE)
sys.modules[VIRTUAL_NAMESPACE].__dict__['__path__'].insert(0, local_package)
| StarcoderdataPython |
3415831 | <gh_stars>0
import os
import pprint
import pytest
from dotenv import load_dotenv, find_dotenv
from hvac import Client
from pydantic import ValidationError
from builder.train_builder import TrainBuilder
from builder.messages import BuildMessage, BuildStatus, BuilderCommands
from builder.tb_store import VaultEngines
@pytest.fixture
def test_user_id():
    """Static user id shared by the train-builder tests."""
    user_id = "tb-test-user"
    return user_id
@pytest.fixture
def test_station_ids():
    """Three deterministic station ids (tb-test-station-1 .. -3)."""
    station_ids = [f"tb-test-station-{i + 1}" for i in range(3)]
    return station_ids
@pytest.fixture
def build_msg(test_user_id, test_station_ids):
    """A representative trainBuildStart payload wrapped in a BuildMessage.

    Only the ``data`` sub-dict is passed to BuildMessage; the envelope keys
    (``id``/``type``/``metadata``) are ignored here.
    NOTE(review): several literal values below ("da<PASSWORD>", the hex
    session/hash strings) look like scrubbed/sample data -- do not rely on
    their exact contents.
    """
    msg_dict = {
        "id": "f54f58d9-58a1-4141-9dfb-a48b2a275998",
        "type": "trainBuildStart",
        "metadata": {},
        "data": {
            "user_id": test_user_id,
            "user_rsa_secret_id": "rsa-test",
            "id": "da<PASSWORD>",
            "proposal_id": 4,
            "stations": [
                {
                    "id": "test-station",
                    "ecosystem": "tue",
                    "index": 1
                }
            ],
            "files": [
                "test_train/entrypoint.py",
                "test_train/requirements.txt"
            ],
            "master_image": "python/slim",
            "entrypointExecutable": "python",
            "entrypoint_path": "test_train/entrypoint.py",
            "session_id": "8203c4facff907d3bd83f8399e9a97aa4270e27acb4369f5bcdaab20643f2dc7c2ca8fe78a576c3ae7ac56b64d89a778aa86f7f90360734965dce0264ddcd705",
            "hash": "91416369e845e7ff12efe8514736d468b71bfc15cc5ded92399a1a558f4317da68cfd5884cb9e5bbbac15ce45731afe4e47ced256c7a2e493ff7fad5481b8d31",
            "hash_signed": "ace71ecae217b8da4426cee8ba8abeddab6d1d9c9d073e7c54197b82a1d453189ca2a3e278be7747b4e0fac28bba32dc1b5a4dbc4b060a2f5e659180367b56b90b6ee8f59f529206e39645acd0bd24c03c3ef291ac8ad91dbf4390541033656ec3ee48a516a94348cb60ed596be305c3754e7e4b66dc433bb47a2483ea7d772cc6a353bb43b82e4f35f7dc6ee1f502765d64785ea816b20eed6c3a1ea857a753d5048e16d395d3479b62d91c9870d9f19ee0740b6051a3089e5350227820281406d267e188ac4edb1f0f3ebd36a0aa6cb2eeeaaa71023b0e8e6381d3bc683277208a7c91de77d61e4a8ca8f71621449e564f57bb8eaef25f08da61e8b0b79f61",
            "query": {
                "query": "/Patient?",
                "data": {
                    "output_format": "json",
                    "filename": "patients.json",
                }
            },
            "user_he_key": "12345241",
            "entrypoint_command": "run"
        }
    }
    return BuildMessage(**msg_dict["data"])
@pytest.fixture
def train_files():
    """Placeholder fixture for the train file archive (not implemented)."""
    # todo generate tar file
    pass
@pytest.fixture
def builder():
    """TrainBuilder wired with env vars loaded from the nearest .env file."""
    load_dotenv(find_dotenv())
    return TrainBuilder()
def test_initialization_and_setup():
    """Smoke test: TrainBuilder can be constructed once env vars are loaded."""
    load_dotenv(find_dotenv())
    builder_instance = TrainBuilder()
def test_get_service_token(builder):
    """Service credentials must be present and stable across a refresh.

    Calling ``_get_service_credentials()`` again must leave ``service_key``
    and ``client_id`` unchanged.
    """
    key = builder.service_key
    client_id = builder.client_id
    # BUG FIX: the original `assert key, client_id` asserted only `key`,
    # silently using client_id as the failure *message*.
    assert key
    assert client_id
    builder._get_service_credentials()
    assert builder.service_key == key
    assert builder.client_id == client_id
def test_make_docker_file(builder):
    """_make_dockerfile must emit FROM + directory setup + CMD (with and
    without extra command args), and the result must be buildable by docker."""
    master_image = "python/base"
    command = "python"
    args = ["-c", "print('hello world')"]
    entrypoint_file = "entrypoint.sh"
    entrypoint_file_prefixed = "./entrypoint.sh"
    train_dir = "/opt/pht_train"
    results_dir = "/opt/pht_results"

    # Expected dockerfile text, assembled the same way the builder should.
    docker_from = f"FROM {builder.registry_domain}/master/{master_image}\n"
    directory_setup = f"RUN mkdir {train_dir} && mkdir {results_dir} && chmod -R +x {train_dir} \n"
    docker_command_args = [f'"{arg}"' for arg in args]
    docker_command_args = ", ".join(docker_command_args)
    docker_command = f'CMD ["{command}", {docker_command_args}, "/opt/pht_train/{entrypoint_file}"]\n'
    docker_file = docker_from + directory_setup + docker_command

    docker_command_no_args = f'CMD ["{command}", "/opt/pht_train/{entrypoint_file}"]\n'
    docker_file_no_args = docker_from + directory_setup + docker_command_no_args

    docker_file_obj = builder._make_dockerfile(
        master_image=master_image,
        command=command,
        command_args=args,
        entrypoint_file=entrypoint_file)
    assert docker_file == docker_file_obj.read().decode("utf-8")

    # A "./" prefix on the entrypoint must not change the generated file.
    docker_file_obj_prefixed = builder._make_dockerfile(
        master_image=master_image,
        command=command,
        command_args=args,
        entrypoint_file=entrypoint_file_prefixed)
    assert docker_file == docker_file_obj_prefixed.read().decode("utf-8")

    # Omitting command args must drop them from the CMD line entirely.
    docker_file_obj_no_args = builder._make_dockerfile(
        master_image=master_image,
        command=command,
        command_args=None,
        entrypoint_file=entrypoint_file_prefixed)
    assert docker_file_no_args == docker_file_obj_no_args.read().decode("utf-8")

    # print(docker_file_obj.read().decode("utf-8"))
    # Rewind before handing the file object to the docker daemon.
    docker_file_obj.seek(0)
    image, logs = builder.docker_client.images.build(fileobj=docker_file_obj)
    assert image
    print(image, list(logs))
def test_process_status_message(builder):
    """Status queries return STARTED for a known train, NOT_FOUND otherwise."""
    train_id = "tb-test-train-id"
    builder._setup()
    builder.redis_store.set_build_status(train_id=train_id, status=BuildStatus.STARTED)

    known_train_msg = {
        "type": "trainBuildStatus",
        "data": {
            "id": train_id,
        }
    }
    assert builder.process_message(known_train_msg).type == BuildStatus.STARTED

    # train not found
    unknown_train_msg = {
        "type": "trainBuildStatus",
        "data": {
            "id": "wrong-id",
        }
    }
    assert builder.process_message(unknown_train_msg).type == BuildStatus.NOT_FOUND
def test_generate_config(builder, build_msg):
    """generate_config_and_query() must resolve the user/station secrets that
    were previously written into vault."""
    # generate test user and secrets in vault
    user_secrets = {
        "rsa-test": os.urandom(32).hex(),
    }
    builder.vault_client.secrets.kv.v1.create_or_update_secret(
        path=str(build_msg.user_id),
        mount_point=VaultEngines.USERS.value,
        secret=user_secrets
    )

    # one public-key secret per station referenced by the build message
    for station in build_msg.stations:
        station_secret = {
            "rsa_public_key": station.id.encode().hex(),
        }
        response = builder.vault_client.secrets.kv.v1.create_or_update_secret(
            path=station.id,
            mount_point=VaultEngines.STATIONS.value,
            secret=station_secret
        )
        print(response.text)

    config, query = builder.generate_config_and_query(build_msg)
    # creator key must round-trip through vault unchanged
    assert config.creator.rsa_public_key == user_secrets["rsa-test"]
    assert config
def test_build(builder, build_msg):
    """process_message() failure paths for malformed build requests."""
    # payload without any train id at all
    missing_id_msg = {
        "type": "trainBuildStart",
        "data": {
            "hello": "random"
        }
    }
    assert builder.process_message(missing_id_msg).type == BuildStatus.NOT_FOUND

    # invalid build data
    invalid_data_msg = {
        "type": "trainBuildStart",
        "data": {
            "id": "random",
            "hello": "world"
        }
    }
    assert builder.process_message(invalid_data_msg).type == BuildStatus.FAILED
| StarcoderdataPython |
11276820 | <filename>recognizeApp/networks/v1.py
import os.path
import numpy as np
import matplotlib.image
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras.preprocessing import image
from keras import optimizers
import h5py
from keras.models import Model
from keras import applications
from skimage.io import imsave, imread
# dimensions of images fed to the network (VGG16 input below is 256x256x3).
img_width, img_height = [256] * 2

# dataset locations, relative to the working directory
train_data_dir = 'new_data/train'
validation_data_dir = 'new_data/validation'
test_data_dir = 'data/test'

# sample counts -- presumably the number of images in the dirs above; verify
nb_train_samples = 1766
nb_validation_samples = 689
epochs = 10
batch_size = 16
def create_model():
    """Assemble a VGG16-backed binary classifier.

    A pretrained VGG16 (without its dense head) feeds a small fully-connected
    classifier ending in a single sigmoid unit.  Layers up to the last
    convolutional block are frozen so only the top of the network trains.
    """
    backbone = applications.VGG16(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    print('Model loaded.')

    # classifier head placed on top of the convolutional backbone
    head = Sequential()
    head.add(Flatten(input_shape=backbone.output_shape[1:]))
    head.add(Dense(256, activation='relu'))
    head.add(Dropout(0.5))
    head.add(Dense(1, activation='sigmoid'))

    model = Model(inputs=backbone.input, outputs=head(backbone.output))

    # freeze the first 15 layers (up to the last conv block) so their
    # pretrained weights are not updated
    for frozen_layer in model.layers[:15]:
        frozen_layer.trainable = False
    return model
def recognize(target,
              weights_path='/Users/alexivannikov/recognizeMelanoma/recognizeApp/networks/weights_new.h5'):
    """Classify the image at ``target`` and return the sigmoid score.

    :param target: path to the image file to classify
    :param weights_path: path to the trained weights; defaults to the
        original hard-coded location for backward compatibility
    :returns: float in [0, 1] -- the model's melanoma probability score
    :raises FileNotFoundError: if ``target`` does not exist

    BUG FIX: the original only guarded image loading with
    ``if os.path.isfile(target)`` but still used ``prediction`` afterwards,
    raising an opaque UnboundLocalError for a missing file.  It also read the
    image a second time via ``imread`` into an unused variable.
    """
    if not os.path.isfile(target):
        raise FileNotFoundError('Image not found: %s' % target)

    model = create_model()
    model.load_weights(weights_path)

    img = image.load_img(target, target_size=(256, 256))
    x = image.img_to_array(img)
    x = x[np.newaxis, ...]  # add the batch dimension

    prediction = model.predict(x)
    score = prediction[0][0]

    # release the TF/Keras session so repeated calls do not leak graph state
    K.clear_session()
    return score
#nevus_model = create_model()
#train(nevus_model)
#recognize('Figura4-805x560.jpg')
| StarcoderdataPython |
class Solution(object):
    """In-place Dutch-national-flag sort for lists containing only 0, 1, 2."""

    def sortColors(self, nums):
        """Sort ``nums`` (values 0/1/2) in place in one pass; returns None."""
        low, cur, high = 0, 0, len(nums) - 1
        while cur <= high:
            value = nums[cur]
            if value < 1:
                # 0 goes to the low region; both frontiers advance
                nums[low], nums[cur] = nums[cur], nums[low]
                low += 1
                cur += 1
            elif value > 1:
                # 2 goes to the high region; re-examine the swapped-in value
                nums[cur], nums[high] = nums[high], nums[cur]
                high -= 1
            else:
                cur += 1
# Demo: only run when executed as a script, not on import.
if __name__ == "__main__":
    nums = [2, 0, 2, 1, 1, 0]
    Solution().sortColors(nums)
    print(nums)
221418 | <filename>shexer/model/shape.py
# Prefix used when composing shape labels (ShEx-style "@ShapeName").
STARTING_CHAR_FOR_SHAPE_NAME = "@"
class Shape(object):
    """A shape: a label, the class URI it targets, and its statements."""

    def __init__(self, name, class_uri, statements):
        self._name = name
        self._class_uri = class_uri
        self._statements = statements

    @property
    def name(self):
        """Label of this shape."""
        return self._name

    @property
    def class_uri(self):
        """URI of the class this shape describes."""
        return self._class_uri

    def yield_statements(self):
        """Iterate over the shape's statements in their current order."""
        for statement in self._statements:
            yield statement

    def sort_statements(self, callback, reverse=False):
        """Sort statements in place, ordered by ``callback(statement)``."""
        self._statements.sort(key=callback, reverse=reverse)
| StarcoderdataPython |
6541925 | <reponame>gavindsouza/instagram-to-sqlite
import json
import os
from types import SimpleNamespace
from typing import Any
import click
from sqlite_utils.db import _hash
class Namespace(SimpleNamespace):
    """SimpleNamespace that recursively wraps nested dicts for attribute access.

    Accepts either a single positional dict or keyword arguments; positional
    dict entries override keyword arguments of the same name.
    """

    def __init__(self, *args, **kwargs: Any) -> None:
        merged = dict(kwargs)
        if args and isinstance(args[0], dict):
            merged.update(**args[0])
        for key in list(merged):
            if isinstance(merged[key], dict):
                merged[key] = Namespace(**merged[key])
        super().__init__(**merged)
def _(text: Any) -> Any:
    """Recursively repair mojibake in strings (and inside lists/dicts).

    Instagram exports encode UTF-8 text as latin-1-escaped JSON; re-encoding
    as latin-1 and decoding as UTF-8 restores the original characters
    (see the referenced stackoverflow answer).

    BUG FIXES versus the original:
    * values that are neither str, list nor dict (ints, bools, None, ...) fell
      through and were returned as ``None`` -- e.g. ``timestamp_ms`` fields in
      chat rows were silently nulled; now they are returned unchanged;
    * already-correct non-ASCII text (whose latin-1 bytes are not valid
      UTF-8) raised an uncaught UnicodeDecodeError; now it is returned as-is.
    """
    if isinstance(text, str):
        try:
            # ref: https://stackoverflow.com/a/66443662/10309266
            return text.encode("latin-1").decode("utf-8")
        except (UnicodeEncodeError, UnicodeDecodeError):
            return text
    if isinstance(text, list):
        return [_(t) for t in text]
    if isinstance(text, dict):
        for k, v in text.items():
            text[k] = _(v)
        return text
    return text
# Column orderings for the three chat tables, wrapped in a Namespace so they
# can be read as e.g. ``CHAT.messages.column_order`` when upserting below.
CHAT = Namespace(
    {
        "meta": {
            "column_order": [
                "chat_room",
                "title",
                "thread_type",
                "thread_path",
                "is_still_participant",
            ]
        },
        "messages": {
            "column_order": [
                "id",
                "chat_room",
                "is_unsent",
                "type",
                "sender_name",
                "users",
                "content",
                "photos",
                "call_duration",
                "share",
                "timestamp_ms",
                "videos",
                "audio_files",
            ]
        },
        "reactions": {"column_order": ["id", "message_id", "reaction", "actor"]},
    }
)
def save_my_chats(db, zf):
    """Generates 3 tables for: meta, messages & reactions

    :param db: sqlite_utils Database to write into
    :param zf: opened ZipFile of the Instagram export
    """
    # every messages/<chat_room>/*.json file inside the export archive
    all_chats = [
        f.filename
        for f in zf.filelist
        if f.filename.startswith("messages") and f.filename.endswith(".json")
    ]
    # tables that do not exist yet; indexes are created for these at the end.
    # NOTE(review): this is a lazy ``filter`` object consumed by
    # generate_indexes() after the loop -- see that function.
    tables_to_setup = filter(
        lambda x: x not in db.table_names(),
        ["chats_meta", "chats_messages", "chats_reactions"],
    )
    with click.progressbar(all_chats, label="Saving Chats") as all_chats:
        for filename in all_chats:
            reaction_rows = []
            path = os.path.splitext(filename)[0]
            try:
                # expected layout: messages/<category>/<chat_room>/message_N
                # NOTE(review): zip member names use '/', not os.sep -- this
                # likely breaks on Windows; confirm.
                chat_room = path.split(os.sep)[2]
            except Exception:
                # if the path doesnt fit the pattern, skip it - added while secret
                # groups came in. but i had no data to test it with
                continue
            chat_content = json.load(zf.open(filename))
            # 1. transform and insert meta data (``_`` repairs mojibake text)
            meta_row = _(
                {
                    "chat_room": chat_room,
                    "title": chat_content["title"],
                    "thread_type": chat_content["thread_type"],
                    "thread_path": chat_content["thread_path"],
                    "is_still_participant": chat_content["is_still_participant"],
                }
            )
            db["chats_meta"].upsert(
                meta_row,
                pk="chat_room",
                alter=True,
                column_order=CHAT.meta.column_order,
            )
            # 2. transform and insert messages data
            for x in chat_content["messages"]:
                x.update({"chat_room": chat_room})
            chat_rows = _(chat_content["messages"])
            # messages with reactions: hash to a stable id, then split the
            # reactions off into their own rows keyed by that id
            reactioned_messages = list(filter(lambda x: x.get("reactions"), chat_rows))
            for rm in reactioned_messages:
                rm["id"] = _hash(rm)
                reactions = rm.pop("reactions", [])
                for reaction in reactions:
                    reaction_rows.append({"message_id": rm["id"], **reaction})
            db["chats_messages"].upsert_all(
                reactioned_messages,
                pk="id",
                foreign_keys=[("chat_room", "chats_meta", "chat_room")],
                column_order=CHAT.messages.column_order,
                alter=True,
            )
            non_reactioned_messages = list(
                filter(lambda x: not x.get("reactions"), chat_rows)
            )
            for nrm in non_reactioned_messages:
                nrm["id"] = _hash(nrm)
            db["chats_messages"].upsert_all(
                non_reactioned_messages,
                pk="id",
                foreign_keys=[("chat_room", "chats_meta", "chat_room")],
                column_order=CHAT.messages.column_order,
                alter=True,
            )
            # 3. insert messages' reactions data
            db["chats_reactions"].upsert_all(
                reaction_rows,
                hash_id="id",
                foreign_keys=[("message_id", "chats_messages", "id")],
                column_order=CHAT.reactions.column_order,
                alter=True,
            )
    generate_indexes(db, tables_to_setup)
def generate_indexes(db, tables_to_setup):
    """Create indexes (and FTS) for the chat tables that were just created.

    :param db: sqlite_utils Database
    :param tables_to_setup: iterable of newly-created table names
    """
    # BUG FIX: the caller passes a lazy ``filter`` object.  A filter object is
    # always truthy (even when exhausted), so the empty-guard below never
    # fired, and every ``in`` membership test consumed part of the iterator,
    # so later tables could silently miss their indexes.  Materialise once.
    tables_to_setup = set(tables_to_setup)
    if not tables_to_setup:
        return
    print("\nBuilding indexes", end="\x1b[1K\r")
    if "chats_meta" in tables_to_setup:
        db["chats_meta"].create_index(["chat_room"])
    if "chats_messages" in tables_to_setup:
        db["chats_messages"].create_index(["id", "chat_room", "sender_name"])
        db["chats_messages"].enable_fts(["share", "content"])
    if "chats_reactions" in tables_to_setup:
        db["chats_reactions"].create_index(["id", "message_id"])
    print("Indexes built")
| StarcoderdataPython |
125321 | <gh_stars>0
"""Workflow package."""
import os
import inspect
import json
from collections import OrderedDict
import logging
from collections import namedtuple
import copy_reg
import types
# Package version (note: a float, not the conventional string).
__version__ = 0.3
#############################################################################
# Enable pickling of instance methods.
#############################################################################
def reduce_method(m):
    """Pickle helper: reduce a bound method to (getattr, (instance, name)).

    Python 2's pickle cannot serialize bound methods; this tells it to
    re-fetch the method from its instance by name on unpickling.
    """
    return (getattr, (m.__self__, m.__func__.__name__))

# Register the reducer for all bound methods (Python 2 copy_reg module).
copy_reg.pickle(types.MethodType, reduce_method)
#############################################################################
# Setup logging.
#############################################################################
def setup_logger(name):
    """Return a WARNING-level logger with a single console handler.

    :param name: logger name (usually ``__name__``)
    :returns: the configured :class:`logging.Logger`

    BUG FIX: the original unconditionally added a new StreamHandler on every
    call, so repeated calls for the same name produced duplicated log lines.
    A handler is now attached only if the logger has none yet.
    """
    logger = logging.getLogger(name)
    if not logger.handlers:
        console_handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    logger.setLevel(logging.WARNING)
    return logger

# module-level logger for this package
logger = setup_logger(__name__)
#############################################################################
# Workflow run function.
#############################################################################
def run(workflow, mapper=map):
    """Run the workflow.

    Creates the workflow's output directory if needed, then either recurses
    into child nodes or executes the workflow itself: ``process()`` is tried
    first, and if it raises NotImplementedError the tasks are dispatched via
    ``mapper(workflow.execute, workflow.get_tasks())``.

    NOTE(review): under Python 3 the default ``map`` is lazy, so the mapper
    fallback would not actually execute tasks unless consumed -- this module
    targets Python 2 (see copy_reg above).
    """
    if not os.path.isdir(workflow.output_directory):
        os.mkdir(workflow.output_directory)

    if workflow.nodes:
        for child in workflow.nodes:
            run(child, mapper=mapper)
        return

    try:
        workflow.process()
    except NotImplementedError:
        mapper(workflow.execute, workflow.get_tasks())
#############################################################################
# Settings.
#############################################################################
class _NestedClassGetter(object):
    """Pickle helper that reconstructs instances of nested classes.

    Pickle cannot locate a class defined inside another class by name; this
    callable rebuilds a bare instance by looking the nested class up on its
    container and reassigning ``__class__``.

    See also:
    http://stackoverflow.com/questions/1947904/how-can-i-pickle-a-nested-class-in-python/11493777#11493777
    """
    def __call__(self, containing_class, class_name):
        nested_class = getattr(containing_class, class_name)
        # create an empty shell and rebrand it as the nested class; state is
        # applied afterwards by pickle from __reduce__'s third element
        nested_instance = _NestedClassGetter()
        nested_instance.__class__ = nested_class
        return nested_instance
class _MetaSettings(type):
"""Meta class for storing node settings."""
def __init__(cls, name, bases, attrs):
cls.allowed_attrs = [a for a in attrs.keys()
if not a.startswith('__')
and not callable(cls.__dict__[a])]
class BaseSettings(object):
    """Base class for storing node settings.

    Subclasses declare their legal settings as plain class attributes; the
    ``_MetaSettings`` metaclass collects those names into ``allowed_attrs``.

    :raises: RuntimeError if one tries to set a setting that has not been
        specified in the class.
    """
    __metaclass__ = _MetaSettings  # Python 2 metaclass hook
    # 'map' is callable, so it is excluded from allowed_attrs; it is
    # special-cased in __setattr__ so a pool mapper can be swapped in.
    map = map
    def __setattr__(self, name, value):
        # 'map' may always be replaced; anything else must be a declared
        # setting.  (Previously 'map' was assigned twice and the error
        # carried no message.)
        if name != 'map' and name not in self.__class__.allowed_attrs:
            raise RuntimeError(
                'unknown setting {!r}; declare it on the Settings class'.format(name))
        self.__dict__[name] = value
    def __reduce__(self):
        """Pickle support for this nested class.

        See also:
        http://stackoverflow.com/questions/1947904/how-can-i-pickle-a-nested-class-in-python/11493777#11493777

        NOTE(review): the nested class is looked up by name on _BaseNode,
        so a Settings subclass declared on a node subclass unpickles as
        _BaseNode.Settings -- confirm this is intended.
        """
        state = self.__dict__.copy()
        # Bake class-level defaults into the state so they survive
        # unpickling even if they were never assigned on the instance.
        for key, value in self.items():
            state[key] = value
        return (_NestedClassGetter(),
                (_BaseNode, self.__class__.__name__,),
                state,)
    def _keys(self):
        """Return list of sorted setting names."""
        return sorted(self.__class__.allowed_attrs)
    def items(self):
        """Return settings as a sorted list of key/value pairs.

        Falls back to the class-level default for settings that were never
        assigned on the instance.
        """
        items = []
        for key in self._keys():
            try:
                items.append((key, self.__dict__[key]))
            except KeyError:
                items.append((key, self.__class__.__dict__[key]))
        return items
    def to_json(self, indent=None):
        """Return json representation of settings, ordered alphabetically.

        :param indent: passed through to :func:`json.dumps`.
        """
        ordered_dict = OrderedDict(self.items())
        return json.dumps(ordered_dict, indent=indent)
    def from_json(self, json_str):
        """Configure the settings from a json string.

        Unknown keys raise RuntimeError via __setattr__.
        """
        for key, value in json.loads(json_str).items():
            self.__setattr__(key, value)
#############################################################################
# Private base node.
#############################################################################
class _BaseNode(object):
    """Base class for the processing nodes.

    A node owns a Settings instance, an optional parent, and a list of
    child nodes; its working directory is derived from the parent chain.
    """
    class Settings(BaseSettings):
        pass
    def __init__(self):
        self.settings = self.__class__.Settings()
        self._output_directory = ''
        self._parent = None
        self.nodes = []
        # configure() runs last so hooks can rely on the attributes above.
        self.configure()
    def configure(self):
        """Hook for configuring a meta node; the default does nothing."""
        pass
    @property
    def output_directory(self):
        """Return the node's working directory.

        :returns: path to output directory (parent directory, or the
            directory set on the top-level node, plus the class name)
        """
        if self._parent is not None:
            base = self._parent.output_directory
        else:
            base = self._output_directory
        return os.path.join(base, self.__class__.__name__)
    @output_directory.setter
    def output_directory(self, directory):
        """Set the workflow's working directory.

        :param directory: directory in which sub-directories are created
        :raises: RuntimeError if called on a node that is not the
            top-level one
        """
        if self._parent is not None:
            raise RuntimeError('Working directory cannot be set on a sub node.')
        self._output_directory = directory
    def process(self):
        """Process the node as a whole.

        Override this for custom processing logic when a
        map(function, input) split is awkward (e.g. many files to one).
        It is not called when execute() and get_tasks() are implemented.
        """
        raise NotImplementedError
    def get_tasks(self):
        """Return a list of per-task input tuples.

        Override to provide the inputs consumed by
        ``map(self.execute, self.get_tasks())``.
        """
        raise NotImplementedError
    def execute(self, task_input):
        """Execute a single task from get_tasks().

        Override to provide the execution logic used by
        ``map(self.execute, self.get_tasks())``.
        """
        raise NotImplementedError
    def add_node(self, node):
        """Attach *node* as a child of this meta node.

        :param node: node to be added
        :returns: the added node
        """
        node._parent = self
        self.nodes.append(node)
        return node
#############################################################################
# Input and output
#############################################################################
class FilePath(str):
    """A ``str`` subclass with file-system convenience helpers."""
    @property
    def exists(self):
        """True when a regular file exists at this path."""
        return os.path.isfile(self)
    def is_more_recent_than(self, other):
        """True when this file's mtime is strictly newer than *other*'s."""
        mine = os.path.getmtime(self)
        theirs = os.path.getmtime(other)
        return mine > theirs
class _InOne(object):
"""Base class for nodes that take one input."""
def __init__(self, input_obj):
self.input_obj = input_obj
@property
def input_file(self):
"""Return the input file name.
:returns: class:`workflow.FilePath`
"""
return FilePath(self.input_obj)
class _InMany(object):
    """Base class for nodes that take many inputs."""
    def __init__(self, input_obj):
        # input_obj may be a directory path, an upstream _OutMany node, or
        # a tuple/list mixing both (see input_files below).
        self.input_obj = input_obj
    @property
    def input_files(self):
        """Return list containing input file names / tuples of file names.
        If the input_obj was a path or a node this function yields filenames.
        If the input_obj was a tuple or list of paths/nodes this function
        yields a tuple of filenames.
        :returns: list of :class:`workflow.FilePath` instances or list of
            tuples of :class:`workflow.FilePath` instances
        """
        def yield_files(input_obj):
            """Recursive function for yielding files."""
            if isinstance(input_obj, _OutMany):
                # If the input object is an instance of _OutMany it will have
                # access to the output_files property.
                for fname in input_obj.output_files:
                    yield FilePath(fname)
            elif hasattr(input_obj, '__iter__'):
                # This comes after isinstance(input_obj, _OutMany) because some
                # unit test make use of MagicMock that has an "__iter__"
                # attribute.
                # The input object is a tuple or list of input objects.
                all_files = []
                for iobj in input_obj:
                    all_files.append(yield_files(iobj))
                # zip pairs the i-th file of each source into one task tuple
                # and truncates to the shortest source.
                for fnames in zip(*all_files):
                    yield fnames
            else:
                # At this point we assume that we have been given a path to an
                # input directory.
                # NOTE(review): os.listdir order is arbitrary and the zip
                # pairing above depends on it -- confirm whether the entries
                # should be sorted.
                for fname in os.listdir(input_obj):
                    yield FilePath(os.path.join(input_obj, fname))
        return [f for f in yield_files(self.input_obj)]
class _OutMany(object):
    """Base class for nodes that return many outputs."""
    @property
    def output_files(self):
        """Return list of output file names.

        :returns: list of :class:`workflow.FilePath` instances

        Relies on ``self.output_directory`` provided by the _BaseNode mixin.
        """
        return [FilePath(os.path.join(self.output_directory, fname))
                for fname in os.listdir(self.output_directory)]
    def get_output_file(self, fname, enumerator=None):
        """Returns output file name.

        This is a helper function to create meaningful output filenames.

        :param fname: input file name
        :param enumerator: unique id (useful if the input file names are not
            unique)
        :returns: :class:`workflow.FilePath`
        """
        logger.info('fname: {}'.format(fname))
        fname = os.path.basename(fname)
        if enumerator is not None:
            # Bug fix: the previous fname.split('.') raised ValueError for
            # names with zero or several dots (e.g. 'a.b.txt'); splitext
            # handles every case and keeps the extension's dot in `suffix`.
            name, suffix = os.path.splitext(fname)
            fname = '{}_{}{}'.format(name, enumerator, suffix)
        return FilePath(os.path.join(self.output_directory, fname))
class _OutOne(object):
"""Base class for nodes that produce one output."""
def __init__(self, output_obj):
self.output_obj = output_obj
@property
def output_file(self):
"""Return the output file name.
:returns: :class:`workflow.FilePath`
"""
return FilePath(self.output_obj)
#############################################################################
# Public nodes.
#############################################################################
Task = namedtuple('Task', ['input_file', 'output_file', 'settings'])
class OneToManyNode(_BaseNode, _InOne, _OutMany):
    """One to many processing node: one input, many output files."""
    def __init__(self, input_obj):
        # :param input_obj: single input path or upstream node.
        _InOne.__init__(self, input_obj)
        # Run base code initialisation after in case _BaseNode.configure tries
        # to access input_obj added by _InOne.__init__.
        _BaseNode.__init__(self)
class ManyToManyNode(_BaseNode, _InMany, _OutMany):
    """Many to many processing node: one output file per input file."""
    def __init__(self, input_obj):
        # :param input_obj: input directory, node, or tuple/list thereof.
        _InMany.__init__(self, input_obj)
        # Run base code initialisation after in case _BaseNode.configure tries
        # to access input_obj added by _InMany.__init__.
        _BaseNode.__init__(self)
    def get_tasks(self):
        """Return list of named tuples of input values for execute.

        Inputs whose output already exists and is newer than the input are
        skipped, giving make-style incremental builds.

        :returns: list of Task(input_file, output_file, settings)
        """
        tasks = []
        for input_fn in self.input_files:
            output_fn = self.get_output_file(input_fn)
            if output_fn.exists and output_fn.is_more_recent_than(input_fn):
                continue
            # Reuse output_fn (the original recomputed get_output_file here).
            tasks.append(Task(input_fn, output_fn, self.settings))
        return tasks
class ManyToOneNode(_BaseNode, _InMany, _OutOne):
    """Many to one processing node: many inputs reduced to one output."""
    def __init__(self, input_obj, output_obj):
        # :param input_obj: input directory, node, or tuple/list thereof.
        # :param output_obj: path of the single output file.
        _InMany.__init__(self, input_obj)
        _OutOne.__init__(self, output_obj)
        # Run base code initialisation after in case _BaseNode.configure tries
        # to access input_obj/ouput_obj added by _InMany/_OutOne.__init__.
        _BaseNode.__init__(self)
class OneToOneNode(_BaseNode, _InOne, _OutOne):
    """One to one node: a single input transformed into a single output."""
    def __init__(self, input_obj, output_obj):
        # :param input_obj: path of the single input file or upstream node.
        # :param output_obj: path of the single output file.
        _InOne.__init__(self, input_obj)
        _OutOne.__init__(self, output_obj)
        # Run base code initialisation after in case _BaseNode.configure tries
        # to access input_obj/ouput_obj added by _InOne/_OutOne.__init__.
        _BaseNode.__init__(self)
| StarcoderdataPython |
6605086 | <filename>pymoo/algorithms/so_de.py
import numpy as np
from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm
from pymoo.docs import parse_doc_string
from pymoo.model.mating import Mating
from pymoo.model.population import Population
from pymoo.model.replacement import ImprovementReplacement
from pymoo.model.selection import Selection
from pymoo.operators.crossover.biased_crossover import BiasedCrossover
from pymoo.operators.crossover.differental_evolution_crossover import DifferentialEvolutionCrossover
from pymoo.operators.crossover.exponential_crossover import ExponentialCrossover
from pymoo.operators.sampling.latin_hypercube_sampling import LatinHypercubeSampling
from pymoo.operators.selection.random_selection import RandomSelection
from pymoo.util.display import SingleObjectiveDisplay
from pymoo.util.misc import parameter_less
from pymoo.util.termination.default import SingleObjectiveDefaultTermination
# =========================================================================================================
# Implementation
# =========================================================================================================
class DE(GeneticAlgorithm):
    # Differential Evolution.  The DE variation step lives entirely in
    # DifferentialEvolutionMating; survival is disabled because parents are
    # replaced one-to-one in _next via ImprovementReplacement.
    def __init__(self,
                 pop_size=100,
                 sampling=LatinHypercubeSampling(),
                 variant="DE/rand/1/bin",
                 CR=0.5,
                 F=0.3,
                 dither="vector",
                 jitter=False,
                 display=SingleObjectiveDisplay(),
                 **kwargs
                 ):
        """
        Parameters
        ----------
        pop_size : {pop_size}
        sampling : {sampling}
        variant : {{DE/(rand|best)/1/(bin/exp)}}
            The different variants of DE to be used. DE/x/y/z where x how to select individuals to be perturbed,
            y the number of difference vector to be used and z the crossover type. One of the most common variant
            is DE/rand/1/bin.
        F : float
            The weight to be used during the crossover.
        CR : float
            The probability the individual exchanges variable values from the donor vector.
        dither : {{'no', 'scalar', 'vector'}}
            One strategy to introduce adaptive weights (F) during one run. The option allows
            the same dither to be used in one iteration ('scalar') or a different one for
            each individual ('vector').
        jitter : bool
            Another strategy for adaptive weights (F). Here, only a very small value is added or
            subtracted to the weight used for the crossover for each individual.
        """
        # NOTE: this docstring is consumed by parse_doc_string(DE.__init__)
        # at the bottom of the module -- keep the {placeholders} intact.
        mating = DifferentialEvolutionMating(variant=variant,
                                             CR=CR,
                                             F=F,
                                             dither=dither,
                                             jitter=jitter)
        # survival=None: replacement is handled one-to-one in _next.
        super().__init__(pop_size=pop_size,
                         sampling=sampling,
                         mating=mating,
                         survival=None,
                         display=display,
                         **kwargs)
        self.default_termination = SingleObjectiveDefaultTermination()
    def _next(self):
        # make a step and create the offsprings
        self.off = self.mating.do(self.problem, self.pop, self.n_offsprings, algorithm=self)
        self.off.set("n_gen", self.n_gen)
        # evaluate the offsprings
        self.evaluator.eval(self.problem, self.off, algorithm=self)
        # replace the individuals that have improved (one-to-one with parents)
        self.pop = ImprovementReplacement().do(self.problem, self.pop, self.off)
# =========================================================================================================
# Selection and Mating
# =========================================================================================================
class DESelection(Selection):
    # Chooses the parent index matrix P for the DE crossover according to
    # the variant's base-vector strategy (rand / best / current-to-* / ...).
    def __init__(self, variant) -> None:
        super().__init__()
        # variant: the "x" part of DE/x/y/z, e.g. "rand" or "current-to-best".
        self.variant = variant
    def _do(self, pop, n_select, n_parents, **kwargs):
        variant = self.variant
        # create offsprings and add it to the data of the algorithm
        # Start from fully random parents; the variant then overwrites the
        # columns that must be deterministic.
        P = RandomSelection().do(pop, n_select, n_parents)
        F, CV = pop.get("F", "CV")
        # Fold constraint violation into the objective so infeasible
        # solutions rank behind feasible ones before sorting.
        fitness = parameter_less(F, CV)[:, 0]
        sorted_by_fitness = fitness.argsort()
        best = sorted_by_fitness[0]
        if variant == "best":
            # base vector is always the best individual
            P[:, 0] = best
        elif variant == "current-to-best":
            # base = current individual, combined with best and current
            P[:, 0] = np.arange(len(pop))
            P[:, 1] = best
            P[:, 2] = np.arange(len(pop))
        elif variant == "current-to-rand":
            P[:, 0] = np.arange(len(pop))
            P[:, 2] = np.arange(len(pop))
        elif variant == "rand-to-best":
            P[:, 1] = best
            P[:, 2] = np.arange(len(pop))
        elif variant == "current-to-pbest":
            # pick the "best" column from the top 10% (p-best) at random
            n_pbest = int(np.ceil(0.1 * len(pop)))
            pbest = sorted_by_fitness[:n_pbest]
            P[:, 0] = np.arange(len(pop))
            P[:, 1] = np.random.choice(pbest, len(pop))
            P[:, 2] = np.arange(len(pop))
        return P
class DifferentialEvolutionMating(Mating):
    # Implements the full DE variation step for a "DE/x/y/z" variant string:
    # x = base-vector selection, y = number of difference vectors,
    # z = repair crossover (bin or exp).
    def __init__(self,
                 variant="DE/rand/1/bin",
                 CR=0.5,
                 F=0.3,
                 dither="vector",
                 jitter=False,
                 selection=None,
                 crossover=None,
                 mutation=None,
                 **kwargs):
        # e.g. "DE/rand/1/bin" -> sel="rand", n_diff="1", mut="bin"
        _, sel, n_diff, mut, = variant.split("/")
        self.variant = sel
        self.n_diffs = int(n_diff)
        if "-to-" in self.variant:
            # "-to-" variants add one more difference pair (see DESelection).
            self.n_diffs += 1
        if selection is None:
            selection = DESelection(sel)
        if mutation is None:
            # z part: 'exp' = exponential crossover, 'bin' = binomial.
            if mut == "exp":
                mutation = ExponentialCrossover(CR)
            elif mut == "bin":
                mutation = BiasedCrossover(CR)
        if crossover is None:
            crossover = DifferentialEvolutionCrossover(n_diffs=self.n_diffs, weight=F, dither=dither, jitter=jitter)
        super().__init__(selection, crossover, mutation, **kwargs)
    def _do(self, problem, pop, n_offsprings, parents=None, **kwargs):
        P = self.selection.do(pop, len(pop), self.crossover.n_parents)
        # do the first crossover which is the actual DE operation
        off = self.crossover.do(problem, pop, P, algorithm=self)
        # then do the mutation (which is actually a crossover between old and new individual)
        # Pair each parent (index i) with its donor (index i + len(pop)).
        _pop = Population.merge(pop, off)
        _P = np.column_stack([np.arange(len(pop)), np.arange(len(pop)) + len(pop)])
        off = self.mutation.do(problem, _pop, _P, algorithm=self)[:len(pop)]
        return off
parse_doc_string(DE.__init__)
| StarcoderdataPython |
8173916 | # -*- coding: utf-8 -*-
"""
.. module:: import_csv_for_factors
:synopsis: module importing data for SAOB
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import pandas as pd
def _common_read(csv_file, raters):
"""Reads data from a csv file containing parents and sometimes teachers and clinicians severity assessments of a disease and factor values.
This csv file contains all the data required to perform the SAOB.
Parameters
----------
csv_file: str
Name or localisation of the csv file that contains all the values of clinical trials required to perform the SAOB.
The csv file must have a specific form:
- eight first columns: Author, Year, Score Name, Number of patients, Raters (Parents or Teachers), Time (pre and post), Mean, Std;
- other columns correspond to factors to analyze;
- each study has at least 2 lines: Author name, Year, Score Name, Number of patients, Raters (Parents or Teachers) repeated twice,
Time (pre and post), Mean (at pre-test and post-test), Std (at pre-test and post-test). If Teachers assessments are available,
two lines must be added;
- sometimes, for a study, several clinical scales are available, they are all entered in the csv file;
- for each author, the 2 lines of the Raters column have to be filled as follows: Parents, Parents, if teachers' assessment is available
2 more lines are added (Teachers, Teachers);
- for each author, the 2 lines of the Time column have to be filled as follows: pre, post (pattern repeated one more time if teacher or
assessments is available);
- Mean and Std correspond to the clinical score extracted from studies at pre-test and post-test;
- Every additionnal column corresponds to a factor.
raters: str, 'Teachers' or 'Parents'
Person assessing the disease symptoms.
Returns
-------
df_values: pandas.DataFrame
Dataframe used to perform the SAOB.
Each row corresponds to a study rated by a specific rater on a specific scale.
Columns are: mean_post_test_treatment, mean_post_test_control, mean_pre_test_treatment, mean_pre_test_control, n_treatment,
n_control, std_post_test_treatment, std_post_test_control, std_pre_test_treatment, std_pre_test_control, raters for each study and factors.
"""
# Import a csv file as a dataframe
df = pd.read_csv(csv_file)
# Name of studies with an evaluation and its associated score
indices_name_studies = df.loc[ (df['Time'] == "pre")
& (df['Raters'] == raters),
['Mean', 'Std']
].isnull().sum(axis=1).index
name_studies = df.loc[indices_name_studies, 'Author']
score_name = df.loc[indices_name_studies, 'Score Name']
# Extract treatment values
treatment_indices_pre = df[
(df['Time'] == "pre")
& (df['Raters'] == raters)
].index
n_treatment = df.loc[treatment_indices_pre, 'Number of patients']
mean_pre_test_treatment = df.loc[treatment_indices_pre, 'Mean']
std_pre_test_treatment = df.loc[treatment_indices_pre, 'Std']
treatment_indices_post = df[
(df['Time'] == "post")
& (df['Raters'] == raters)
].index
mean_post_test_treatment = df.loc[treatment_indices_post, 'Mean']
std_post_test_treatment = df.loc[treatment_indices_post, 'Std']
# Extract factors
pblind = df.loc[treatment_indices_pre, 'Probably Blind']
number_of_sessions = df.loc[treatment_indices_pre, 'Number of sessions']
SMR = df.loc[treatment_indices_pre, 'SMR']
theta_up = df.loc[treatment_indices_pre, 'Theta up']
beta_up_central = df.loc[treatment_indices_pre, 'Beta up central']
theta_down = df.loc[treatment_indices_pre, 'Theta down']
beta_up_frontal = df.loc[treatment_indices_pre, 'Beta up frontal']
SCP = df.loc[treatment_indices_pre, 'SCP']
on_drugs = df.loc[treatment_indices_pre, 'On drugs during treatment assessments']
age_min = df.loc[treatment_indices_pre, 'Age min']
age_max = df.loc[treatment_indices_pre, 'Age max']
randomization = df.loc[treatment_indices_pre, 'Randomization']
IRB = df.loc[treatment_indices_pre, 'Institutional Review Board']
transfer_phase = df.loc[treatment_indices_pre, 'Transfer phase']
transfer_card = df.loc[treatment_indices_pre, 'Transfer card']
EOG_correction_or_rejection = df.loc[treatment_indices_pre, 'EOG correction or rejection']
amplitude_based_artifact_rejection = df.loc[treatment_indices_pre, 'Amplitude based artifact rejection']
thresholding = df.loc[treatment_indices_pre, 'Thresholding']
session_pace = df.loc[treatment_indices_pre, 'Session pace (per week)']
session_length = df.loc[treatment_indices_pre, 'Session length (min)']
treatment_length = df.loc[treatment_indices_pre, 'Treatment length (weeks)']
more_than_one_active_electrode = df.loc[treatment_indices_pre, '>1 active electrode']
EEG_quality = df.loc[treatment_indices_pre, 'EEG quality']
control_group = df.loc[treatment_indices_pre, 'Control group']
individualisation_iapf = df.loc[treatment_indices_pre, 'Indivualisation (iAPF)']
EMG_biofeedback = df.loc[treatment_indices_pre, 'EMG biofeedback']
engagement_with_treatment = df.loc[treatment_indices_pre, 'Engagement with treatment']
maximum_on_clinical_scale = df.loc[treatment_indices_pre, 'Maximum on clinical scale']
# Creation of the data frame containing the results
df_values = pd.DataFrame({'n_treatment': n_treatment.tolist(),
'score_name': score_name.tolist(),
'mean_pre_test_treatment': mean_pre_test_treatment.tolist(),
'mean_post_test_treatment': mean_post_test_treatment.tolist(),
'std_pre_test_treatment': std_pre_test_treatment.tolist(),
'std_post_test_treatment': std_post_test_treatment.tolist(),
'raters': raters,
'pblind': pblind.tolist(),
'number_of_sessions': number_of_sessions.tolist(),
'SMR': SMR.tolist(),
'theta_up': theta_up.tolist(),
'theta_down': theta_down.tolist(),
'beta_up_central': beta_up_central.tolist(),
'beta_up_frontal': beta_up_frontal.tolist(),
'SCP': SCP.tolist(),
'on_drugs': on_drugs.tolist(),
'age_min': age_min.tolist(),
'age_max': age_max.tolist(),
'randomization': randomization.tolist(),
'IRB': IRB.tolist(),
'transfer_phase': transfer_phase.tolist(),
'transfer_card': transfer_card.tolist(),
'EOG_correction_or_rejection': EOG_correction_or_rejection.tolist(),
'amplitude_based_artifact_rejection': amplitude_based_artifact_rejection.tolist(),
'thresholding': thresholding.tolist(),
'session_pace': session_pace.tolist(),
'session_length': session_length.tolist(),
'treatment_length': treatment_length.tolist(),
'more_than_one_active_electrode': more_than_one_active_electrode.tolist(),
'EEG_quality': EEG_quality.tolist(),
'control_group': control_group.tolist(),
'individualisation_iapf': individualisation_iapf.tolist(),
'EMG_biofeedback': EMG_biofeedback.tolist(),
'engagement_with_treatment': engagement_with_treatment.tolist(),
'maximum_on_clinical_scale': maximum_on_clinical_scale.tolist()},
index=[name_studies])
return df_values
def import_csv(csv_file):
    """Import parents' and teachers' severity assessments from *csv_file*.

    The csv layout is the one documented on :func:`_common_read`: eight
    fixed leading columns (Author, Year, Score Name, Number of patients,
    Raters, Time, Mean, Std) followed by one column per factor, with a
    'pre' and a 'post' row per study, clinical scale and rater.

    Parameters
    ----------
    csv_file: str
        Name or localisation of the csv file that contains all the values
        of clinical trials required to perform the SAOB.

    Returns
    -------
    df_values_parents: pandas.DataFrame
        Parents' ratings required to perform the SAOB, one row per study
        and clinical scale, with score and factor columns.
    df_values_teachers: pandas.DataFrame
        Teachers' ratings with the same layout.
    """
    parents = _common_read(csv_file, raters='Parents')
    teachers = _common_read(csv_file, raters='Teachers')
    return parents, teachers
if __name__ == '__main__':
    # Bug fix: this module defines import_csv(); the previous call target
    # import_csv_for_factors() does not exist and raised NameError when the
    # file was run as a script.
    import_csv('values_total_meta_analysis_all_factors.csv')
| StarcoderdataPython |
4813479 | <filename>memory_orig/inoutput_confirm.py<gh_stars>1-10
#์ผ๋จ์ ๋ฌธ์ ์ ์ฌ์ฉ์ ์
๋ ฅ ๋ต๋ณ์ด ์ผ์นํ๋ค๋ ๊ฐ์ ํ์~~
# 16x32 grid of the question screen: 1 = filled cell, 0 = empty.
# NOTE(review): Qscreen and Ascreen below are identical, so the mismatch
# check in the game loop never fires -- the header comment of this file
# states the code assumes the user's answer matches the question.
Qscreen = [
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]]
# Answer grid entered by the player; same layout as Qscreen.
Ascreen = [
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]]
#๊ฒ์์์
# Game loop.  The original source was damaged by an encoding/line-wrapping
# problem (Korean comments and string literals were split mid-character,
# leaving syntax errors); reconstructed below with English comments.
# Runtime strings are kept as close to the damaged original as possible.
while True:
    # Compare the question grid with the player's answer grid cell by cell.
    # i: 0 = everything matched, 1 = restart requested, 2 = quit requested.
    i = 0
    for a in range(2, 14):
        for b in range(2, 30):
            if Qscreen[a][b] != Ascreen[a][b]:
                print("์คํจํ์จ์ต๋๋ค.")  # "You failed."
                thehalgguenya = input("๊ฒ์์ ๋ค์ ์์ํ์๊ฒ ์ต๋๊น? (Y/N): ")  # "Restart the game? (Y/N)"
                if thehalgguenya == "Y":
                    i = 1
                    break
                elif thehalgguenya == "N":
                    i = 2
                    break
        if i:
            # Bug fix: propagate the inner break -- the original only left
            # the inner loop and re-prompted on every remaining mismatch.
            break
    if i == 1:
        pass
        # TODO: restart from the first screen.
    elif i == 0:
        pass
        # Success: continue the game from the top of the loop.
    elif i == 2:
        pass
        # TODO: end the game.
| StarcoderdataPython |
8185100 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------------
# _____ _ _ ______ _
# (_____) | (_) (____ \ (_)
# _ ____ _ | |_ ____ ___ ____) )_ _ ____ ____ _ ____
# | | | _ \ / || | |/ _ |/ _ \ | __ (| | | |/ _ |/ _ | |/ _ )
# _| |_| | | ( (_| | ( ( | | |_| | | |__) ) |_| ( ( | ( ( | | ( (/ /
# (_____)_| |_|\____|_|\_|| |\___/ |______/ \____|\_|| |\_|| |_|\____)
# (_____| (_____(_____|
#
# file: __init__
# desc: The init for the Indigobuggie features.
#
# author: peter
# date: 13/10/2018
#---------------------------------------------------------------------------------
# Copyright (c) 2018 <NAME>
# All rights Reserved.
# Released Under the MIT Licence
#---------------------------------------------------------------------------------
from scm_feature import SCMFeature
from notes_feature import NotesFeature
from settings_feature import SettingsFeature
from my_tasks_feature import MyTasksFeature
from timekeeper_feature import TimeKeeperFeature
from code_review_feature import CodeReviewFeature
from source_tree_feature import SourceTreeFeature
# Names of the feature classes this package exports; each entry must match
# one of the classes imported above.
supported_features=['SCMFeature',
					'NotesFeature',
					'SettingsFeature',
					'MyTasksFeature',
					'TimeKeeperFeature',
					'CodeReviewFeature',
					'SourceTreeFeature']
# vim: ts=4 sw=4 noexpandtab nocin ai
| StarcoderdataPython |
9639996 | """discriminator_on_related.py
The HasAddresses mixin will provide a relationship
to the fixed Address table based on a fixed association table.
The association table will also contain a "discriminator"
which determines what type of parent object associates to the
Address row.
This is a "polymorphic association". Even though a "discriminator"
that refers to a particular table is present, the extra association
table is used so that traditional foreign key constraints may be used.
This configuration has the advantage that a fixed set of tables
are used, with no extra-table-per-parent needed. The individual
Address record can also locate its parent with no need to scan
amongst many tables.
"""
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy import create_engine, Integer, Column, \
String, ForeignKey, Table
from sqlalchemy.orm import Session, relationship, backref
from sqlalchemy.ext.associationproxy import association_proxy
class Base(object):
    """Base class which provides automated table name
    and surrogate primary key column.
    """
    @declared_attr
    def __tablename__(cls):
        # Table name defaults to the lowercased class name.
        return cls.__name__.lower()
    # NOTE(review): `id` shadows the builtin; conventional in SQLAlchemy
    # declarative models.
    id = Column(Integer, primary_key=True)
# Rebind Base to the declarative base built on the mixin above; all models
# below subclass this.
Base = declarative_base(cls=Base)
class AddressAssociation(Base):
    """Associates a collection of Address objects
    with a particular parent.

    The `discriminator` column records which parent table owns this
    association, making the polymorphic lookup in `parent` possible.
    """
    __tablename__ = "address_association"
    @classmethod
    def creator(cls, discriminator):
        """Provide a 'creator' function to use with
        the association proxy."""
        # Binds the discriminator so HasAddresses' proxy can build
        # associations for its own parent type.
        return lambda addresses:AddressAssociation(
                                addresses=addresses,
                                discriminator=discriminator)
    discriminator = Column(String)
    """Refers to the type of parent."""
    @property
    def parent(self):
        """Return the parent object."""
        # Follows the "<discriminator>_parent" backref created by the
        # HasAddresses mixin below.
        return getattr(self, "%s_parent" % self.discriminator)
class Address(Base):
    """The Address class.
    This represents all address records in a
    single table.
    """
    # Every address row points at one AddressAssociation, which in turn
    # knows its (polymorphic) parent.
    association_id = Column(Integer,
                        ForeignKey("address_association.id")
                    )
    street = Column(String)
    city = Column(String)
    zip = Column(String)
    association = relationship(
                    "AddressAssociation",
                    backref="addresses")
    # Shortcut: address.parent delegates to association.parent.
    parent = association_proxy("association", "parent")
    def __repr__(self):
        # Debug representation, e.g. Address(street=..., city=..., zip=...).
        return "%s(street=%r, city=%r, zip=%r)" % \
            (self.__class__.__name__, self.street,
            self.city, self.zip)
class HasAddresses(object):
    """HasAddresses mixin, creates a relationship to
    the address_association table for each parent.

    Each mixing class gets: an `address_association_id` FK column, an
    `address_association` relationship with a "<classname>_parent" backref,
    and an `addresses` association proxy that creates associations tagged
    with the class's discriminator.
    """
    @declared_attr
    def address_association_id(cls):
        return Column(Integer,
                                ForeignKey("address_association.id"))
    @declared_attr
    def address_association(cls):
        # The lowercased class name doubles as the discriminator value.
        discriminator = cls.__name__.lower()
        cls.addresses= association_proxy(
                    "address_association", "addresses",
                    creator=AddressAssociation.creator(discriminator)
                )
        return relationship("AddressAssociation",
                    backref=backref("%s_parent" % discriminator,
                                        uselist=False))
class Customer(HasAddresses, Base):
    # Parent type "customer"; gains .addresses via the HasAddresses mixin.
    name = Column(String)
class Supplier(HasAddresses, Base):
    # Parent type "supplier"; gains .addresses via the HasAddresses mixin.
    company_name = Column(String)
# Demo: create the schema in an in-memory SQLite database and exercise
# the generic-association pattern.
# Fixed: Python 2 `print x` statements converted to print() calls, and
# dataset residue fused onto the last line removed (both were syntax
# errors under Python 3).
engine = create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)

session = Session(engine)

session.add_all([
    Customer(
        name='customer 1',
        addresses=[
            Address(
                street='123 anywhere street',
                city="New York",
                zip="10110"),
            Address(
                street='40 main street',
                city="San Francisco",
                zip="95732")
        ]
    ),
    Supplier(
        company_name="<NAME>",
        addresses=[
            Address(
                street='2569 west elm',
                city="Detroit",
                zip="56785")
        ]
    ),
])

session.commit()

# Walk each customer's proxied address collection and show the
# round-trip back to the parent object.
for customer in session.query(Customer):
    for address in customer.addresses:
        print(address)
        print(address.parent)
11317458 | # <NAME>
# <EMAIL>
import numpy as np
import pandas as pd
from flam2millijansky.flam2millijansky import flam2millijansky
from hstphot.container import Container
def prepare_KN_nebular_spc(wavelength_angstrom, luminosity_per_angstrom, luminosity_distance_mpc, container):
    """
    prepare_KN_nebular_spc function prepares a spectrum file to be in a format recognizable by JWST ETC.
    #####
    Required:
    - pip install flam2millijansky, hstphot
    - basic packages in python (e.g., numpy and pandas)
    #####
    + Inputs:
    - wavelength_angstrom = 1D array of wavelengths in Angstrom, sorted ascending.
    - luminosity_per_angstrom = 1D array of luminosity in erg/s/A, parallel to wavelengths.
    - luminosity_distance_mpc = a scalar for luminosity distance in Mpc unit.
    - container = Container class for specifying the output paths. (See hstphot.container.Container; pip install hstphot).
    #####
    + Outputs:
    - return a dict with {'micron':values,'mjy':values}
    - save to a file defined by container:
      > filename: ./{0}/{1}_KN_{2}Mpc.dat where 0 = container.data['savefolder'], 1 = container.data['saveprefix'], and 2 = int(luminosity_distance_mpc).
      > Column1 = micron
      > Column2 = mjy
      > sep = ' '
    """
    # Angstrom -> micron.
    wavelength_micron = wavelength_angstrom * 1e-4
    # Mpc -> cm (1 pc = 3.086e18 cm).
    luminosity_distance_cm = luminosity_distance_mpc * 1e6 * 3.086e18
    # Inverse-square dilution: erg/s/A -> observed flux in erg/s/cm^2/A.
    flam = luminosity_per_angstrom / (4. * np.pi * np.power(luminosity_distance_cm, 2))
    flam = flam
    mjy = flam2millijansky(wavelength_angstrom, flam)
    # Keep only physical (positive) wavelengths.
    m = np.argwhere(wavelength_micron > 0.).flatten()
    out = {'micron': wavelength_micron[m], 'mjy': mjy[m]}
    out = pd.DataFrame(out)
    savefolder, saveprefix = container.data['savefolder'], container.data['saveprefix']
    string = './{0}/{1}_KN_{2}Mpc.dat'.format(savefolder, saveprefix, int(luminosity_distance_mpc))
    # Space-separated two-column file as expected by the JWST ETC.
    out.to_csv(string, sep=' ', index=False)
    print('Save {0}'.format(string))
    return out
| StarcoderdataPython |
3334018 | <filename>data/transcoder_evaluation_gfg/python/MINIMUM_ROTATIONS_REQUIRED_GET_STRING.py
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(str):
    """Return the minimum number of left rotations of ``str`` needed to
    obtain the same string again (i.e. its smallest period; n for an
    aperiodic string of length n).
    """
    # Every left rotation of str is a length-n window of str doubled.
    tmp = str + str
    n = len(str)
    for i in range(1, n + 1):
        # BUGFIX: the window must be tmp[i:i + n] (always length n);
        # the previous tmp[i:n] slice shrank each iteration and could
        # never match, so periodic strings (e.g. '00') returned n
        # instead of their period.
        substring = tmp[i:i + n]
        if str == substring:
            return i
    return n
#TOFILL

if __name__ == '__main__':
    # Candidate inputs; each tuple is splatted into both implementations.
    param = [
        ('vdevdNdQSopPtj',),
        ('5',),
        ('100010101011',),
        ('tlDOvJHAyMllu',),
        ('06',),
        ('101',),
        ('DYgtU',),
        ('4',),
        ('00',),
        ('Dt',)
    ]
    n_success = 0
    # f_filled is pasted in at the #TOFILL marker by the evaluation harness.
    for i, parameters_set in enumerate(param):
        if f_filled(*parameters_set) == f_gold(*parameters_set):
            n_success += 1
    # Fixed: dataset residue fused onto this line made it a SyntaxError.
    print("#Results: %i, %i" % (n_success, len(param)))
5005493 | # Title: Balanced Binary Tree
# Link: https://leetcode.com/problems/balanced-binary-tree/
from collections import deque
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node: a value plus left/right child links."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Codec:
    """Level-order (BFS) serializer/deserializer for binary trees.

    Format: "[v1,v2,...]" with 'null' marking absent children; trailing
    'null' entries are trimmed from the serialized form.
    """

    def serialize(self, root) -> str:
        if not root:
            return '[]'
        ans = []
        q = deque()
        q.append(root)
        while q:
            node = q.popleft()
            if node:
                ans.append(str(node.val))
                # Children are enqueued even when None so positions line up.
                q.append(node.left)
                q.append(node.right)
            else:
                ans.append('null')
        # Trim the run of trailing 'null' markers (root exists, so the
        # scan always terminates on a real value).
        i = -1
        while True:
            if ans[i] != 'null':
                break
            i -= 1
        return f"[{','.join(ans[:len(ans)+i+1])}]"

    def deserialize(self, data) -> TreeNode:
        # NOTE(review): node values are kept as the split strings
        # (TreeNode('1'), ...) — fine for shape-only uses such as
        # isBalanced; confirm before comparing values numerically.
        if data == '[]':
            return None
        data = data[1:-1].split(',')
        # Reverse so .pop() consumes values in level order.
        data = list(reversed(data))
        rows = deque()
        next_rows = deque()
        v = data.pop()
        root = TreeNode(v)
        rows.append(root)
        while data:
            # Consume one full level, collecting the next level's nodes.
            while rows:
                node = rows.popleft()
                left_val = data.pop() if data else 'null'
                right_val = data.pop() if data else 'null'
                if left_val != 'null':
                    node.left = TreeNode(left_val)
                    next_rows.append(node.left)
                if right_val != 'null':
                    node.right = TreeNode(right_val)
                    next_rows.append(node.right)
            rows, next_rows = next_rows, deque()
        return root
class Problem:
    """Height-balanced check for a binary tree (LeetCode 110)."""

    def isBalanced(self, root: TreeNode) -> bool:
        """Return True if, at every node, the two subtree heights differ
        by at most one.

        Computed bottom-up in a single O(n) pass instead of the previous
        O(n^2) depth-per-node recursion (same results, fewer traversals).
        """
        if not root:
            return True
        return self._height(root) >= 0

    def _height(self, node: TreeNode) -> int:
        # Height of the subtree, or -1 as soon as any subtree is
        # unbalanced (short-circuits the rest of the traversal).
        if not node:
            return 0
        left = self._height(node.left)
        if left < 0:
            return -1
        right = self._height(node.right)
        if right < 0:
            return -1
        if abs(left - right) > 1:
            return -1
        return max(left, right) + 1

    def max_depth(self, root: TreeNode) -> int:
        """Return the height (node count on the longest root-to-leaf path)."""
        if not root:
            return 0
        return max(self.max_depth(root.left), self.max_depth(root.right)) + 1
def solution():
    """Rebuild the sample tree from its serialized form and report
    whether it is height-balanced."""
    tree = Codec().deserialize('[1,2,2,3,3,null,null,4,4]')
    return Problem().isBalanced(tree)
def main():
    """Entry point: print the balance check for the sample tree."""
    print(solution())


# Fixed: dataset residue fused onto the main() call line made this a
# SyntaxError.
if __name__ == '__main__':
    main()
11265901 | <reponame>cangoosechain/cats-swap
import os
import json
import hashlib
from script.utils.log import Log
class CdvTool(object):
    """Thin wrapper around the ``cdv``/``chia``/``opc`` command-line
    tools used to build, curry and spend Chialisp coins.

    All commands are executed through ``os.popen`` and their stripped
    stdout is returned as text.
    """

    def __init__(self):
        log = Log("coin_maker_log")
        self.log = log.logger

    @staticmethod
    def sha256(data_sha256):
        """Return the hex SHA-256 digest of a UTF-8 encoded string."""
        return hashlib.sha256(data_sha256.encode(encoding='UTF-8')).hexdigest()

    def build(self, coin_file):
        """Compile a Chialisp source file; return the compiler's output."""
        build_cmd = "cdv clsp build %s" % coin_file
        self.log.info(build_cmd)
        build_result = os.popen(build_cmd).read().strip()
        return build_result

    def curry(self, hex_file_name, param_list):
        """Curry ``param_list`` into a compiled puzzle.

        Returns (puzzle_hash, puzzle_reveal, curry_cmd).
        NOTE: ``param_list`` is mutated in place (each entry gets an
        ``-a `` prefix) — confirm callers do not reuse the list.
        """
        for i in range(len(param_list)):
            param_list[i] = "-a %s" % param_list[i]
        param_list_merge_str = " ".join(param_list)
        curry_cmd = "cdv clsp curry %s %s" % (hex_file_name, param_list_merge_str)
        curry_cmd_treehash = "%s --treehash" % curry_cmd
        curry_cmd_x = "%s -x" % curry_cmd
        self.log.info(curry_cmd_treehash)
        puzzle_hash = os.popen(curry_cmd_treehash).read().strip()
        self.log.info(curry_cmd_x)
        puzzle_reveal = os.popen(curry_cmd_x).read().strip()
        return puzzle_hash, puzzle_reveal, curry_cmd

    def encode(self, puzzle_hash, prefix):
        """Encode a puzzle hash into an address with the given prefix."""
        encode_cmd = "cdv encode %s --prefix %s" % (puzzle_hash, prefix)
        self.log.info(encode_cmd)
        address = os.popen(encode_cmd).read().strip()
        return address

    def chia_wallet_send(self, amount_for_wallet_send, address):
        """Send the given amount to ``address`` via the chia wallet CLI."""
        chia_wallet_send_cmd = "chia wallet send -a %s -t %s" % (
            amount_for_wallet_send, address)
        self.log.info(chia_wallet_send_cmd)
        result = os.popen(chia_wallet_send_cmd).read().strip()
        return result

    def coinrecords(self, sign, items):
        """Query coin records by ``sign`` (e.g. "id") and return parsed JSON.

        The CLI prints a Python-literal dict, so quotes and booleans are
        rewritten before json.loads can parse it.
        """
        coinrecords_cmd = "cdv rpc coinrecords --by %s %s " % (
            sign, " ".join(items))
        coins = os.popen(coinrecords_cmd).read().strip()
        coins = coins.replace("'", '"')
        coins = coins.replace("True", "true").replace("False", "false")
        coins = json.loads(coins)
        return coins

    def opc(self, params_list):
        """Serialize each parameter group into a solution via ``opc``."""
        solution_list = []
        for params in params_list:
            opc_cmd = "opc '(%s)'" % (" ".join(params))
            self.log.info(opc_cmd)
            solution = os.popen(opc_cmd).read().strip()
            self.log.info(solution)
            solution_list.append(solution)
        return solution_list

    def coin_info(self, amount, parent_coin_info, puzzle_hash, puzzle_reveal, solution):
        """Assemble the coin-spend dict for a single coin."""
        coin_json = {
            "coin": {
                "amount": amount,
                "parent_coin_info": parent_coin_info,
                "puzzle_hash": puzzle_hash
            },
            "puzzle_reveal": puzzle_reveal,
            "solution": solution
        }
        return coin_json

    def spend_bundle(self, coin_spends, aggregated_signature):
        """Write the coin spends plus signature to temp_spend_bundle.json."""
        spend_bundle_json = {
            "coin_spends": coin_spends,
            "aggregated_signature": aggregated_signature
        }
        # Context manager ensures the file is closed even on errors
        # (previously an explicit open/close pair that could leak).
        with open("temp_spend_bundle.json", "w") as spend_bundle_file:
            json.dump(spend_bundle_json, spend_bundle_file)
        self.log.info("make temp_spend_bundle.json finish")

    def get_parent_coin_info(self, coins):
        """Return the de-duplicated parent_coin_info values of ``coins``."""
        coins_parent_coin_info = []
        for each_coin in coins:
            parent_coin_info = each_coin["coin"]["parent_coin_info"]
            coins_parent_coin_info.append(parent_coin_info)
        coins_parent_coin_info = list(set(coins_parent_coin_info))
        self.log.info("get coins_parent_coin_info: %s" %
                      coins_parent_coin_info)
        return coins_parent_coin_info

    def coin_tree_upward_find(self, sign, items, all_tree_coins=None):
        """Recursively walk parent coins, accumulating every record seen
        into ``all_tree_coins``.

        BUGFIX: the accumulator previously defaulted to a shared mutable
        list (``=[]``), so results from earlier top-level calls leaked
        into later ones; a fresh list is now created per invocation.
        """
        if all_tree_coins is None:
            all_tree_coins = []
        coins = self.coinrecords(sign, items)
        all_tree_coins.extend(coins)
        self.log.info("get coins: %s" % json.dumps(coins))
        coins_parent_coin_info = self.get_parent_coin_info(coins)
        if len(coins_parent_coin_info) > 0:
            self.coin_tree_upward_find(
                "id", coins_parent_coin_info, all_tree_coins)
        else:
            self.log.info("get coins_tree: %s" % json.dumps(all_tree_coins))
| StarcoderdataPython |
def answer():
    """Return the answer to life, the universe and everything."""
    return 42
class School:
    """Toy school object with fixed food quality and age values."""

    def food(self):
        """Describe the cafeteria food."""
        return 'awful'

    def age(self):
        """Return the school's age."""
        return 300
| StarcoderdataPython |
6649235 | #!/usr/bin/python
#-*- coding: utf-8 -*-
from unscrapulous.utils import *
SOURCE = 'https://www1.nseindia.com/invest/dynaContent/arbitration_award.jsp?requestPage=main&qryFlag=yes'
OUTPUT_DIR = '/tmp/unscrapulous/files'
OUTPUT_FILE = 'arbitration-awards-nse.csv'
def main(conn, session):
    """Scrape the NSE arbitration-awards table into a CSV file.

    conn/session are presumably a DB connection and an HTTP session
    supplied by the caller -- TODO confirm against the runner; conn is
    currently unused while write_to_db() stays commented out.
    """
    create_dir(OUTPUT_DIR)
    soup = get_soup(SOURCE, session)
    # The awards table is identified by its CSS class on the page.
    table = get_table(
        soup,
        {
            'class': 'tabular_data'
        }
    )
    convert_into_csv([OUTPUT_FILE], OUTPUT_DIR, table=table)
    # TODO: download data from range of 1 year and paginate
    # write_to_db() is behaving different here. Need to fix
    # alias = {
    #     'Name': 'Name of the Applicant',
    #     'AddedDate': 'Date of Arbitration Award'
    # }
    # write_to_db(conn, os.path.join(OUTPUT_DIR, OUTPUT_FILE), SOURCE, alias)
| StarcoderdataPython |
3333773 | <filename>Assignment_2/naive_bayes.py
# Multinomial Event Model
# Given a review predict the rating (1-10)
# y is Multinomial phi1 to phi10
# Every position has same multinomial theta1 to theta|V|
import itertools
import math
import matplotlib.pyplot as plt
import numpy as np
import re
import sys
import random
from collections import Counter
from tqdm import tqdm
import pickle
# TODO code should work even if tqdm is absent
def clean(string):
    """Tokenize a review into a de-duplicated list of words.

    Two views of the text are merged: one where every character outside
    [a-z0-9] becomes a separator, and one where such characters are
    simply dropped (so split words are also glued back together).
    Result order is unspecified (set semantics). Uppercase characters
    are removed, not lowercased.
    """
    stripped = string.strip()
    spaced = re.sub("[^a-z0-9]", " ", stripped).split()
    glued = re.sub("[^a-z0-9]", "", stripped).split()
    return list(set(spaced + glued))
def read_data(review_file, rating_file):
    """Read parallel review/rating files into a list of
    (rating:int, words:list) tuples; reviews are tokenized with clean().
    Files are paired line-by-line, so they must have equal length
    (zip silently truncates to the shorter one).
    """
    print("Reading Files \'%s\' and \'%s\'\n" % (review_file, rating_file))
    data = []
    with open(review_file, 'r') as rev, open(rating_file, 'r') as rt:
        for review, rating in zip(rev, rt):
            rating = int(rating)
            review = clean(review)  # clean review and return a list of words
            data.append((rating, review))
    return data
def format_data(plain_data):
    """Group tokenized reviews by rating class.

    Returns {rating: {'words': Counter, 'num_of_samples': int,
    'num_of_words': int}} where num_of_words counts tokens with
    multiplicity across the whole class.
    """
    grouped = {}
    for rating, review in plain_data:
        entry = grouped.setdefault(rating, {"words": [], "num_of_samples": 0})
        entry["words"].extend(review)
        entry["num_of_samples"] += 1
    for entry in grouped.values():
        entry["num_of_words"] = len(entry["words"])
        entry["words"] = Counter(entry["words"])
    return grouped
def get_vocab(data):
    """Return the corpus-wide word Counter (sum over all rating classes)."""
    return sum((cls_data["words"] for cls_data in data.values()), Counter())
# Per-class log-prior / per-class-per-word log-likelihood caches, filled
# lazily by predict().
phis = {}
thetas = {}
# NOTE(review): notlist is never read anywhere in this script -- appears
# to be dead data (possibly an abandoned class-mapping experiment).
notlist = {
    1: 3,
    2: 4,
    3: 7,
    4: 7,
    7: 2,
    8: 1,
    9: 1,
    10: 1
}
# Tokens treated as negations by predict(); "neiter" is presumably a typo
# for "neither" -- confirm before fixing, since changing it alters model
# behavior.
negations = ["neiter", "nor", "nothing", "didnt", "not", "never", "nope", "none", "no", "nobody", "noway", "nah", "aint"]
def predict(review, c):
    """Return the most probable rating class for a tokenized review.

    Multinomial naive Bayes with Laplace smoothing constant ``c``,
    computed in log10 space. Reads the module globals ``data``,
    ``num_classes``, ``total_num_of_samples``, ``V`` and ``negations``,
    and lazily fills the ``phis``/``thetas`` caches.

    NOTE(review): ``review`` is sorted in place (side effect on the
    caller's list); sorting also scrambles the original word order that
    the negation window below appears to assume -- confirm intent.
    """
    probs = [0 for i in range(0, num_classes)]
    # probs = np.zeros([num_classes, ])
    classes = list(data.keys())
    probs = dict(zip(classes, probs))
    for cls in probs:
        # log(phi_cls): smoothed log-prior of the class.
        if cls not in phis:
            phis[cls] = math.log10((data[cls]["num_of_samples"] + c) / (total_num_of_samples + c * num_classes))
        probs[cls] += phis[cls]
        if cls not in thetas:
            thetas[cls] = {}
        review.sort()
        for i in range(len(review)):
            word = review[i]
            # log(theta_word_cls): smoothed log-likelihood of the word.
            if word not in thetas[cls]:
                thetas[cls][word] = math.log10((data[cls]["words"][word] + c) / (data[cls]["num_of_words"] + c * V))
            # Heuristic weighting: double-count the first three tokens,
            # and flip the contribution's sign for a token preceded
            # (within two positions) by a negation word.
            if (i == 0 or i == 1 or i == 2):
                probs[cls] += 2 * thetas[cls][word]
            elif (i >= 1 and review[i - 1] in negations) or (i >= 2 and review[i - 2] in negations):
                probs[cls] -= (thetas[cls][word])
            else:
                probs[cls] += thetas[cls][word]
    # argmax over classes.
    keys = list(probs.keys())
    max_cls = keys[0]
    for cls in probs:
        if probs[cls] > probs[max_cls]:
            max_cls = cls
    return max_cls
def run(dataset, method='naive_bayes', confusion=False):
    """Evaluate ``dataset`` and return accuracy as a percentage.

    method: 'naive_bayes' (predict() with c=1), 'random' (uniform guess),
    or 'maxcls' (always predict the global majority class ``maxcls``).
    When ``confusion`` is set, the global 8x8 ``cf_mat`` is updated;
    labels above 4 are shifted down by 2 -- presumably because ratings
    5 and 6 do not occur in this dataset (confirm against the corpus).
    """
    count = 0
    num_samples = len(dataset)
    correct_prediction = 0
    for actual_cls, review in tqdm(dataset):
        count += 1
        # print(count)
        if method == "naive_bayes":
            prediction = predict(review, 1)
            if actual_cls == prediction:
                correct_prediction += 1
            if confusion:
                if prediction > 4:
                    prediction -= 2
                if actual_cls > 4:
                    actual_cls -= 2
                cf_mat[actual_cls - 1][prediction - 1] += 1
        elif method == "random":
            if actual_cls == random_prediction():
                correct_prediction += 1
        elif method == "maxcls":
            if actual_cls == maxcls:
                correct_prediction += 1
    return (correct_prediction / num_samples) * 100
def random_prediction():
    """Return a uniformly random class label from the training classes.

    Generalized: the previous randint(0, 7) indexing hard-coded exactly
    eight classes; random.choice works for any number of classes while
    keeping the same uniform distribution when there are eight.
    """
    return random.choice(list(data.keys()))
def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render confusion matrix ``cm`` onto the current matplotlib figure
    (caller is expected to invoke plt.show() afterwards).

    cm: 2D array of counts; classes: axis tick labels in matrix order.
    """
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Pick per-cell text color for contrast against the cell shading.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], '0.2f'),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# --- Script entry: train, evaluate, plot and pickle the model. ---

# "stemmed" CLI switch selects the pre-stemmed corpus variant.
if len(sys.argv) == 2 and sys.argv[1] == "stemmed":
    training_data = read_data("imdb/imdb_train_text_stemmed.txt", "imdb/imdb_train_labels.txt")
    testing_data = read_data("imdb/imdb_test_text_stemmed.txt", "imdb/imdb_test_labels.txt")
    output_file = "models/naive_bayes_stemmed.model"
else:
    training_data = read_data("imdb/imdb_train_text.txt", "imdb/imdb_train_labels.txt")
    testing_data = read_data("imdb/imdb_test_text.txt", "imdb/imdb_test_labels.txt")
    output_file = "models/naive_bayes.model"

# Module-level model state consumed by predict()/run().
data = format_data(training_data)
num_classes = len(data)
vocab = get_vocab(data)
V = len(vocab)
total_num_of_samples = 0
for rating in data:
    total_num_of_samples += data[rating]["num_of_samples"]
cf_mat = np.zeros([8, 8])  # confusion_matrix

print("Running on Training data")
train_accuracy = run(training_data)
print("Training Accuracy: %f\n" % (train_accuracy))

print("Running on Testing data")
test_accuracy = run(testing_data, confusion=True)
print("Test Accuracy: %f\n" % (test_accuracy))

# Baselines for comparison: random guess and majority class.
print("Random Prediction on Test Set")
test_accuracy = run(testing_data, method="random")
print("Accuracy: %f\n" % (test_accuracy))

print("Majority Prediction on Test Set")
maxcls = list(data.keys())[0]
for cls in data:
    if data[cls]["num_of_samples"] > data[maxcls]["num_of_samples"]:
        maxcls = cls
test_accuracy = run(testing_data, method="maxcls")
print("Accuracy: %f\n" % (test_accuracy))

# Confusion Matrix
classes = list(data.keys())
classes.sort()
plt.figure()
plot_confusion_matrix(cf_mat, classes=classes, title='Confusion matrix')  # , cmap=plt.cm.viridis_r)
plt.show()

# Persist learned parameters; the raw per-class word Counters are dropped
# first to shrink the pickle.
with open(output_file, "wb") as f:
    for cls in data:
        del data[cls]["words"]
    pickle.dump((phis, thetas, V, data), f)
| StarcoderdataPython |
5127389 | <reponame>legacyai/tf-transformers
# coding=utf-8
# Copyright 2021 TF-Transformers Authors and The TensorFlow Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main wrapper around ViT"""
from typing import Optional, Union
from absl import logging
from tf_transformers.core import ModelWrapper
from tf_transformers.core.read_from_hub import (
get_config_cache,
get_config_only,
load_pretrained_model,
)
from tf_transformers.models.vit import ViTEncoder as Encoder
from tf_transformers.models.vit.configuration_vit import ViTConfig as ModelConfig
from tf_transformers.models.vit.convert import convert_vit_pt as convert_pt
from tf_transformers.utils.docstring_file_utils import add_start_docstrings
from tf_transformers.utils.docstring_utils import (
ENCODER_MODEL_CONFIG_DOCSTRING,
ENCODER_PRETRAINED_DOCSTRING,
)
MODEL_TO_HF_URL = {
'vit-large-patch32-384': "tftransformers/vit-large-patch32-384",
'vit-base-patch32-384': "tftransformers/vit-base-patch32-384",
'vit-large-patch16-224': "tftransformers/vit-large-patch16-224",
'vit-base-patch16-224': "tftransformers/vit-base-patch16-224",
'vit-large-patch32-224-in21k': "tftransformers/vit-large-patch32-224-in21k",
'vit-base-patch32-224-in21k': "tftransformers/vit-base-patch32-224-in21k",
'google/vit-large-patch32-384': "tftransformers/vit-large-patch32-384",
'google/vit-base-patch32-384': "tftransformers/vit-base-patch32-384",
'google/vit-large-patch16-224': "tftransformers/vit-large-patch16-224",
'google/vit-base-patch16-224': "tftransformers/vit-base-patch16-224",
'google/vit-large-patch32-224-in21k': "tftransformers/vit-large-patch32-224-in21k",
'google/vit-base-patch32-224-in21k': "tftransformers/vit-base-patch32-224-in21k",
}
code_example = r'''
>>> from tf_transformers.models import ViTFeatureExtractorTF
>>> from tf_transformers.models import ViTModel
>>> image_path_list = # List fo image paths
>>> model_name = 'google/vit-base-patch16-224'
>>> feature_extractor = ViTFeatureExtractorTF(img_height=224, img_width=224)
>>> model = ViTModel.from_pretrained(model_name, classification_labels=1000)
>>> input_features = feature_extractor({'image': tf.constant(image_path_list)})
>>> model_outputs = model(input_features)
>>> predicted_class = tf.argmax(model_outputs['class_logits'], axis=-1)
'''
class ViTModel(ModelWrapper):
    """ViT Encoder Wrapper: builds ViTEncoder models from a config, the
    tf-transformers hub cache, or converted HuggingFace checkpoints."""

    def __init__(self, model_name: str = 'vit', cache_dir: Union[str, None] = None, save_checkpoint_cache: bool = True):
        """
        Args:
            model_name (str): Model name
            cache_dir (str): cache dir to save the mode checkpoints
        """
        super(ViTModel, self).__init__(
            model_name=model_name, cache_dir=cache_dir, save_checkpoint_cache=save_checkpoint_cache
        )

    def update_config(self, tft_config, hf_config):
        """Update tft config with hf config.

        Args:
            tft_config: tf-transformers config dict (mutated and returned).
            hf_config: HuggingFace config dict to copy values from.
        """
        tft_config["image_size"] = hf_config["image_size"]
        tft_config["patch_size"] = hf_config["patch_size"]
        tft_config["num_channels"] = hf_config["num_channels"]
        tft_config["embedding_size"] = hf_config["hidden_size"]
        tft_config["intermediate_size"] = hf_config["intermediate_size"]
        tft_config["num_attention_heads"] = hf_config["num_attention_heads"]
        tft_config["num_hidden_layers"] = hf_config["num_hidden_layers"]
        # Not every HF config carries id2label; skip num_labels then.
        # BUGFIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; keep it narrow.
        try:
            tft_config["num_labels"] = len(hf_config["id2label"])
        except (KeyError, TypeError):
            pass
        return tft_config

    @classmethod
    def get_config(cls, model_name: str):
        """Get a config from Huggingface hub if present"""
        # Check if it is under tf_transformers
        if model_name in MODEL_TO_HF_URL:
            URL = MODEL_TO_HF_URL[model_name]
            config_dict = get_config_only(URL)
            return config_dict
        else:
            # Check inside huggingface
            config = ModelConfig()
            config_dict = config.to_dict()
            cls_ref = cls()
            try:
                from transformers import PretrainedConfig

                hf_config = PretrainedConfig.from_pretrained(model_name)
                hf_config = hf_config.to_dict()
                config_dict = cls_ref.update_config(config_dict, hf_config)
                return config_dict
            except Exception as e:
                logging.info("Error: {}".format(e))
                logging.info("Failed loading config from HuggingFace")

    @classmethod
    @add_start_docstrings(
        "ViT Model from config :",
        ENCODER_MODEL_CONFIG_DOCSTRING.format("transformers.models.VitEncoder", "tf_transformers.models.vit.ViTConfig"),
    )
    def from_config(cls, config: ModelConfig, return_layer: bool = False, **kwargs):
        if isinstance(config, ModelConfig):
            config_dict = config.to_dict()
        else:
            config_dict = config
        # Dummy call to cls, as we need `_update_kwargs_and_config` function to be used here.
        cls_ref = cls()
        # if we allow names other than
        # whats in the class, we might not be able
        # to convert from hf properly.
        if "name" in kwargs:
            del kwargs["name"]
        kwargs_copy = cls_ref._update_kwargs_and_config(kwargs, config_dict)
        # if a config is provided, we wont be doing any extra .
        # Just create a model and return it with random_weights
        # (Distribute strategy fails)
        model_layer = Encoder(config_dict, **kwargs_copy)
        model = model_layer.get_model()
        logging.info("Create model from config")
        if return_layer:
            return model_layer
        return model

    @classmethod
    @add_start_docstrings(
        # BUGFIX: docstring header previously said "Bert Model" and
        # referenced BertModel/BertEncoder/bert-base-uncased (copy-paste
        # from the BERT wrapper); corrected to the ViT equivalents.
        "ViT Model Pretrained with example :",
        ENCODER_PRETRAINED_DOCSTRING.format(
            "tf_transformers.models.ViTModel", "tf_transformers.models.ViTEncoder", "google/vit-base-patch16-224", code_example
        ),
    )
    def from_pretrained(
        cls,
        model_name: str,
        classification_labels: int = None,
        cache_dir: Union[str, None] = None,
        model_checkpoint_dir: Optional[str] = None,
        convert_from_hf: bool = True,
        return_layer: bool = False,
        return_config: bool = False,
        convert_fn_type: Optional[str] = "both",
        save_checkpoint_cache: bool = True,
        load_from_cache: bool = True,
        skip_hub=False,
        **kwargs,
    ):
        # Load a base config and then overwrite it
        cls_ref = cls(model_name, cache_dir, save_checkpoint_cache)
        # Check if model is in our Huggingface hub cache
        if model_name in MODEL_TO_HF_URL and skip_hub is False:
            URL = MODEL_TO_HF_URL[model_name]
            config_dict, local_cache = get_config_cache(URL)
            kwargs_copy = cls_ref._update_kwargs_and_config(kwargs, config_dict)
            if classification_labels:
                logging.info("Using pretrained classifier layer with num_class={}".format(classification_labels))
            model_layer = Encoder(config_dict, classification_labels=classification_labels, **kwargs_copy)
            model = model_layer.get_model()
            # Load Model
            load_pretrained_model(model, local_cache, URL)
            if return_layer:
                if return_config:
                    return model_layer, config_dict
                return model_layer
            if return_config:
                return model, config_dict
            return model

        config = ModelConfig()
        config_dict = config.to_dict()
        try:
            from transformers import PretrainedConfig

            hf_config = PretrainedConfig.from_pretrained(model_name)
            hf_config = hf_config.to_dict()
            config_dict = cls_ref.update_config(config_dict, hf_config)
        except Exception as e:
            logging.info("Error: {}".format(e))
            logging.info("Failed loading config from HuggingFace")

        # if we allow names other than
        # whats in the class, we might not be able
        # to convert from hf properly.
        if "name" in kwargs:
            del kwargs["name"]
        kwargs_copy = cls_ref._update_kwargs_and_config(kwargs, config_dict)
        if classification_labels:
            logging.info("Using pretrained classifier layer with num_class={}".format(classification_labels))
        model_layer = Encoder(config_dict, classification_labels=classification_labels, **kwargs_copy)
        model = model_layer.get_model()

        # Give preference to model_checkpoint_dir
        if model_checkpoint_dir:
            model.load_checkpoint(model_checkpoint_dir)
        else:
            load_succesfuly = False
            if cls_ref.model_path.exists():
                try:
                    if load_from_cache:
                        model.load_checkpoint(str(cls_ref.model_path))
                        load_succesfuly = True
                except Exception as e:
                    logging.warn(e)
            # Fall back to converting HuggingFace weights.
            if convert_from_hf and not load_succesfuly:
                if convert_fn_type == "both":
                    cls_ref.convert_hf_to_tf(
                        model,
                        config_dict,
                        convert_tf_fn=None,
                        convert_pt_fn=convert_pt,
                    )
                if convert_fn_type == "pt":
                    cls_ref.convert_hf_to_tf(model, config_dict, convert_tf_fn=None, convert_pt_fn=convert_pt)

        if return_layer:
            if return_config:
                return model_layer, config
            return model_layer
        if return_config:
            return model, config
        return model
| StarcoderdataPython |
1767962 | <filename>cwbot/managers/BaseManager.py
import weakref
import time
import abc
import logging
from cwbot import logConfig
import cwbot.util.DebugThreading as threading
from cwbot.util.textProcessing import toTypeOrNone
from cwbot.common.objectContainer import ModuleEntry
from cwbot.common.exceptions import FatalError
from cwbot.util.importClass import easyImportClass
from cwbot.sys.eventSubsystem import EventSubsystem
from cwbot.sys.heartbeatSubsystem import HeartbeatSubsystem
from cwbot.sys.database import encode
class ManagerMetaClass(abc.ABCMeta):
    """Metaclass that rejects any manager class whose own class body
    does not declare a 'capabilities' attribute."""

    def __init__(cls, name, bases, attrs):  # @NoSelf
        # Inherited capabilities do not count: the attribute must appear
        # directly in the class dict being created.
        has_capabilities = 'capabilities' in attrs
        if not has_capabilities:
            raise NotImplementedError(
                "The '{}' class does not implement a"
                " 'capabilities' attribute".format(name))
        super(ManagerMetaClass, cls).__init__(name, bases, attrs)
class BaseManager(EventSubsystem.EventCapable,
HeartbeatSubsystem.HeartbeatCapable, metaclass=ManagerMetaClass):
"""
Base class for all manager objects. Every subclass MUST impliment a
capabilities attribute that is a list of strings.
Managers are the middle tier of processing. The CommunicationDirector
holds many managers, and each manager holds many modules.
The job of a manager is to filter information. The CommunicationDirector
passes every Kmail and Chat to every manager. Each manager filters this
information, passing applicable Kmails/Chats to each of its
modules. Manager filtering should be "all-or-nothing": Managers should
decide if a Kmail/Chat is applicable, and if so, pass it to each of
its modules. It is not the job of a manager to determine which of its
modules should process which chat/kmail.
It is also the manager's job to handle permissions by checking if a user
has the required permission before passing chats/kmails to modules. The
same applies to checking in-clan status.
A manager may also pass supplementary information to its modules,
by both supplying information via the _moduleInitData method and
possibly through other methods.
Managers are also in charge of syncing the state of their constituent
modules by periodically calling _syncState(), which utilizes the sqlite3
database.
"""
capabilities = ['inventory', 'chat']
__clanMembers = set([])
__clanNonMembers = {}
_syncLock = threading.RLock() # lock for syncing state
def __init__(self, parent, identity, iData, config):
""" Initialize the BaseManager. When you call this from a
derived class, the following occurs:
1. The manager is linked to the Heartbeat and Event subsystems.
2. Various variables are established.
3. The _configure() method is called.
4. The modules in the config map are added to self._modules.
5. The _initialize() method is called.
"""
self._initialized = False
super(BaseManager, self).__init__(name="sys.{}".format(identity),
identity=identity,
evSys=parent.eventSubsystem,
hbSys=parent.heartbeatSubsystem)
self.__configureOnline = False
self.__initializeOnline = False
self._s = iData.session
self._c = iData.chatManager
logConfig.setFileHandler(identity, "log/{}.log".format(identity))
self._log = logging.getLogger(identity)
self._log.info("----- Manager {} startup -----".format(identity))
self._invMan = iData.inventoryManager
self._props = iData.properties
self._db = iData.database
self.identity = identity
self.syncTime = 300
self._lastSync = time.time()
self._db.createStateTable()
self._persist = self._db.loadStateTable(self.identity)
self._modules = []
self.__parent = weakref.ref(parent)
self._configure(config)
self._addModules(config)
self._initialize()
self._initialized = True
def _configure(self, config):
"""
Perform configuration of the Manager. This should be overridden in
derived classes. But be sure to call its parent's _configure() method
too. Otherwise, self.syncTime will be set to 300. """
try:
self.syncTime = config['sync_interval']
except ValueError:
raise Exception("sync_interval must be integral")
def _addModules(self, config):
""" Dynamically import the modules specified in modules.ini. This
should not be overridden. """
base = config['base']
# loop through modules
for k,v in list(config.items()):
if isinstance(v, dict):
cfg = v
perm = toTypeOrNone(v['permission'], str)
priority = v['priority']
clanOnly = v['clan_only']
# import class
try:
ModuleClass = easyImportClass(base, v['type'])
except ImportError:
raise FatalError("Error importing module/class {0} "
"from base {1}. Either the module does "
"not exist, or there was an error. To "
"check for errors, use the command line "
"'python -m {1}.{0}'; the actual path "
"may vary."
.format(v['type'], base))
self._modules.append(ModuleEntry(
ModuleClass, priority, perm, clanOnly, self, k, cfg))
# sort by decreasing priority
self._modules.sort(key=lambda x: -x.priority)
self._log.info("---- {} creating module instances... ----"
.format(self.identity))
for m in self._modules:
self._log.info("Creating {0.className} with priority "
"{0.priority}, permission {0.permission}."
.format(m))
try:
m.createInstance()
except TypeError as e:
self._log.exception("Error!")
raise FatalError("Error instantiating class {}: {}"
.format(m.className, e.args[0]))
self._log.info("---- All modules created. ----")
def _initialize(self):
""" Runs after _addModules. If there is additional initialization
to do, you should override this, but be sure to call the parent's
_initialize() method to properly initialize the modules. """
self._log.debug("Initializing...")
d = self._moduleInitData()
self._log.debug("Loaded initialization data.")
with self._syncLock:
self._log.debug("Checking persistent state...")
try:
if len(self._persist) == 0:
self._persist['__init__'] = ""
except ValueError:
self._clearPersist()
self._log.debug("Preparing to initialize modules...")
self._initializeModules(d)
self._log.debug("Performing initial state sync...")
self._syncState(force=True)
def _moduleInitData(self):
""" This is the initialization data that is passed when initializing
each module. """
return {}
def _initializeModules(self, initData):
"""(Re)initialize processors. If persistent state is present, it is
loaded and passed to the module's initialize() method; if absent, the
module's initialState property is used instead. If an error occurs,
the initialState is used as well and the old state is deleted.
"""
hbSys = self.heartbeatSubsystem
with self._syncLock:
for m in self._modules:
mod = m.module
self._log.info("Initializing {} ({})."
.format(mod.id, mod.__class__.__name__))
success = False
if mod.id in self._persist:
try:
state = self._persist[mod.id]
if state is None:
self._log.info("Null state for module {}, using "
"default...".format(mod.id))
state = mod.initialState
if len(str(state)) > 500:
self._log.debug("Initializing module {} ({}) with "
"state {{TOO LONG TO FIT}}"
.format(mod.id,
mod.__class__.__name__))
else:
self._log.debug("Initializing module {} ({}) with "
"state {}"
.format(mod.id,
mod.__class__.__name__,
state))
mod.initialize(state, initData)
success = True
except (KeyboardInterrupt, SystemExit,
SyntaxError, FatalError):
raise
except Exception:
self._log.exception("ERROR initializing module "
"with persistent state")
self._log.error("Reverting to unknown state...")
if not success:
self._log.info("No state detected for module {0.id} "
"({0.__class__.__name__}); using default "
"state {0.initialState}".format(mod))
mod.initialize(mod.initialState, initData)
mod.heartbeatRegister(hbSys)
self._log.info("---- Finished initializing modules ----")
def _clearPersist(self):
""" Remove all persistent state data. Note that states are
periodically synced, so if you don't also reset each module, this will
essentially do nothing. """
with self._syncLock:
self._db.updateStateTable(self.identity, {}, purge=True)
self._persist = self._db.loadStateTable(self.identity)
def _syncState(self, force=False):
''' Store persistent data for Modules in the database. '''
with self._syncLock:
if self._persist is None:
return
for m in self._modules:
mod = m.module
self._persist[mod.id] = mod.state
if time.time() - self._lastSync > self.syncTime or force:
self._log.debug("Syncing state for {}".format(self.identity))
try:
self._db.updateStateTable(self.identity, self._persist)
except Exception as e:
for k,v in list(self._persist.items()):
try:
encode([k,v]) # check if JSON error
except:
raise ValueError("Error encoding state {} for "
"module {}: {}"
.format(v, k, e.args))
raise
self._lastSync = time.time()
def checkClan(self, uid):
""" Check if a user is in the same clan as the bot or if they are on
the whitelist. Returns {} if user is not in clan. Otherwise returns
the user record, a dict with keys 'userId', 'userName', 'karma',
'rankName', 'whitelist', and 'inClan'. Note that 'karma' will be zero
if the user is whitelisted and outside the clan (which will be
indicated by the 'inClan' field equal to False). """
if uid <= 0:
return {'inClan': True, 'userId': uid, 'rankName': 'SPECIAL',
'userName': str(uid), 'karma': 1, 'whitelist': False}
info = self.director.clanMemberInfo(uid)
return info
    def cleanup(self):
        """ run cleanup operations before bot shutdown. This MUST be called
        before shutting down by the CommunicationDirector. """
        with self._syncLock:
            self._log.info("Cleaning up manager {}...".format(self.identity))
            self._log.debug("Cleanup: syncing states...")
            # Final forced state sync BEFORE tearing anything down.
            self._syncState(force=True)
            self._initialized = False
            # _persist=None marks shutdown; _syncState() no-ops afterwards.
            self._persist = None
            # Tear modules down in reverse of their initialization order.
            for m in reversed(self._modules):
                mod = m.module
                self._log.debug("Cleanup: unregistering heartbeat for {}..."
                                .format(mod.id))
                mod.heartbeatUnregister()
                self._log.debug("Cleanup: unregistering events for {}..."
                                .format(mod.id))
                mod.eventUnregister()
                self._log.debug("Cleanup: cleaning up module {}...".format(mod.id))
                mod.cleanup()
            # Finally detach the manager itself from heartbeat/event systems.
            self._log.debug("Unregistering heartbeat...")
            self.heartbeatUnregister()
            self._log.debug("Unregistering events...")
            self.eventUnregister()
            self._log.info("Done cleaning up manager {}".format(self.identity))
            self._modules = None
            self._log.info("----- Manager shut down. -----\n")
@property
def director(self):
""" Get a reference to the CommunicationDirector. """
parent = self.__parent()
if parent is not None:
return parent
return None
    @property
    def session(self):
        """ Get the current session (for pyKol requests). """
        # _s is assigned outside this chunk; presumably a pyKol session
        # object -- confirm at the constructor.
        return self._s
    @property
    def properties(self):
        """ Get the current RunProperties (load various information) """
        return self._props
    @property
    def inventoryManager(self):
        """ Get the current InventoryManager """
        return self._invMan
    @property
    def chatManager(self):
        """ Get the current chat manager (exposes e.g. currentChannel). """
        return self._c
    def defaultChannel(self):
        """ Get the default chat channel for this manager. May be overridden
        in derived classes. If no channel is specified in sendChatMessage(),
        self.defaultChannel is used. By default, this uses the current
        chat channel (i.e., not the "listened" channels, the main one). """
        return self.chatManager.currentChannel
def sendChatMessage(self,
text, channel=None, waitForReply=False, raw=False):
""" Send a chat message with specified text. If no channel is
specified, self.defaultChannel is used. If waitForReply is true, the
chatManager will block until response data is loaded; otherwise, the
chat is sent asynchronously and no response is available. If raw is
true, the chat is sent undecorated; if false, the chat is sanitized
to avoid /command injections and is decorated in emote format. """
if channel is None:
channel = self.defaultChannel()
if channel is None or channel == "DEFAULT":
channel = self.chatManager.currentChannel
useEmote = not raw
return self.director.sendChat(channel, text, waitForReply, useEmote)
    def whisper(self, uid, text, waitForReply=False):
        """ Send a private message to the specified user. Thin delegate to
        the CommunicationDirector; returns whatever director.whisper does. """
        return self.director.whisper(uid, text, waitForReply)
    def sendKmail(self, message):
        """ Send a Kmail that is not a reply. message should be a Kmail object
        from the common.kmailContainer package. Delegates to the director;
        no value is returned. """
        self.director.sendKmail(message)
    def parseChat(self, msg, checkNum):
        """ This function is called by the CommunicationDirector every time
        a new chat is received. The manager can choose to ignore the chat or
        to process it. To ignore the chat, just return []. To process it, pass
        the chat to each module and return a LIST of all the replies that
        are not None. Base implementation ignores everything. """
        return []
    def parseKmail(self, msg):
        """ Parse Kmail and return any replies in a LIST of KmailResponses
        in the same fashion as the parseChat method. Base implementation
        ignores everything. """
        return []
def kmailFailed(self, module, message, exception):
""" This is called by the CommunicationDirector if a kmail fails
to send for some reason. """
if module is not None:
module.extendedCall('message_send_failed', message, exception)
def _heartbeat(self):
""" By default, the heartbeat calls syncState(), so in derived classes
be sure to do that too or call the parent _heartbeat(). """
if self._initialized:
self._syncState()
| StarcoderdataPython |
350054 | <gh_stars>1-10
#!/usr/bin/python -S
"""Python implementation of the experiment framework."""
import logging
import os
import subprocess
logger = logging.getLogger(__name__)
# An experiment <name> is advertised (presumably by the external
# register_experiment helper) as a "<name>.available" file here.
EXPERIMENTS_TMP_DIR = '/tmp/experiments'
# An experiment is switched on by the presence of "<name>.active" here.
EXPERIMENTS_DIR = '/fiber/config/experiments'
# Experiment names already warned about being unregistered (warn once).
_experiment_warned = set()
# Experiment names currently observed enabled (used to log transitions).
_experiment_enabled = set()
def register(name):
    """Best-effort registration of experiment *name* through the external
    ``register_experiment`` helper; failures are only logged."""
    try:
        rv = subprocess.call(['register_experiment', name])
    except OSError as e:
        # Helper binary missing or not executable -- log and move on.
        logger.info('register_experiment: %s', e)
        return
    if rv:
        logger.error('Failed to register experiment %s.', name)
def enabled(name):
    """Check whether an experiment is enabled.

    Copy/pasted from waveguide/helpers.py.

    Args:
      name: The name of the experiment to check.

    Returns:
      Whether the experiment is enabled. An unregistered experiment is
      reported as disabled (and warned about once).
    """
    if not os.path.exists(os.path.join(EXPERIMENTS_TMP_DIR,
                                       name + '.available')):
        if name not in _experiment_warned:
            _experiment_warned.add(name)
            logger.warning('Warning: experiment %r not registered.', name)
        # BUG FIX: the original fell through to ``return is_enabled`` with
        # ``is_enabled`` never assigned on this path, raising
        # UnboundLocalError for any unregistered experiment.
        return False
    is_enabled = os.path.exists(os.path.join(EXPERIMENTS_DIR,
                                             name + '.active'))
    # Log only the enable/disable transitions, not every poll.
    if is_enabled and name not in _experiment_enabled:
        _experiment_enabled.add(name)
        logger.info('Notice: using experiment %r.', name)
    elif not is_enabled and name in _experiment_enabled:
        _experiment_enabled.remove(name)
        logger.info('Notice: stopping experiment %r.', name)
    return is_enabled
| StarcoderdataPython |
6485420 | <filename>marlo/envs/make_env.py
import argparse
import os
import shutil
from pathlib import Path

parser = argparse.ArgumentParser(description='Make a Marlo Env')
parser.add_argument('--name', type=str, required=True, help='the environment name')
parser.add_argument('--mission_file', type=str, required=True, help='the mission file')
parser.add_argument('--description', type=str, default=None, help='a brief description of the env')
args = parser.parse_args()

print("Make env " + args.name)

# Create <name>/ and <name>/templates/ for the generated env package.
env_dir = args.name
if not os.path.exists(env_dir):
    os.makedirs(env_dir)
templates_dir = env_dir + "/templates"
if not os.path.exists(templates_dir):
    os.makedirs(templates_dir)

mission_file = Path(args.mission_file)
if not mission_file.exists():
    print("Mission file does not exist!")
    raise SystemExit(-1)
# BUG FIX: was ``templates_dir + "\\mission.xml"`` -- on POSIX that creates a
# file literally named "templates\mission.xml" instead of templates/mission.xml.
shutil.copy(args.mission_file, os.path.join(templates_dir, "mission.xml"))

# Optional env description, read from the file given on the command line.
if args.description is None:
    description = ""
else:
    description_file = Path(args.description)
    if not description_file.exists():
        print("description file does not exist")
        raise SystemExit(-2)
    description = description_file.read_text()

# __init__.py template: registers the gym environment id for the new env.
init_py = """
import gym
from .main import MarloEnvBuilder
def _register():
    ##########################################
    # Version 0 of env
    ##########################################
    gym.envs.registration.register(
        id='MarLo-%ENV_NAME%-v0',
        entry_point=MarloEnvBuilder
    )
""".replace("%ENV_NAME%", args.name)
init_file = env_dir + "/__init__.py"
Path(init_file).write_text(init_py)

# main.py template: env builder subclass pointing at the templates folder;
# %ENV_DESCRIPTION% becomes the class docstring.
main_py = """import marlo
from marlo import MarloEnvBuilderBase
from marlo import MalmoPython
import os
from pathlib import Path
class MarloEnvBuilder(MarloEnvBuilderBase):
    %ENV_DESCRIPTION%
    def __init__(self, extra_params=None):
        if extra_params is None:
            extra_params={}
        super(MarloEnvBuilder, self).__init__(
            templates_folder=os.path.join(
                str(Path(__file__).parent),
                "templates"
            )
        )
        self.params = self._default_params()
        # You can do something with the extra_params if you wish
    def _default_params(self):
        _default_params = super(MarloEnvBuilder, self).default_base_params
        return _default_params
if __name__ == "__main__":
    env_builder = MarloEnvBuilder()
    mission_xml = env_builder.render_mission_spec()
    mission_spec = MalmoPython.MissionSpec(mission_xml, True)
    print(mission_spec.getSummary())
""".replace('%ENV_DESCRIPTION%', '"""\n' + description + '"""')
main_file = env_dir + "/main.py"
Path(main_file).write_text(main_py)
| StarcoderdataPython |
import AtlejgTools.SimulationTools.UnitConversion as u
'''
Utilities for rotating-cylinder (Taylor-Couette) flow.

All values are assumed SI:
 r_i   : inner radius [m]
 r_o   : outer radius [m]
 omega : rotational speed [rad/s]
 mu    : dynamic viscosity [kg/m/s]
 rho   : density
The outer wall is assumed stationary.

References:
 r1: Technical Note 2006-1: Pine Research Instrumentation. http://www.pineinst.com/echem/files/LMECN200601.pdf
 r2: http://astro.berkeley.edu/~jrg/ay202/node140.html (not online. have a printout)
 r3: http://www.pineinst.com/echem/files/LMECN200601.pdf
 r4: <NAME>. Corrosion-vol61, no6, p515ff
'''
def reynolds_number(r_i, omega, rho, mu):
    '''Rotational Reynolds number of the inner cylinder (r1 eq. 1):
    Re = rho * (omega*r_i) * (2*r_i) / mu.'''
    tangential_speed = omega * r_i
    diameter = 2 * r_i
    return rho * tangential_speed * diameter / mu
def taylor_number(r_i, r_o, omega, rho, mu):
    '''Taylor number of the annular gap (see r2):
    Ta = 4 omega^2 r_i^2 d^4 / (r_o^2 - r_i^2) / nu^2, d = r_o - r_i.'''
    gap = r_o - r_i
    nu = mu / rho  # kinematic viscosity
    return 4 * omega**2 * r_i**2 * gap**4 / (r_o**2 - r_i**2) / nu**2
def wall_shear_stress(r_i, r_o, omega, rho, mu):
    '''Wall shear stress for rotating-inner-cylinder flow.

    Laminar regime (Taylor number below critical): returns the tuple
    (tau_inner, tau_outer) from the analytic Couette profile u(r) = A*r + B/r.
    Turbulent regime (re > 200): returns a single scalar stress from r3 eq 5.
    NOTE(review): the two branches return different shapes -- preserved from
    the original; callers must handle both.
    '''
    ta = taylor_number(r_i, r_o, omega, rho, mu)
    re = reynolds_number(r_i, omega, rho, mu)
    print('ta = %g re = %g' % (ta, re))
    if ta < critical_taylor_number(omega, 0):
        # Laminar Couette flow between cylinders.
        d2 = r_o**2 - r_i**2
        A = -omega * r_i**2 / d2
        B = omega * r_i**2 * r_o**2 / d2
        #
        dudr_o = A - B/r_o**2  # du/dr @ r=r_o ( == 2*A)
        # BUG FIX: was ``A - B/r_i*2`` i.e. (B/r_i)*2; the derivative of
        # u = A*r + B/r is A - B/r**2.
        dudr_i = A - B/r_i**2  # du/dr @ r=r_i
        tau_o = -mu * dudr_o
        tau_i = -mu * dudr_i
        return tau_i, tau_o
    elif re > 200:
        # using r3 eq 5 (turbulent)
        return 0.0791 * re**-0.3 * rho * r_i**2 * omega**2
    else:
        raise Exception("dont know what to do")
def critical_taylor_number(omega_inner, omega_outer):
    '''Critical Taylor number above which laminar Couette flow becomes
    unstable (1707.762 is the classical single-cylinder threshold).'''
    ratio = omega_outer / omega_inner
    return 2 * 1707.762 / (1. + ratio)
def bob_radius_ala_silverman(d_pipe, v_pipe, rho, mu, Sc):
    '''Rotating-bob radius equivalent to pipe-flow conditions, per
    Silverman (r4 eq 5). Returns the radius (eq 5 gives the diameter).'''
    nu = mu / rho
    d_bob = (8.442 * d_pipe**0.1786 * Sc**0.0857 * nu**0.25 * v_pipe**-0.25)**2.333
    return d_bob / 2.
def omega(shear_stress, rho, mu, Sc=None, d_pipe=None, v_pipe=None, r_i=None):
    '''Rotation rate giving *shear_stress* on the bob wall.

    If r_i is not supplied, it is derived from the pipe-flow conditions via
    bob_radius_ala_silverman (Sc, d_pipe, v_pipe must then be given).
    Returns the tuple (omega, r_i). Inverts the turbulent wall-shear
    correlation (r3 eq 5) for omega.
    '''
    if r_i is None:  # FIX: was ``r_i == None``; identity test is correct here
        r_i = bob_radius_ala_silverman(d_pipe, v_pipe, rho, mu, Sc)
    omega = (shear_stress * (2*r_i**2/(mu/rho))**0.3 / (0.0791*r_i**2*rho))**(1./1.7)
    return omega, r_i
| StarcoderdataPython |
6495789 | <reponame>slin96/mmlib
import os
import random
import numpy as np
import torch
# Fixed seed shared by all RNGs below so runs are reproducible.
SEED = 42

def deterministic(func, f_args=None, f_kwargs=None):
    """
    Execute the given function deterministically, calling set_deterministic first.
    :param func: The function to execute.
    :param f_args: The args for the function to execute.
    :param f_kwargs: The kwargs for the function to execute.
    :return: The results of the executed function.
    """
    if f_kwargs is None:
        f_kwargs = {}
    if f_args is None:
        f_args = []
    set_deterministic()
    return func(*f_args, **f_kwargs)

def set_deterministic():
    """
    Makes execution reproducible following the instructions form:
    https://pytorch.org/docs/1.7.1/notes/randomness.html?highlight=reproducibility
    """
    # set seeds for pytorch and all used libraries
    random.seed(SEED)  # seed for random python
    torch.manual_seed(SEED)  # seed the RNG for all devices (both CPU and CUDA)
    np.random.seed(SEED)
    # turn of benchmarking for convolutions
    torch.backends.cudnn.benchmark = False
    # avoid non-deterministic algorithms. COMPAT FIX: torch.set_deterministic
    # was deprecated (1.11) and later removed in favour of
    # torch.use_deterministic_algorithms (available since 1.8); support both.
    if hasattr(torch, 'use_deterministic_algorithms'):
        torch.use_deterministic_algorithms(True)
    else:
        torch.set_deterministic(True)
    # for CUDA version10.2 or greater: set the environment variable CUBLAS_WORKSPACE_CONFIG according to CUDA
    # documentation: https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
    # set a debug environment variable CUBLAS_WORKSPACE_CONFIG to
    # ":16:8" (may limit overall performance) or
    # ":4096:8" (will increase library footprint in GPU memory by approximately 24MiB).
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":16:8"
| StarcoderdataPython |
6539824 | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.views import View
# Create your views here.
from .models import BenURL
def Ben_redirect_view(request, shortcode=None, *args, **kwargs):  # function based view
    """Redirect to the URL stored under *shortcode*.

    If the lookup fails, fall back to the first stored URL, and finally to
    Google when the table is empty.
    """
    try:
        url = BenURL.objects.get(shortcode=shortcode).url
    except Exception:
        # BUG FIX: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit, and its fallback could bind a plain
        # string ("https://www.google.com") whose subsequent ``.url``
        # attribute access raised AttributeError on an empty table.
        fallback = BenURL.objects.all().first()
        url = fallback.url if fallback is not None else "https://www.google.com"
    return HttpResponseRedirect(url)
class BenClassBasedView(View):
    """Class-based shortcode redirect view (GET only)."""

    def get(self, request, shortcode=None, *args, **kwargs):
        """Redirect to the URL stored under *shortcode*, with fallbacks."""
        try:
            url = BenURL.objects.get(shortcode=shortcode).url
        except Exception:
            # BUG FIX: was a bare ``except:`` (also traps SystemExit etc.),
            # and the string fallback "https://www.google.com" previously hit
            # an AttributeError at ``obj.url`` when the table was empty.
            fallback = BenURL.objects.all().first()
            url = fallback.url if fallback is not None else "https://www.google.com"
        return HttpResponseRedirect(url)
| StarcoderdataPython |
11241117 | #libreria per generare grafici
# Library used to generate plots
import matplotlib.pyplot as plt
# lib to remove files
import os

print("Make the Rewards Plot")

# Read the reward log once (one integer reward per line). The original read
# the file twice -- once to count lines, once to parse -- and leaked the
# first file handle.
with open("rewards_taxi_value_iteration.txt", "r") as f:
    rewards = [int(line) for line in f if line.strip()]
n = len(rewards)

# FIX: trial axis sized to the data; the hard-coded range(1, 51) crashed
# whenever the log did not contain exactly 50 rewards.
eps = range(1, n + 1)
newRewards = [{"x": eps, "y": rewards, "ls": "-", "label": "Value Iteration"}]

plt.figure(figsize=(15, 10))
for s in newRewards:
    plt.plot(s["x"], s["y"], label=s["label"])
plt.title("Rewards collected over the time for Taxi game with Value Iteration")
plt.xlabel("Trials")
plt.ylabel("Rewards")
plt.grid()  # draw the grid
# BUG FIX: save BEFORE show() -- closing the interactive window destroys the
# figure, so a savefig() afterwards writes a blank canvas.
plt.savefig("rewards_taxi_value_iteration.png")
plt.show()  # display the plot interactively
# NOTE(review): machine-specific absolute path preserved from the original;
# consider removing the relative "rewards_taxi_value_iteration.txt" instead.
os.remove("/home/giacomo/Scrivania/Q_Learning_Games_v3/Q_Learning_Games_v3_Value_Iteration/Taxi_Game_Value_Iteration/Taxi_Analysis/rewards_taxi_value_iteration.txt")
| StarcoderdataPython |
47163 | <filename>tests/test_altitudo.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `altitudo` package."""
import pytest
from click.testing import CliRunner
from altitudo import cli, altitudo
def test_command_line_interface():
    """Smoke-test the CLI: a coordinate lookup and the --help screen."""
    runner = CliRunner()
    lookup = runner.invoke(cli.main, ['--', 39.90974, -106.17188])
    print(lookup.output)
    assert lookup.exit_code == 0
    assert lookup.output == '2768.5\n'
    help_result = runner.invoke(cli.main, ['--help'])
    assert help_result.exit_code == 0
    assert 'Usage: altitudo [OPTIONS] LAT LON' in help_result.output
def test_altitudo_single_coordinate():
    """A scalar lat/lon pair yields a single float elevation."""
    assert isinstance(altitudo(lat=39.90974, lon=-106.17188), float)
def test_altitudo_multi_coordinates():
    """List inputs yield a list of per-point result dicts."""
    results = altitudo(lat=[39.90974, 39.90974], lon=[-106.17188, -106.17188])
    assert isinstance(results, list)
    assert isinstance(results[0], dict)
    assert len(results) == 2
def test_altitudo_convert_feet():
    """feet=True multiplies the metre elevation by 3.28084."""
    meters = altitudo(lat=39.90974, lon=-106.17188)
    assert meters == 2768.5
    feet = altitudo(lat=39.90974, lon=-106.17188, feet=True)
    assert feet == 2768.5 * 3.28084
| StarcoderdataPython |
1967202 | # Generated by Django 3.1.5 on 2021-01-26 20:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Depends on the previous almanac migration so the session table exists.
    dependencies = [
        ('almanac', '0004_auto_20210127_0919'),
    ]
    operations = [
        # Session.time stores a single-character choice:
        # 'M' Morning, 'A' Afternoon, 'E' Evening (default Evening).
        migrations.AlterField(
            model_name='session',
            name='time',
            field=models.CharField(choices=[('M', 'Morning'), ('A', 'Afternoon'), ('E', 'Evening')], default='E', max_length=1),
        ),
    ]
| StarcoderdataPython |
3360697 | """
Models for event app
"""
from .event import Event
from .eventregistration import EventRegistration
__all__ = ["Event", "EventRegistration"]
| StarcoderdataPython |
3302273 | <gh_stars>0
# Copyright 2019 Adobe
# All Rights Reserved.
#
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it. If you have received this file from a source other than Adobe,
# then your use, modification, or distribution of it requires the prior
# written permission of Adobe.
#
import unittest
from protector.protector_main import Protector
from protector.query.query import OpenTSDBQuery
from mock import mock
p = None
class MockRedis(object):
    """Minimal stand-in for redis.Redis: every key is reported missing."""

    def exists(self, key):
        # The tests never want a cache hit.
        return False
@mock.patch("redis.Redis", mock.MagicMock(return_value=MockRedis()))
def get_protector():
    """Lazily build the module-level Protector with redis mocked out."""
    global p
    redis_conf = {"redis": {"host": "", "port": "", "password": ""}}
    p = Protector({"query_no_aggregator": None}, [], [], redis_conf, False)
class TestProtector(unittest.TestCase):
    """Exercise Protector.check() blacklist / safe-mode / validation logic
    against canned OpenTSDB query payloads (redis is mocked in get_protector)."""

    def setUp(self):
        """Build the shared Protector once and the query payloads."""
        if not p:
            get_protector()
        # payload1: aggregated query with a tag filter.
        self.payload1 = """
        {
            "start": "3m-ago",
            "queries": [
            {
                "metric": "mymetric.received.P95",
                "aggregator": "max",
                "downsample": "20s-max",
                "filters": [
                {
                    "filter": "DEV",
                    "groupBy": false,
                    "tagk": "environment",
                    "type": "iliteral_or"
                }
                ]
            }
            ]
        }
        """
        # payload2: metric name that does NOT start with "mymetric".
        self.payload2 = """
        {
            "start": "3m-ago",
            "queries": [
            {
                "metric": "a.mymetric.received.P95",
                "aggregator": "max",
                "downsample": "20s-max",
                "filters": []
            }
            ]
        }
        """
        # payload3: bare metric name "mymetric" (no trailing dot).
        self.payload3 = """
        {
            "start": "3m-ago",
            "queries": [
            {
                "metric": "mymetric",
                "aggregator": "max",
                "downsample": "20s-max",
                "filters": []
            }
            ]
        }
        """
        # payload4: "none" aggregator -- rejected unless safe mode is on.
        self.payload4 = """
        {
            "start": "3m-ago",
            "queries": [
            {
                "metric": "mymetric",
                "aggregator": "none",
                "downsample": "20s-max",
                "filters": []
            }
            ]
        }
        """

    def test_blacklist(self):
        """Blacklisted metric patterns are rejected; clearing the list allows them."""
        # NOTE(review): "^mymetric\." relies on an unrecognized string escape;
        # a raw string r"^mymetric\." would be cleaner.
        p.blacklist = ["^releases$", "^mymetric\.", ".*java.*boot.*version.*"]
        self.assertFalse(p.check(OpenTSDBQuery(self.payload1)).is_ok())
        self.assertTrue(p.check(OpenTSDBQuery(self.payload2)).is_ok())
        self.assertTrue(p.check(OpenTSDBQuery(self.payload3)).is_ok())
        p.blacklist = []
        self.assertTrue(p.check(OpenTSDBQuery(self.payload1)).is_ok())

    def test_safe_mode(self):
        """Safe mode lets an otherwise-rejected (no-aggregator) query through."""
        p.blacklist = ["^releases$", "^mymetric"]
        p.safe_mode = True
        self.assertTrue(p.check(OpenTSDBQuery(self.payload4)).is_ok())
        p.safe_mode = False
        self.assertFalse(p.check(OpenTSDBQuery(self.payload4)).is_ok())

    def test_invalid_queries(self):
        """Structurally invalid payloads raise 'Invalid OpenTSDB query'."""
        p.safe_mode = False
        with self.assertRaisesRegexp(Exception, 'Invalid OpenTSDB query'):
            p.check(OpenTSDBQuery('{}'))
        with self.assertRaisesRegexp(Exception, 'Invalid OpenTSDB query'):
            p.check(OpenTSDBQuery('{"start": ""}'))
| StarcoderdataPython |
1736776 | from datetime import date, timedelta, datetime, timezone
# __pragma__('opov')
def fix_time(dt):
    """Nudge *dt* away from rollover boundaries so paired now()/utcnow()
    samples compare equal in the checks below.

    NOTE(review): the ``hour > 23`` branch is unreachable for CPython
    datetimes (hour is always 0-23); presumably it guards a Transcrypt
    runtime quirk -- confirm before removing.
    """
    if dt.hour > 23:
        dt = dt - timedelta(minutes=60)
    if dt.minute > 50:
        dt = dt - timedelta(minutes=10)
    return dt
def run (autoTester):
    """Regression checks for the datetime module: exercises timezone,
    timedelta, date and datetime construction, formatting, arithmetic and
    comparisons, reporting each result through autoTester.check()."""
    # timezone
    tz = timezone.utc
    autoTester.check (repr (tz))
    tz2 = timezone (timedelta (hours=-5), 'EST')
    autoTester.check (repr (tz2))
    # fix_time keeps the two "now" samples from straddling a minute rollover.
    now = fix_time (datetime.utcnow ())
    now2 = fix_time (datetime.now (timezone.utc))
    autoTester.check (now.day == now2.day)
    autoTester.check (now.hour == now2.hour)
    autoTester.check (now.replace (tzinfo=timezone.utc).astimezone (tz=None).hour)
    # timedelta
    delta = timedelta (days=8, minutes=15, microseconds=685)
    delta2 = timedelta (days=8, minutes=15, microseconds=684)
    autoTester.check (delta)
    autoTester.check (delta2)
    # timedelta comparisons
    autoTester.check (delta == delta2)
    autoTester.check (delta > delta2)
    autoTester.check (delta < delta2)
    # date
    d = date (2017, 5, 5)
    autoTester.check (d.day)
    d = date.today ()
    autoTester.check (d)
    autoTester.check (d.day)
    autoTester.check (d.weekday ())
    autoTester.check (d.isoweekday ())
    autoTester.check (d.isocalendar ())
    autoTester.check (d.ctime ())
    d = d.replace (day=28)
    autoTester.check (d.day)
    autoTester.check (d.strftime ('%Y.%m.%d'))
    autoTester.check (d.ctime ())
    autoTester.check (d.isoformat ())
    # date comparisons
    d2 = d + delta
    d3 = d2 - delta
    autoTester.check (d)
    autoTester.check (d2)
    autoTester.check (d3)
    autoTester.check (d == d3)
    autoTester.check (d > d3)
    autoTester.check (d < d3)
    autoTester.check (d == d2)
    autoTester.check (d > d2)
    autoTester.check (d < d2)
    # datetime
    now = fix_time (datetime.now ())
    autoTester.check (now.day)
    autoTester.check (now.hour)
    autoTester.check ((now + timedelta (days=2)).day)
    d = datetime (2010, 1, 1, tzinfo=timezone.utc)
    autoTester.check (d)
    d = datetime (2017, 9, 19, 15, 43, 8, 142)
    autoTester.check (d)
    autoTester.check (d - timedelta (minutes=150))
    d = datetime.strptime ('2017-03-14 15:28:14', '%Y-%m-%d %H:%M:%S')
    autoTester.check (d)
    autoTester.check (d.strftime ('%Y.%m.%d %H:%M:%S'))
    d = d + timedelta (hours=5, minutes=18, seconds=25)
    autoTester.check (d.strftime ('%Y-%m-%d %H:%M:%S'))
    d = d.replace (year=2016, month=1)
    autoTester.check (d.ctime ())
    autoTester.check (d.isoformat ())
    autoTester.check (d.date ())
    autoTester.check (d.time ())
    # named tuples not supported, need to convert
    autoTester.check (tuple (d.timetuple ()))
    autoTester.check (tuple (d.utctimetuple ()))
    # datetime comparisons
    d2 = d + delta
    d3 = d2 - delta
    autoTester.check (d)
    autoTester.check (d2)
    autoTester.check (d3)
    autoTester.check (d == d3)
    autoTester.check (d > d3)
    autoTester.check (d < d3)
    autoTester.check (d == d2)
    autoTester.check (d > d2)
    autoTester.check (d < d2)
6445404 | <gh_stars>0
# -*- coding: utf-8 -*-
import os
import json
from landez.sources import DownloadError
import mock
import shutil
from io import BytesIO
import zipfile
from django.test import TestCase
from django.conf import settings
from django.core import management
from django.core.management.base import CommandError
from django.http import HttpResponse, StreamingHttpResponse
from django.test.utils import override_settings
from geotrek.common.factories import FileTypeFactory, RecordSourceFactory, TargetPortalFactory, AttachmentFactory, ThemeFactory
from geotrek.common.utils.testdata import get_dummy_uploaded_image, get_dummy_uploaded_file
from geotrek.diving.factories import DiveFactory, PracticeFactory as PracticeDiveFactory
from geotrek.diving.models import Dive
from geotrek.infrastructure.factories import InfrastructureFactory
from geotrek.sensitivity.factories import SensitiveAreaFactory
from geotrek.signage.factories import SignageFactory
from geotrek.trekking.factories import PracticeFactory as PracticeTrekFactory, TrekFactory, TrekWithPublishedPOIsFactory
from geotrek.trekking import models as trek_models
from geotrek.tourism.factories import InformationDeskFactory, TouristicContentFactory, TouristicEventFactory
class SyncRandoTilesTest(TestCase):
@classmethod
def setUpClass(cls):
super(SyncRandoTilesTest, cls).setUpClass()
@mock.patch('landez.TilesManager.tile', return_value='I am a png')
@mock.patch('landez.TilesManager.tileslist', return_value=[(9, 258, 199)])
def test_tiles(self, mock_tileslist, mock_tiles):
output = BytesIO()
management.call_command('sync_rando', 'tmp', url='http://localhost:8000', verbosity=2, stdout=output)
zfile = zipfile.ZipFile(os.path.join('tmp', 'zip', 'tiles', 'global.zip'))
for finfo in zfile.infolist():
ifile = zfile.open(finfo)
self.assertEqual(ifile.readline(), 'I am a png')
self.assertIn("zip/tiles/global.zip", output.getvalue())
@mock.patch('landez.TilesManager.tile', return_value='Error')
@mock.patch('landez.TilesManager.tileslist', return_value=[(9, 258, 199)])
def test_tile_fail(self, mock_tileslist, mock_tiles):
mock_tiles.side_effect = DownloadError
output = BytesIO()
management.call_command('sync_rando', 'tmp', url='http://localhost:8000', verbosity=2, stdout=output)
zfile = zipfile.ZipFile(os.path.join('tmp', 'zip', 'tiles', 'global.zip'))
for finfo in zfile.infolist():
ifile = zfile.open(finfo)
self.assertEqual(ifile.readline(), 'I am a png')
self.assertIn("zip/tiles/global.zip", output.getvalue())
@override_settings(MOBILE_TILES_URL=['http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'])
@mock.patch('landez.TilesManager.tile', return_value='Error')
@mock.patch('landez.TilesManager.tileslist', return_value=[(9, 258, 199)])
def test_multiple_tiles(self, mock_tileslist, mock_tiles):
mock_tiles.side_effect = DownloadError
output = BytesIO()
management.call_command('sync_rando', 'tmp', url='http://localhost:8000', verbosity=2, stdout=output)
zfile = zipfile.ZipFile(os.path.join('tmp', 'zip', 'tiles', 'global.zip'))
for finfo in zfile.infolist():
ifile = zfile.open(finfo)
self.assertEqual(ifile.readline(), 'I am a png')
self.assertIn("zip/tiles/global.zip", output.getvalue())
@override_settings(MOBILE_TILES_URL='http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png')
@mock.patch('landez.TilesManager.tile', return_value='Error')
@mock.patch('landez.TilesManager.tileslist', return_value=[(9, 258, 199)])
def test_tiles_url_str(self, mock_tileslist, mock_tiles):
mock_tiles.side_effect = DownloadError
output = BytesIO()
management.call_command('sync_rando', 'tmp', url='http://localhost:8000', verbosity=2, stdout=output)
zfile = zipfile.ZipFile(os.path.join('tmp', 'zip', 'tiles', 'global.zip'))
for finfo in zfile.infolist():
ifile = zfile.open(finfo)
self.assertEqual(ifile.readline(), 'I am a png')
self.assertIn("zip/tiles/global.zip", output.getvalue())
@mock.patch('geotrek.trekking.models.Trek.prepare_map_image')
@mock.patch('landez.TilesManager.tile', return_value='I am a png')
@mock.patch('landez.TilesManager.tileslist', return_value=[(9, 258, 199)])
def test_tiles_with_treks(self, mock_tileslist, mock_tiles, mock_prepare):
output = BytesIO()
trek = TrekFactory.create(published=True)
management.call_command('sync_rando', 'tmp', url='http://localhost:8000', verbosity=2, stdout=output)
zfile = zipfile.ZipFile(os.path.join('tmp', 'zip', 'tiles', 'global.zip'))
for finfo in zfile.infolist():
ifile = zfile.open(finfo)
self.assertEqual(ifile.readline(), 'I am a png')
self.assertIn("zip/tiles/global.zip", output.getvalue())
zfile_trek = zipfile.ZipFile(os.path.join('tmp', 'zip', 'tiles', '{pk}.zip'.format(pk=trek.pk)))
for finfo in zfile_trek.infolist():
ifile_trek = zfile_trek.open(finfo)
self.assertEqual(ifile_trek.readline(), 'I am a png')
self.assertIn("zip/tiles/{pk}.zip".format(pk=trek.pk), output.getvalue())
def tearDown(self):
shutil.rmtree('tmp')
class SyncRandoFailTest(TestCase):
@classmethod
def setUpClass(cls):
super(SyncRandoFailTest, cls).setUpClass()
def test_fail_directory_not_empty(self):
os.makedirs(os.path.join('tmp', 'other'))
with self.assertRaises(CommandError) as e:
management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
skip_tiles=True, verbosity=2)
self.assertEqual(e.exception.message, "Destination directory contains extra data")
shutil.rmtree(os.path.join('tmp', 'other'))
def test_fail_url_ftp(self):
with self.assertRaises(CommandError) as e:
management.call_command('sync_rando', 'tmp', url='ftp://localhost:8000',
skip_tiles=True, verbosity=2)
self.assertEqual(e.exception.message, "url parameter should start with http:// or https://")
def test_language_not_in_db(self):
with self.assertRaises(CommandError) as e:
management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
skip_tiles=True, languages='cat', verbosity=2)
self.assertEqual(e.exception.message,
"Language cat doesn't exist. Select in these one : ('en', 'es', 'fr', 'it')")
def test_attachments_missing_from_disk(self):
trek_1 = TrekWithPublishedPOIsFactory.create(published_fr=True)
attachment = AttachmentFactory(content_object=trek_1, attachment_file=get_dummy_uploaded_image())
os.remove(attachment.attachment_file.path)
with self.assertRaises(CommandError) as e:
management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
skip_tiles=True, languages='fr', verbosity=2, stdout=BytesIO(), stderr=BytesIO())
self.assertEqual(e.exception.message, 'Some errors raised during synchronization.')
self.assertFalse(os.path.exists(os.path.join('tmp', 'mobile', 'nolang', 'media', 'trekking_trek')))
@mock.patch('geotrek.trekking.models.Trek.prepare_map_image')
@mock.patch('geotrek.trekking.views.TrekViewSet.list')
def test_response_500(self, mocke_list, mocke_map_image):
output = BytesIO()
mocke_list.return_value = HttpResponse(status=500)
TrekWithPublishedPOIsFactory.create(published_fr=True)
with self.assertRaises(CommandError) as e:
management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
skip_tiles=True, verbosity=2, stdout=output, stderr=BytesIO())
self.assertEqual(e.exception.message, 'Some errors raised during synchronization.')
self.assertIn("failed (HTTP 500)", output.getvalue())
@override_settings(MEDIA_URL=9)
def test_bad_settings(self):
output = BytesIO()
TrekWithPublishedPOIsFactory.create(published_fr=True)
with self.assertRaises(AttributeError) as e:
management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
skip_tiles=True, languages='fr', verbosity=2, stdout=output, stderr=BytesIO())
self.assertEqual(e.exception.message, "'int' object has no attribute 'strip'")
self.assertIn("Exception raised in callable attribute", output.getvalue())
def test_sync_fail_src_file_not_exist(self):
output = BytesIO()
theme = ThemeFactory.create()
theme.pictogram = "other"
theme.save()
with self.assertRaises(CommandError) as e:
management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
skip_tiles=True, languages='fr', verbosity=2, stdout=output, stderr=BytesIO())
self.assertEqual(e.exception.message, 'Some errors raised during synchronization.')
self.assertIn("file does not exist", output.getvalue())
@classmethod
def tearDownClass(cls):
super(SyncRandoFailTest, cls).tearDownClass()
shutil.rmtree('tmp')
class SyncSetup(TestCase):
@classmethod
def setUpClass(cls):
if os.path.exists(os.path.join('tmp_sync_rando')):
shutil.rmtree(os.path.join('tmp_sync_rando'))
if os.path.exists(os.path.join('tmp')):
shutil.rmtree(os.path.join('tmp'))
super(SyncSetup, cls).setUpClass()
def setUp(self):
self.source_a = RecordSourceFactory()
self.source_b = RecordSourceFactory()
self.portal_a = TargetPortalFactory()
self.portal_b = TargetPortalFactory()
information_desks = InformationDeskFactory.create()
self.practice_trek = PracticeTrekFactory.create(order=0)
self.trek_1 = TrekWithPublishedPOIsFactory.create(practice=self.practice_trek, sources=(self.source_a, ),
portals=(self.portal_b,),
published=True)
self.trek_1.information_desks.add(information_desks)
self.attachment_1 = AttachmentFactory.create(content_object=self.trek_1,
attachment_file=get_dummy_uploaded_image())
self.trek_2 = TrekFactory.create(sources=(self.source_b,),
published=True)
self.trek_3 = TrekFactory.create(portals=(self.portal_b,
self.portal_a),
published=True)
self.trek_4 = TrekFactory.create(practice=self.practice_trek, portals=(self.portal_a,),
published=True)
self.practice_dive = PracticeDiveFactory.create(order=0)
self.dive_1 = DiveFactory.create(practice=self.practice_dive, sources=(self.source_a,),
portals=(self.portal_b,),
published=True)
self.attachment_dive = AttachmentFactory.create(content_object=self.dive_1,
attachment_file=get_dummy_uploaded_image())
self.dive_2 = DiveFactory.create(sources=(self.source_b,),
published=True)
self.dive_3 = DiveFactory.create(portals=(self.portal_b,
self.portal_a),
published=True)
self.dive_4 = DiveFactory.create(practice=self.practice_dive, portals=(self.portal_a,),
published=True)
self.poi_1 = trek_models.POI.objects.first()
self.attachment_poi_image_1 = AttachmentFactory.create(content_object=self.poi_1,
attachment_file=get_dummy_uploaded_image())
self.attachment_poi_image_2 = AttachmentFactory.create(content_object=self.poi_1,
attachment_file=get_dummy_uploaded_image())
self.attachment_poi_file = AttachmentFactory.create(content_object=self.poi_1,
attachment_file=get_dummy_uploaded_file())
if settings.TREKKING_TOPOLOGY_ENABLED:
infrastructure = InfrastructureFactory.create(no_path=True, name="INFRA_1")
infrastructure.add_path(self.trek_1.paths.first(), start=0, end=0)
signage = SignageFactory.create(no_path=True, name="SIGNA_1")
signage.add_path(self.trek_1.paths.first(), start=0, end=0)
else:
InfrastructureFactory.create(geom='SRID=2154;POINT(700000 6600000)', name="INFRA_1")
SignageFactory.create(geom='SRID=2154;POINT(700000 6600000)', name="SIGNA_1")
SensitiveAreaFactory.create(published=True)
self.touristic_content = TouristicContentFactory(
geom='SRID=%s;POINT(700001 6600001)' % settings.SRID, published=True)
self.touristic_event = TouristicEventFactory(
geom='SRID=%s;POINT(700001 6600001)' % settings.SRID, published=True)
self.attachment_touristic_content = AttachmentFactory.create(content_object=self.touristic_content,
attachment_file=get_dummy_uploaded_image())
self.attachment_touristic_event = AttachmentFactory.create(content_object=self.touristic_event,
attachment_file=get_dummy_uploaded_image())
def tearDown(self):
shutil.rmtree(os.path.join('tmp'))
class SyncTest(SyncSetup):
    """End-to-end checks of the ``sync_rando`` management command output.

    Each test runs the command against the fixtures built in
    ``SyncSetup.setUp`` and inspects the generated geojson files under
    ``tmp/api/<lang>/``.  ``assertEquals`` (a deprecated alias) has been
    replaced with ``assertEqual`` throughout.
    """

    @override_settings(THUMBNAIL_COPYRIGHT_FORMAT=u'*' * 300)
    def test_sync_pictures_long_title_legend_author(self):
        # An overlong copyright format string must not break picture syncing.
        with mock.patch('geotrek.trekking.models.Trek.prepare_map_image'):
            management.call_command('sync_rando', 'tmp', with_signages=True,
                                    with_infrastructures=True, with_dives=True,
                                    with_events=True, content_categories="1", url='http://localhost:8000',
                                    skip_tiles=True, skip_pdf=True, verbosity=2, stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'treks.geojson'), 'r') as f:
            treks = json.load(f)
        # there are 4 treks
        self.assertEqual(len(treks['features']),
                         trek_models.Trek.objects.filter(published=True).count())

    @override_settings(THUMBNAIL_COPYRIGHT_FORMAT=u'{author} รฉร @za,ยฃ')
    def test_sync_pictures_with_accents(self):
        # Non-ASCII characters in the copyright format must be handled.
        with mock.patch('geotrek.trekking.models.Trek.prepare_map_image'):
            management.call_command('sync_rando', 'tmp', with_signages=True,
                                    with_infrastructures=True, with_dives=True,
                                    with_events=True, content_categories="1", url='http://localhost:8000',
                                    skip_tiles=True, skip_pdf=True, verbosity=2, stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'treks.geojson'), 'r') as f:
            treks = json.load(f)
        # there are 4 treks
        self.assertEqual(len(treks['features']),
                         trek_models.Trek.objects.filter(published=True).count())

    @override_settings(SPLIT_TREKS_CATEGORIES_BY_PRACTICE=False, SPLIT_DIVES_CATEGORIES_BY_PRACTICE=False)
    def test_sync_without_pdf(self):
        management.call_command('sync_rando', 'tmp', with_signages=True, with_infrastructures=True,
                                with_dives=True, with_events=True, content_categories="1", url='http://localhost:8000',
                                skip_tiles=True, skip_pdf=True, verbosity=2, stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'treks.geojson'), 'r') as f:
            treks = json.load(f)
        # there are 4 treks
        self.assertEqual(len(treks['features']),
                         trek_models.Trek.objects.filter(published=True).count())
        # NOTE(review): the third argument of assertEqual is the failure
        # *message*; if the intent was to also check that the id equals 'T',
        # that needs its own assertion — confirm.
        self.assertEqual(treks['features'][0]['properties']['category']['id'],
                         treks['features'][3]['properties']['category']['id'],
                         'T')
        self.assertEqual(treks['features'][0]['properties']['name'], self.trek_1.name)
        self.assertEqual(treks['features'][3]['properties']['name'], self.trek_4.name)
        with open(os.path.join('tmp', 'api', 'en', 'dives.geojson'), 'r') as f:
            dives = json.load(f)
        # there are 4 dives
        self.assertEqual(len(dives['features']),
                         Dive.objects.filter(published=True).count())
        self.assertEqual(dives['features'][0]['properties']['category']['id'],
                         dives['features'][3]['properties']['category']['id'],
                         'D')
        self.assertEqual(dives['features'][0]['properties']['name'], self.dive_1.name)
        self.assertEqual(dives['features'][3]['properties']['name'], self.dive_4.name)

    @override_settings(SPLIT_TREKS_CATEGORIES_BY_PRACTICE=True, SPLIT_DIVES_CATEGORIES_BY_PRACTICE=True)
    def test_sync_without_pdf_split_by_practice(self):
        management.call_command('sync_rando', 'tmp', with_signages=True, with_infrastructures=True,
                                with_dives=True, with_events=True, content_categories="1", url='http://localhost:8000',
                                skip_tiles=True, skip_pdf=True, verbosity=2, stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'treks.geojson'), 'r') as f:
            treks = json.load(f)
        # there are 4 treks
        self.assertEqual(len(treks['features']),
                         trek_models.Trek.objects.filter(published=True).count())
        self.assertEqual(treks['features'][0]['properties']['category']['id'],
                         treks['features'][3]['properties']['category']['id'],
                         'T%s' % self.practice_trek.pk)
        self.assertEqual(treks['features'][0]['properties']['name'], self.trek_1.name)
        self.assertEqual(treks['features'][3]['properties']['name'], self.trek_4.name)
        with open(os.path.join('tmp', 'api', 'en', 'dives.geojson'), 'r') as f:
            dives = json.load(f)
        # there are 4 dives
        self.assertEqual(len(dives['features']),
                         Dive.objects.filter(published=True).count())
        self.assertEqual(dives['features'][0]['properties']['category']['id'],
                         dives['features'][3]['properties']['category']['id'],
                         'D%s' % self.practice_dive.pk)
        self.assertEqual(dives['features'][0]['properties']['name'], self.dive_1.name)
        self.assertEqual(dives['features'][3]['properties']['name'], self.dive_4.name)

    def test_sync_https(self):
        # The command must also work with an https base URL.
        with mock.patch('geotrek.trekking.models.Trek.prepare_map_image'):
            management.call_command('sync_rando', 'tmp', with_signages=True, with_infrastructures=True, with_dives=True,
                                    with_events=True, content_categories="1", url='https://localhost:8000',
                                    skip_tiles=True, skip_pdf=True, verbosity=2, stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'treks.geojson'), 'r') as f:
            treks = json.load(f)
        # there are 4 treks
        self.assertEqual(len(treks['features']),
                         trek_models.Trek.objects.filter(published=True).count())

    def test_sync_2028(self):
        # Keep a single trek so features[0] is deterministic.
        self.trek_1.description = u'toto\u2028tata'
        self.trek_1.save()
        self.trek_2.delete()
        self.trek_3.delete()
        self.trek_4.delete()
        with mock.patch('geotrek.trekking.models.Trek.prepare_map_image'):
            management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
                                    skip_tiles=True, skip_pdf=True, verbosity=2, stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'treks.geojson'), 'r') as f:
            treks = json.load(f)
        # \u2028 is translated to \n
        self.assertEqual(treks['features'][0]['properties']['description'], u'toto\ntata')

    @mock.patch('geotrek.trekking.views.TrekViewSet.list')
    def test_streaminghttpresponse(self, mocke):
        # A StreamingHttpResponse from the view must still be synced to disk.
        output = BytesIO()
        mocke.return_value = StreamingHttpResponse()
        trek = TrekWithPublishedPOIsFactory.create(published_fr=True)
        management.call_command('sync_rando', 'tmp', url='http://localhost:8000', skip_pdf=True,
                                skip_tiles=True, verbosity=2, stdout=output)
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'fr', 'treks', str(trek.pk), 'profile.png')))

    def test_sync_filtering_sources(self):
        # source A only
        management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
                                source=self.source_a.name, skip_tiles=True, skip_pdf=True, verbosity=2,
                                stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'treks.geojson'), 'r') as f:
            treks = json.load(f)
        # only 1 trek in Source A
        self.assertEqual(len(treks['features']),
                         trek_models.Trek.objects.filter(published=True,
                                                         source__name__in=[self.source_a.name, ]).count())

    def test_sync_filtering_sources_diving(self):
        # source A only
        management.call_command('sync_rando', 'tmp', url='http://localhost:8000', with_dives=True,
                                source=self.source_a.name, skip_tiles=True, skip_pdf=True, verbosity=2,
                                stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'dives.geojson'), 'r') as f:
            dives = json.load(f)
        # only 1 trek in Source A
        # NOTE(review): the expected count queries Trek, not Dive — the two
        # fixture counts happen to match, but this looks like a copy/paste
        # slip; confirm before changing.
        self.assertEqual(len(dives['features']),
                         trek_models.Trek.objects.filter(published=True,
                                                         source__name__in=[self.source_a.name, ]).count())

    def test_sync_filtering_portals(self):
        # portal B only
        management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
                                portal=self.portal_b.name, skip_tiles=True, skip_pdf=True, verbosity=2,
                                stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'treks.geojson'), 'r') as f:
            treks = json.load(f)
        # only 2 treks in Portal B + 1 without portal specified
        self.assertEqual(len(treks['features']), 3)
        # portal A and B
        management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
                                portal='{},{}'.format(self.portal_a.name, self.portal_b.name),
                                skip_tiles=True, skip_pdf=True, verbosity=2, stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'treks.geojson'), 'r') as f:
            treks = json.load(f)
        # 4 treks have portal A or B or no portal
        self.assertEqual(len(treks['features']), 4)

    def test_sync_filtering_portals_diving(self):
        # portal B only
        management.call_command('sync_rando', 'tmp', url='http://localhost:8000', with_dives=True,
                                portal=self.portal_b.name, skip_tiles=True, skip_pdf=True, verbosity=2,
                                stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'dives.geojson'), 'r') as f:
            dives = json.load(f)
        # only 2 dives in Portal B + 1 without portal specified
        self.assertEqual(len(dives['features']), 3)
        # portal A and B
        management.call_command('sync_rando', 'tmp', url='http://localhost:8000',
                                portal='{},{}'.format(self.portal_a.name, self.portal_b.name), with_dives=True,
                                skip_tiles=True, skip_pdf=True, verbosity=2, stdout=BytesIO())
        with open(os.path.join('tmp', 'api', 'en', 'dives.geojson'), 'r') as f:
            dives = json.load(f)
        # 4 dives have portal A or B or no portal
        self.assertEqual(len(dives['features']), 4)
@mock.patch('geotrek.trekking.models.Trek.prepare_map_image')
@mock.patch('geotrek.diving.models.Dive.prepare_map_image')
@mock.patch('geotrek.tourism.models.TouristicContent.prepare_map_image')
@mock.patch('geotrek.tourism.models.TouristicEvent.prepare_map_image')
class SyncTestPdf(SyncSetup):
    """PDF-generation behaviour of ``sync_rando``.

    The four class-level patches mock out map image preparation for every
    model, so each test method receives four extra mock arguments
    (``event, content, dive, trek`` — innermost decorator first).
    """
    def setUp(self):
        # Extend the shared fixtures with a fifth trek that carries an
        # externally uploaded 'Topoguide' attachment (an external public PDF).
        super(SyncTestPdf, self).setUp()
        self.trek_5 = TrekFactory.create(practice=self.practice_trek, portals=(self.portal_a,),
                                         published=True)
        filetype_topoguide = FileTypeFactory.create(type='Topoguide')
        AttachmentFactory.create(content_object=self.trek_5, attachment_file=get_dummy_uploaded_image(),
                                 filetype=filetype_topoguide)
    @override_settings(ONLY_EXTERNAL_PUBLIC_PDF=True)
    def test_only_external_public_pdf(self, event, content, dive, trek):
        # With ONLY_EXTERNAL_PUBLIC_PDF, no PDF is generated; only trek_5's
        # uploaded topoguide PDF should appear on disk.
        output = BytesIO()
        management.call_command('sync_rando', 'tmp', url='http://localhost:8000', verbosity=2,
                                skip_pdf=False, skip_tiles=True, stdout=output)
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_1.pk), '%s.pdf' % self.dive_1.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_2.pk), '%s.pdf' % self.dive_2.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_3.pk), '%s.pdf' % self.dive_3.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_4.pk), '%s.pdf' % self.dive_4.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_1.pk), '%s.pdf' % self.trek_1.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_2.pk), '%s.pdf' % self.trek_2.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_3.pk), '%s.pdf' % self.trek_3.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_4.pk), '%s.pdf' % self.trek_4.slug)))
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_5.pk), '%s.pdf' % self.trek_5.slug)))
    def test_sync_pdfs(self, event, content, dive, trek):
        # Default settings: every published trek and dive gets a PDF.
        output = BytesIO()
        management.call_command('sync_rando', 'tmp', url='http://localhost:8000', verbosity=2,
                                with_dives=True, skip_tiles=True, stdout=output)
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_1.pk), '%s.pdf' % self.dive_1.slug)))
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_2.pk), '%s.pdf' % self.dive_2.slug)))
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_3.pk), '%s.pdf' % self.dive_3.slug)))
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_4.pk), '%s.pdf' % self.dive_4.slug)))
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_1.pk), '%s.pdf' % self.trek_1.slug)))
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_2.pk), '%s.pdf' % self.trek_2.slug)))
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_3.pk), '%s.pdf' % self.trek_3.slug)))
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_4.pk), '%s.pdf' % self.trek_4.slug)))
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_5.pk), '%s.pdf' % self.trek_5.slug)))
    def test_sync_pdfs_portals_sources(self, event, content, dive, trek):
        output = BytesIO()
        management.call_command('sync_rando', 'tmp', url='http://localhost:8000', verbosity=2,
                                with_dives=True, skip_tiles=True, portal=self.portal_b.name, source=self.source_a.name,
                                stdout=output)
        # It has to be portal b or 'No portal' and source a : only dive_1 and trek_1 has both of these statements
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_1.pk), '%s.pdf' % self.dive_1.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_2.pk), '%s.pdf' % self.dive_2.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_3.pk), '%s.pdf' % self.dive_3.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'dives', str(self.dive_4.pk), '%s.pdf' % self.dive_4.slug)))
        self.assertTrue(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_1.pk), '%s.pdf' % self.trek_1.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_2.pk), '%s.pdf' % self.trek_2.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_3.pk), '%s.pdf' % self.trek_3.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_4.pk), '%s.pdf' % self.trek_4.slug)))
        self.assertFalse(os.path.exists(os.path.join('tmp', 'api', 'en', 'treks', str(self.trek_5.pk), '%s.pdf' % self.trek_5.slug)))
| StarcoderdataPython |
11320640 | import requests
import pandas as pd
from quest import util
from quest.static import ServiceType, DatasetSource
from quest.plugins import ProviderBase, SingleFileServiceBase
class UsgsNlcdServiceBase(SingleFileServiceBase):
    """Base Quest service for NLCD land-cover rasters hosted on ScienceBase.

    Subclasses only set ``service_name`` (the NLCD year) and ``_parent_id``
    (the ScienceBase parent item to query).
    """
    service_type = ServiceType.GEO_DISCRETE
    unmapped_parameters_available = False
    geom_type = 'polygon'
    datatype = 'discrete-raster'
    geographical_areas = ['USA']
    # Approximate CONUS extent: [min_lon, min_lat, max_lon, max_lat].
    bounding_boxes = [[-130.232828, 21.742308, -63.672192, 52.877264]]
    _parameter_map = {
        'landcover': 'landcover'
    }

    def search_catalog(self, **kwargs):
        """Query the ScienceBase catalog and return entries as a DataFrame.

        Returns a DataFrame indexed by ``service_id`` with ``display_name``,
        ``geometry``, ``parameters`` and a ``reserved`` dict describing how
        to download/extract each GeoTIFF.
        """
        base_url = 'https://www.sciencebase.gov/catalog/items'
        params = [
            ('filter', 'tags!=tree canopy'),
            ('filter', 'tags!=Imperviousness'),
            ('filter', 'tags=GeoTIFF'),
            ('max', 1000),
            ('fields', 'webLinks,spatial,title'),
            ('format', 'json'),
            ('parentId', self._parent_id)
        ]
        r = requests.get(base_url, params=params)
        catalog_entries = pd.DataFrame(r.json()['items'])
        # The tag filters above do not reliably exclude these products, so
        # drop them by title as well (single combined pattern instead of
        # three separate passes).
        unwanted = catalog_entries.title.str.contains('Imperv|by State|Tree Canopy')
        catalog_entries = catalog_entries.loc[~unwanted]
        catalog_entries['geometry'] = catalog_entries['spatial'].apply(_bbox2poly)
        catalog_entries['download_url'] = catalog_entries.webLinks.apply(_parse_links)
        catalog_entries['filename'] = catalog_entries['download_url'].str.rsplit('/', n=1, expand=True)[1]
        catalog_entries['reserved'] = catalog_entries.apply(
            lambda x: {'download_url': x['download_url'],
                       'filename': x['filename'],
                       'file_format': 'raster-gdal',
                       'extract_from_zip': '.tif',
                       }, axis=1)
        catalog_entries['parameters'] = 'landcover'
        catalog_entries.rename(columns={'id': 'service_id', 'title': 'display_name'},
                               inplace=True)
        catalog_entries.index = catalog_entries['service_id']
        # remove extra fields. nested dicts can cause problems
        catalog_entries.drop(columns=['relatedItems', 'webLinks', 'spatial', 'link',
                                      'download_url', 'filename'], inplace=True)
        return catalog_entries
class UsgsNlcdService2001(UsgsNlcdServiceBase):
    """NLCD 2001 land-cover service (ScienceBase parent item pinned below)."""
    service_name = '2001'
    display_name = 'NLCD %s Land Cover' % service_name
    description = 'Retrieve NLCD %s' % service_name
    _parent_id = '4f70a45ee4b058caae3f8db9'
class UsgsNlcdService2006(UsgsNlcdServiceBase):
    """NLCD 2006 land-cover service (ScienceBase parent item pinned below)."""
    service_name = '2006'
    display_name = 'NLCD %s Land Cover' % service_name
    description = 'Retrieve NLCD %s' % service_name
    _parent_id = '4f70a46ae4b058caae3f8dbb'
class UsgsNlcdService2011(UsgsNlcdServiceBase):
    """NLCD 2011 land-cover service (ScienceBase parent item pinned below)."""
    service_name = '2011'
    display_name = 'NLCD %s Land Cover' % service_name
    description = 'Retrieve NLCD %s' % service_name
    _parent_id = '513624bae4b03b8ec4025c4d'
class UsgsNlcdProvider(ProviderBase):
    """Quest provider grouping the NLCD 2001/2006/2011 land-cover services."""
    service_list = [UsgsNlcdService2001, UsgsNlcdService2006, UsgsNlcdService2011]
    display_name = 'National Land Cover Database'
    description = 'The National Land Cover Database products are created through a cooperative project conducted by ' \
                  'the Multi-Resolution Land Characteristics (MRLC) Consortium.'
    organization_abbr = 'USGS'
    name = 'usgs-nlcd'
def _bbox2poly(bbox):
    """Convert a ScienceBase ``spatial`` record to a shapely bounding-box polygon."""
    bounds = bbox['boundingBox']
    return util.bbox2poly(bounds['minX'], bounds['minY'],
                          bounds['maxX'], bounds['maxY'], as_shapely=True)
def _parse_links(links):
return [link['uri'] for link in links if link['type'] == 'download'][0]
| StarcoderdataPython |
6481960 | from os.path import dirname
from hamcrest import assert_that, contains
from microcosm.api import create_object_graph
from microcosm_postgres.context import SessionContext, transaction
from microcosm_eventsource.func import last
from microcosm_eventsource.tests.fixtures import Task, TaskEvent, TaskEventType
class TestLast:
    """Tests for the ``last`` window function over an event-sourced history."""
    def setup(self):
        # Build an in-memory object graph and a task with a five-event
        # history: CREATED -> ASSIGNED(Alice) -> STARTED -> REASSIGNED(Bob)
        # -> COMPLETED.
        self.graph = create_object_graph(
            "microcosm_eventsource",
            root_path=dirname(__file__),
            testing=True,
        )
        self.graph.use(
            "task_store",
            "task_event_store",
            "activity_store",
            "activity_event_store",
        )
        self.context = SessionContext(self.graph)
        self.context.recreate_all()
        self.context.open()
        with transaction():
            self.task = Task().create()
            self.created_event = TaskEvent(
                event_type=TaskEventType.CREATED,
                task_id=self.task.id,
            ).create()
            self.assigned_event = TaskEvent(
                assignee="Alice",
                event_type=TaskEventType.ASSIGNED,
                parent_id=self.created_event.id,
                task_id=self.task.id,
            ).create()
            self.started_event = TaskEvent(
                event_type=TaskEventType.STARTED,
                parent_id=self.assigned_event.id,
                task_id=self.task.id,
            ).create()
            self.reassigned_event = TaskEvent(
                assignee="Bob",
                event_type=TaskEventType.REASSIGNED,
                parent_id=self.started_event.id,
                task_id=self.task.id,
            ).create()
            # NOTE(review): this rebinds ``self.reassigned_event`` to the
            # COMPLETED event — presumably it was meant to be named
            # ``self.completed_event``; confirm before renaming.
            self.reassigned_event = TaskEvent(
                event_type=TaskEventType.COMPLETED,
                parent_id=self.reassigned_event.id,
                task_id=self.task.id,
            ).create()
    def teardown(self):
        # Release the session and the connection pool after each test.
        self.context.close()
        self.graph.postgres.dispose()
    def test_last(self):
        # Rows are ordered newest-first; the second column pairs each row's
        # assignee with the output of ``last.of`` for that row.
        rows = self.context.session.query(
            TaskEvent.assignee,
            last.of(TaskEvent.assignee),
        ).order_by(
            TaskEvent.clock.desc(),
        ).all()
        assert_that(rows, contains(
            contains(None, "Bob"),
            contains("Bob", "Bob"),
            contains(None, "Alice"),
            contains("Alice", "Alice"),
            contains(None, None),
        ))
    def test_last_filter_by(self):
        # Same query, but ``last.of`` is restricted to ASSIGNED events only,
        # so Bob's REASSIGNED value never feeds the window.
        rows = self.context.session.query(
            TaskEvent.assignee,
            last.of(
                TaskEvent.assignee,
                TaskEvent.event_type == TaskEventType.ASSIGNED,
            ),
        ).order_by(
            TaskEvent.clock.desc(),
        ).all()
        assert_that(rows, contains(
            contains(None, "Alice"),
            contains("Bob", "Alice"),
            contains(None, "Alice"),
            contains("Alice", "Alice"),
            contains(None, None),
        ))
| StarcoderdataPython |
327018 | from __future__ import absolute_import, division, unicode_literals
from io import open
import os
import tempfile
from wsgiref.util import FileWrapper
from celery import states
from celery.exceptions import Ignore
from celery.task import task
from celery.utils.log import get_task_logger
from django.conf import settings
import itertools
import json
import re
import zipfile
from corehq import toggles
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.hqmedia.cache import BulkMultimediaStatusCache
from corehq.apps.hqmedia.models import CommCareMultimedia
from corehq.util.files import file_extention_from_filename
from dimagi.utils.logging import notify_exception
from corehq.util.soft_assert import soft_assert
from soil import DownloadBase
from django.utils.translation import ugettext as _
from soil.progress import update_task_state
from soil.util import expose_file_download, expose_cached_download
# NOTE(review): this rebinding shadows the stdlib ``logging`` module name in
# this module — later ``logging.<level>`` calls go to the Celery task logger.
logging = get_task_logger(__name__)
# Extensions stored uncompressed in generated zips (already-compressed media).
MULTIMEDIA_EXTENSIONS = ('.mp3', '.wav', '.jpg', '.png', '.gif', '.3gp', '.mp4', '.zip', )
@task(serializer='pickle')
def process_bulk_upload_zip(processing_id, domain, app_id, username=None, share_media=False,
                            license_name=None, author=None, attribution_notes=None):
    """
    Responsible for processing the uploaded zip from Bulk Upload.

    Walks every file in the zip, matches it (case-insensitively) against the
    app's known multimedia paths, attaches matched data to (possibly new)
    CommCareMultimedia documents, and records progress, skips and mismatches
    in the shared BulkMultimediaStatusCache keyed by ``processing_id``.
    """
    status = BulkMultimediaStatusCache.get(processing_id)
    if not status:
        # no download data available, abort
        return
    app = get_app(domain, app_id)
    status.in_celery = True
    status.save()
    uploaded_zip = status.get_upload_zip()
    if not uploaded_zip:
        return
    zipped_files = uploaded_zip.namelist()
    status.total_files = len(zipped_files)
    checked_paths = []
    try:
        save_app = False
        for index, path in enumerate(zipped_files):
            status.update_progress(len(checked_paths))
            checked_paths.append(path)
            file_name = os.path.basename(path)
            try:
                data = uploaded_zip.read(path)
            except Exception as e:
                status.add_unmatched_path(path, _("Error reading file: %s" % e))
                continue
            media_class = CommCareMultimedia.get_class_by_data(data, filename=path)
            if not media_class:
                status.add_skipped_path(path, CommCareMultimedia.get_mime_type(data))
                continue
            app_paths = list(app.get_all_paths_of_type(media_class.__name__))
            app_paths_lower = [p.lower() for p in app_paths]
            form_path = media_class.get_form_path(path, lowercase=True)
            # Match case-insensitively against the app's known media paths.
            if form_path not in app_paths_lower:
                status.add_unmatched_path(path,
                                          _("Did not match any %s paths in application." % media_class.get_nice_name()))
                continue
            index_of_path = app_paths_lower.index(form_path)
            form_path = app_paths[index_of_path]  # this is the correct capitalization as specified in the form
            multimedia = media_class.get_by_data(data)
            if not multimedia:
                status.add_unmatched_path(path,
                                          _("Matching path found, but could not save the data to couch."))
                continue
            is_new = form_path not in app.multimedia_map
            is_updated = multimedia.attach_data(data,
                                                original_filename=file_name,
                                                username=username)
            if not is_updated and not getattr(multimedia, '_id'):
                status.add_unmatched_path(form_path,
                                          _("Matching path found, but didn't save new multimedia correctly."))
                continue
            if is_updated or is_new:
                multimedia.add_domain(domain, owner=True)
                if share_media:
                    multimedia.update_or_add_license(domain, type=license_name, author=author,
                                                     attribution_notes=attribution_notes)
                # Defer the app save until the whole zip has been processed.
                save_app = True
                app.create_mapping(multimedia, form_path, save=False)
                media_info = multimedia.get_media_info(form_path, is_updated=is_updated, original_path=path)
                status.add_matched_path(media_class, media_info)
        if save_app:
            app.save()
        status.update_progress(len(checked_paths))
    except Exception as e:
        # Surface the failure through the status cache so the UI can show it.
        status.mark_with_error(_("Error while processing zip: %s" % e))
    uploaded_zip.close()
    status.complete = True
    status.save()
@task(serializer='pickle')
def build_application_zip(include_multimedia_files, include_index_files, app,
                          download_id, build_profile_id=None, compress_zip=False, filename="commcare.zip",
                          download_targeted_version=False):
    """Build (or reuse) the app's zip/CCZ and expose it as a download.

    When the shared transfer drive is enabled the archive is cached there
    under a name derived from the build options, so identical requests reuse
    the existing file.  Progress is reported through DownloadBase; on
    validation errors the task is marked FAILURE and hard-ignored.
    """
    from corehq.apps.hqmedia.views import iter_app_files
    DownloadBase.set_progress(build_application_zip, 0, 100)
    initial_progress = 10  # early on indicate something is happening
    file_progress = 50.0  # arbitrarily say building files takes half the total time
    errors = []
    compression = zipfile.ZIP_DEFLATED if compress_zip else zipfile.ZIP_STORED
    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    if use_transfer:
        fpath = os.path.join(settings.SHARED_DRIVE_CONF.transfer_dir, "{}{}{}{}{}".format(
            app._id,
            'mm' if include_multimedia_files else '',
            'ccz' if include_index_files else '',
            app.version,
            build_profile_id
        ))
        if download_targeted_version:
            fpath += '-targeted'
    else:
        # mkstemp returns an *open* OS-level file descriptor; close it so it
        # is not leaked (the path is reopened below with open()).
        fd, fpath = tempfile.mkstemp()
        os.close(fd)
    DownloadBase.set_progress(build_application_zip, initial_progress, 100)
    if not (os.path.isfile(fpath) and use_transfer):  # Don't rebuild the file if it is already there
        files, errors, file_count = iter_app_files(
            app, include_multimedia_files, include_index_files, build_profile_id,
            download_targeted_version=download_targeted_version,
        )
        if toggles.CAUTIOUS_MULTIMEDIA.enabled(app.domain):
            # Embed the build parameters in the archive for later debugging.
            manifest = json.dumps({
                'include_multimedia_files': include_multimedia_files,
                'include_index_files': include_index_files,
                'download_id': download_id,
                'build_profile_id': build_profile_id,
                'compress_zip': compress_zip,
                'filename': filename,
                'download_targeted_version': download_targeted_version,
                'app': app.to_json(),
            }, indent=4)
            files = itertools.chain(files, [('manifest.json', manifest)])
        with open(fpath, 'wb') as tmp:
            with zipfile.ZipFile(tmp, "w") as z:
                progress = initial_progress
                for path, data in files:
                    # don't compress multimedia files
                    extension = os.path.splitext(path)[1]
                    file_compression = zipfile.ZIP_STORED if extension in MULTIMEDIA_EXTENSIONS else compression
                    z.writestr(path, data, file_compression)
                    progress += file_progress / file_count
                    DownloadBase.set_progress(build_application_zip, progress, 100)
        # Integrity check that all media files present in media_suite.xml were added to the zip
        if include_multimedia_files and include_index_files and toggles.CAUTIOUS_MULTIMEDIA.enabled(app.domain):
            with open(fpath, 'rb') as tmp:
                with zipfile.ZipFile(tmp, "r") as z:
                    media_suites = [f for f in z.namelist() if re.search(r'\bmedia_suite.xml\b', f)]
                    if len(media_suites) != 1:
                        message = _('Could not identify media_suite.xml in CCZ')
                        errors.append(message)
                    else:
                        with z.open(media_suites[0]) as media_suite:
                            from corehq.apps.app_manager.xform import parse_xml
                            parsed = parse_xml(media_suite.read())
                            resources = {node.text for node in
                                         parsed.findall("media/resource/location[@authority='local']")}
                            names = z.namelist()
                            missing = [r for r in resources if re.sub(r'^\.\/', '', r) not in names]
                            errors += [_('Media file missing from CCZ: {}').format(r) for r in missing]
        if errors:
            os.remove(fpath)
            update_task_state(build_application_zip, states.FAILURE, {'errors': errors})
            raise Ignore()  # We want the task to fail hard, so ignore any future updates to it
    else:
        DownloadBase.set_progress(build_application_zip, initial_progress + file_progress, 100)
    common_kwargs = {
        'mimetype': 'application/zip' if compress_zip else 'application/x-zip-compressed',
        'content_disposition': 'attachment; filename="{fname}"'.format(fname=filename),
        'download_id': download_id,
        'expiry': (1 * 60 * 60),
    }
    if use_transfer:
        expose_file_download(
            fpath,
            use_transfer=use_transfer,
            **common_kwargs
        )
    else:
        expose_cached_download(
            FileWrapper(open(fpath, 'rb')),
            file_extension=file_extention_from_filename(filename),
            **common_kwargs
        )
    DownloadBase.set_progress(build_application_zip, 100, 100)
| StarcoderdataPython |
6666562 | <gh_stars>1-10
import numpy as np
from pyrodash.blocks import Arrow
class Spins:
    """
    Class to build and draw the spins of an Up Tetrahedron.

    From the spin values passed to the constructor, generates the spin
    axes and their colors, and uses the Arrow class to build and draw
    the four spins.

    Attributes
    ----------
    base_spin_axes : numpy array
        class attribute with base vectors of the spin axes of an Up
        Tetrahedron (unit vectors along the cube diagonals).
    positions : numpy array
        arrays with the x, y, z coordinates of each spin position.
    s1234 : numpy array
        spin values (each +1 or -1).
    axes : numpy array
        spin axes (base axes flipped by the sign of each spin).
    colors : list of str
        spin colors ("blue" for +1, "black" for -1).
    arrows : list of Arrow object
        Arrow instances used as spins.
    surfaces : list of plotly go
        plotly objects of the drawn spins.
    """

    base_spin_axes = np.array(
        [[1, 1, 1], [1, -1, -1], [-1, -1, 1], [-1, 1, -1]]
    ) / np.sqrt(3)

    def __init__(self, positions, s1234):
        """
        Parameters
        ----------
        positions : numpy array
            arrays with the x, y, z coordinates of each spin position.
        s1234 : list of int or numpy array
            spin values; each must be +1 or -1.

        Raises
        ------
        ValueError
            if any spin value is not +1 or -1.
        """
        self.positions = positions
        # Guard clause (the former `else:` after raise was unnecessary):
        # validate before any derived state is built.
        if not all(s == 1 or s == -1 for s in s1234):
            raise ValueError("spin values must be 1 or -1")
        self.s1234 = np.array(s1234)
        # Multiplying by diag(s) flips each base axis by its spin's sign.
        self.axes = np.diag(self.s1234) @ Spins.base_spin_axes
        self.colors = ["blue" if s == 1 else "black" for s in self.s1234]
        self.arrows = [
            Arrow(pos, 0.013, 0.22, axis, surface_color=color)
            for pos, axis, color in zip(self.positions, self.axes, self.colors)
        ]
        self.surfaces = [surface for arrow in self.arrows for surface in arrow.surface]
if __name__ == "__main__":
    # Stand-alone demo: render one Up Tetrahedron's spins in a Dash app.
    import dash
    import dash_core_components as dcc
    import dash_html_components as html
    import plotly.graph_objects as go
    # Spin positions at the four vertices of an Up Tetrahedron.
    positions = np.array(
        [
            [0.125, 0.125, 0.125],
            [0.125, -0.125, -0.125],
            [-0.125, -0.125, 0.125],
            [-0.125, 0.125, -0.125],
        ]
    )
    s = Spins(positions, [1, -1, 1, 1])
    fig = go.Figure(data=s.surfaces)
    external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
    app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
    app.layout = html.Div(dcc.Graph(figure=fig))
    app.run_server(debug=True)
| StarcoderdataPython |
9781160 | """Internal API endpoint constant library.
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
# The Falcon Complete Dashboard endpoints fall into two families that share
# identical parameter lists (POST aggregates take one required body payload;
# GET queries take the same four query-string parameters). The lists are
# produced by the factory helpers below instead of being repeated inline 14
# times. Each call returns a fresh list, preserving the original behavior
# of no two endpoint entries sharing (aliasing) the same mutable objects.
def _aggregate_body_params():
    """Return a fresh parameter list for the aggregate (POST) endpoints."""
    return [
        {
            "name": "body",
            "in": "body",
            "required": True
        }
    ]


def _scrolling_query_params():
    """Return a fresh parameter list for the scrolling query (GET) endpoints."""
    return [
        {
            "type": "integer",
            "description": "The maximum records to return. [1-500]",
            "name": "limit",
            "in": "query"
        },
        {
            "type": "string",
            "description": "The property to sort on, followed by a dot (.), "
                           "followed by the sort direction, either \"asc\" or \"desc\".",
            "name": "sort",
            "in": "query"
        },
        {
            "type": "string",
            "description": "Optional filter and sort criteria in the form of an FQL query. "
                           "For more information about FQL queries, see [our FQL documentation in Falcon]"
                           "(https://falcon.crowdstrike.com/support/documentation/45/falcon-query-language-feature-guide).",
            "name": "filter",
            "in": "query"
        },
        {
            "type": "string",
            "description": "Starting index of overall result set from which to return ids.",
            "name": "offset",
            "in": "query"
        }
    ]


# Each entry: [operation id, HTTP method, endpoint path, description,
#              service collection name, parameter list].
_falcon_complete_dashboard_endpoints = [
    [
        "AggregateAllowList",
        "POST",
        "/falcon-complete-dashboards/aggregates/allowlist/GET/v1",
        "Retrieve aggregate allowlist ticket values based on the matched filter",
        "falcon_complete_dashboard",
        _aggregate_body_params()
    ],
    [
        "AggregateBlockList",
        "POST",
        "/falcon-complete-dashboards/aggregates/blocklist/GET/v1",
        "Retrieve aggregate blocklist ticket values based on the matched filter",
        "falcon_complete_dashboard",
        _aggregate_body_params()
    ],
    [
        "AggregateDetections",
        "POST",
        "/falcon-complete-dashboards/aggregates/detects/GET/v1",
        "Retrieve aggregate detection values based on the matched filter",
        "falcon_complete_dashboard",
        _aggregate_body_params()
    ],
    [
        "AggregateDeviceCountCollection",
        "POST",
        "/falcon-complete-dashboards/aggregates/devicecount-collections/GET/v1",
        "Retrieve aggregate host/devices count based on the matched filter",
        "falcon_complete_dashboard",
        _aggregate_body_params()
    ],
    [
        "AggregateEscalations",
        "POST",
        "/falcon-complete-dashboards/aggregates/escalations/GET/v1",
        "Retrieve aggregate escalation ticket values based on the matched filter",
        "falcon_complete_dashboard",
        _aggregate_body_params()
    ],
    [
        "AggregateFCIncidents",
        "POST",
        "/falcon-complete-dashboards/aggregates/incidents/GET/v1",
        "Retrieve aggregate incident values based on the matched filter",
        "falcon_complete_dashboard",
        _aggregate_body_params()
    ],
    [
        "AggregateRemediations",
        "POST",
        "/falcon-complete-dashboards/aggregates/remediations/GET/v1",
        "Retrieve aggregate remediation ticket values based on the matched filter",
        "falcon_complete_dashboard",
        _aggregate_body_params()
    ],
    [
        "QueryAllowListFilter",
        "GET",
        "/falcon-complete-dashboards/queries/allowlist/v1",
        "Retrieve allowlist tickets that match the provided filter criteria with scrolling enabled",
        "falcon_complete_dashboard",
        _scrolling_query_params()
    ],
    [
        "QueryBlockListFilter",
        "GET",
        "/falcon-complete-dashboards/queries/blocklist/v1",
        # Typo fixed: the original read "block listtickets".
        "Retrieve block list tickets that match the provided filter criteria with scrolling enabled",
        "falcon_complete_dashboard",
        _scrolling_query_params()
    ],
    [
        "QueryDetectionIdsByFilter",
        "GET",
        "/falcon-complete-dashboards/queries/detects/v1",
        "Retrieve DetectionsIds that match the provided FQL filter, criteria with scrolling enabled",
        "falcon_complete_dashboard",
        _scrolling_query_params()
    ],
    [
        "GetDeviceCountCollectionQueriesByFilter",
        "GET",
        "/falcon-complete-dashboards/queries/devicecount-collections/v1",
        "Retrieve device count collection Ids that match the provided FQL filter, "
        "criteria with scrolling enabled",
        "falcon_complete_dashboard",
        _scrolling_query_params()
    ],
    [
        "QueryEscalationsFilter",
        "GET",
        "/falcon-complete-dashboards/queries/escalations/v1",
        "Retrieve escalation tickets that match the provided filter criteria with scrolling enabled",
        "falcon_complete_dashboard",
        _scrolling_query_params()
    ],
    [
        "QueryIncidentIdsByFilter",
        "GET",
        "/falcon-complete-dashboards/queries/incidents/v1",
        "Retrieve incidents that match the provided filter criteria with scrolling enabled",
        "falcon_complete_dashboard",
        _scrolling_query_params()
    ],
    [
        "QueryRemediationsFilter",
        "GET",
        "/falcon-complete-dashboards/queries/remediations/v1",
        "Retrieve remediation tickets that match the provided filter criteria with scrolling enabled",
        "falcon_complete_dashboard",
        _scrolling_query_params()
    ]
]
| StarcoderdataPython |
3225373 | # -*- coding: utf-8 -*-
# The dos-azul-lambda request handling stack is generally structured like so:
#
# /\ * Endpoint handlers, named for the DOS operation converted to
# /__\ snake case (e.g. list_data_bundles).
# / \ * ElasticSearch helper functions that implement common query types
# /______\ such as matching on a certain field, with names matching `azul_*`
# / \ * :func:`make_es_request`, and :func:`es_query`, which make the
# /__________\ actual ElasticSearch requests using :mod:`requests`
#
# Error catching should be handled as follows:
# * Functions that return :class:`~chalice.Response` objects should raise
# Chalice exceptions where appropriate. Chalice exceptions will halt
# control flow and return a response with an appropriate error code and
# a nice message.
# * Functions that don't return :class:`~chalice.Response` objects should
# raise builtin exceptions where appropriate. Those exceptions should be
# caught by the aforementioned and either ignored or replaced with Chalice
# exceptions.
# * Endpoint handlers should raise exceptions consistent with the DOS schema.
# * Between all of this, exception logging should occur at the lowest level,
# next to where an exception is raised. This generally means
# :func:`make_es_request` and :func:`es_query`.
import datetime
import json
import logging
import os
from chalice import Chalice, Response, BadRequestError, UnauthorizedError, \
NotFoundError, ChaliceViewError
from boto.connection import AWSAuthConnection
import ga4gh.dos.client
import ga4gh.dos.schema
import pytz
# Module-wide logger for the lambda.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('dos-azul-lambda')
# We only need the client for the models, so we can provide any URL
# (local=True means no network traffic is generated by this client).
dos_client = ga4gh.dos.client.Client(url='https://example.com/abc', local=True)
def model(model_name, **kwargs):
    """Instantiate the DOS model named *model_name* with the given fields."""
    model_cls = dos_client.models.get_model(model_name)
    return model_cls(**kwargs)
def parse_azul_date(azul_date):
    """
    Parse a timestamp string as stored in Azul into an aware UTC datetime.

    Azul stores dates inconsistently (with or without colons in the time
    part, with or without a trailing 'Z'), so the string is normalized
    before parsing with a single format string.

    :param str azul_date: the raw date string from an Azul document
    :rtype: datetime.datetime
    :returns: the parsed timestamp with ``tzinfo`` set to UTC
    """
    # Process the string first to account for inconsistencies in date
    # storage in Azul: drop any trailing 'Z', strip colons from the time
    # part, then re-append 'Z'.
    date = azul_date.rstrip('Z').replace(':', '') + 'Z'
    date = datetime.datetime.strptime(date, '%Y-%m-%dT%H%M%S.%fZ')
    # datetime.timezone.utc is the stdlib equivalent of pytz.utc (same
    # zero offset), avoiding the deprecated pytz dependency here.
    return date.replace(tzinfo=datetime.timezone.utc)
def azul_to_obj(result):
    """
    Convert an Azul ElasticSearch hit into a DOS data object.

    :param result: the ElasticSearch result dictionary
    :rtype: DataObject
    """
    azul = result['_source']
    # Azul only tracks one timestamp, so it serves as both created/updated.
    last_modified = parse_azul_date(azul['lastModified'])
    return model(
        model_name='DataObject',
        id=azul['file_id'],
        name=azul['title'],
        # fileSize may be absent; DOS expects a string either way.
        size=str(azul.get('fileSize', '')),
        created=last_modified,
        updated=last_modified,
        version=azul['file_version'],
        checksums=[model('Checksum', checksum=azul['fileMd5sum'], type='md5')],
        urls=[model('URL', url=u) for u in azul['urls']],
        aliases=azul['aliases'],
    )
def obj_to_azul(data_object):
    """
    Convert a DOS data object (as a dict) into an Azul-style document.

    :rtype: dict
    """
    # 'updated' is optional in DOS but 'created' is not; prefer 'updated'.
    raw_date = data_object.get('updated', data_object['created']).replace(':', '')
    parsed = datetime.datetime.strptime(raw_date, '%Y-%m-%dT%H%M%S.%f+0000')
    azul_date = parsed.replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H%M%S.%fZ')
    first_checksum = data_object['checksums'][0]
    # Azul only stores MD5 checksums.
    md5 = first_checksum['checksum'] if first_checksum['type'] == 'md5' else ''
    return {
        'file_id': data_object['id'],
        'title': data_object.get('name', ''),  # name is optional
        'fileSize': data_object.get('size', ''),
        'lastModified': azul_date,
        'file_version': data_object.get('version'),
        'fileMd5sum': md5,
        'urls': [entry['url'] for entry in data_object['urls']],
        'aliases': data_object.get('aliases'),  # aliases are optional
    }
def azul_to_bdl(result):
    """
    Convert an Azul ElasticSearch hit into a DOS data bundle.

    :param result: the ElasticSearch result dictionary
    :return: DataBundle
    """
    azul = result['_source']
    bundle = model(
        model_name='DataBundle',
        id=azul['id'],
        data_object_ids=azul['data_object_ids'],
        created=parse_azul_date(azul['created']),
        updated=parse_azul_date(azul['updated']),
        version=azul['version'],
        description=azul.get('description', ''),  # optional field
        aliases=azul.get('aliases', ''),  # optional field
    )
    # Azul stores checksums as '<checksum>:<type>' strings; split them back
    # into structured DOS Checksum models.
    bundle.checksums = []
    for entry in azul['checksums']:
        value, kind = entry.split(':', 1)
        bundle.checksums.append(model('Checksum', checksum=value, type=kind))
    return bundle
def check_auth():
    """
    Check the ``access_token`` key in the current request's headers.

    :return: True if ``access_token`` matches the configured token
    :rtype: bool
    """
    provided = app.current_request.headers.get('access_token', None)
    return provided == access_token
class ESConnection(AWSAuthConnection):
    """boto connection that signs requests for the AWS ElasticSearch service."""
    def __init__(self, region, **kwargs):
        # :param str region: AWS region of the ElasticSearch domain
        # :param kwargs: forwarded to AWSAuthConnection (e.g. host, is_secure)
        super(ESConnection, self).__init__(**kwargs)
        self._set_auth_region_name(region)
        self._set_auth_service_name('es')
    def _required_auth_capability(self):
        # Sign requests with AWS Signature Version 4.
        return ['hmac-v4']
DEFAULT_REGION = 'us-west-2'
# NOTE(review): a hard-coded default token means deployments that forget to
# set ACCESS_KEY are protected only by a publicly-visible value -- consider
# requiring the environment variable instead.
DEFAULT_ACCESS_TOKEN = 'f4ce9d3d23f4ac9dfdc3c825608dc660'
# ElasticSearch index names for data objects and data bundles.
INDEXES = {
    'data_obj': os.environ.get('DATA_OBJ_INDEX', 'fb_index'),
    'data_bdl': os.environ.get('DATA_BDL_INDEX', 'db_index'),
}
# ElasticSearch document types within those indexes.
DOCTYPES = {
    'data_obj': os.environ.get('DATA_OBJ_DOCTYPE', 'meta'),
    'data_bdl': os.environ.get('DATA_BDL_DOCTYPE', 'databundle'),
}
try:
    es_host = os.environ['ES_HOST']
except KeyError:
    raise RuntimeError("You must specify the domain name of your ElasticSearch"
                       " instance with the ES_HOST environment variable.")
es_region = os.environ.get('ES_REGION', DEFAULT_REGION)
# Token compared against the `access_token` request header (see check_auth);
# note the environment variable is named ACCESS_KEY.
access_token = os.environ.get('ACCESS_KEY', DEFAULT_ACCESS_TOKEN)
client = ESConnection(region=es_region, host=es_host, is_secure=False)
app = Chalice(app_name='dos-azul-lambda')
# DEBUG must be the literal string 'True' to enable debug mode.
app.debug = os.environ.get('DEBUG', False) == 'True'
base_path = '/ga4gh/dos/v1'
@app.route('/', cors=True)
def index():
    """Health-check endpoint: proxy the ElasticSearch root document."""
    response = make_es_request(method='GET', path='/')
    return response.read()
@app.route('/test_token', methods=["GET", "POST"], cors=True)
def test_token():
    """
    A convenience endpoint for testing whether an access token is active.

    Returns a JSON body with an `authorized` boolean; HTTP 200 when the
    token is valid, 401 otherwise.
    """
    authorized = check_auth()
    return Response({'authorized': authorized},
                    status_code=200 if authorized else 401)
def make_es_request(**kwargs):
    """
    Wrapper around :meth:`ESConnection.make_request` that checks if the
    request was completed successfully.

    :param kwargs: same as arguments to :meth:`ESConnection.make_request`
    :raises RuntimeError: if the request does not return HTTP 200
    :returns: the successful HTTP response object
    """
    request = "%s %s" % (kwargs['method'], kwargs['path'])
    logger.debug(request + " " + kwargs.get('data', ""))
    response = client.make_request(**kwargs)
    if response.status != 200:
        data = json.loads(response.read())
        # Azul/ES error payloads are inconsistent; try the common keys first.
        data = data.get('Message', '') or data.get('reason', '') or repr(data)
        # Typo fixed in the message below ("expcted" -> "expected").
        msg = "%s returned code %d, expected 200: %s" % (request, response.status, data)
        # logger.error, not logger.exception: there is no active exception
        # here, so exception() would log a bogus "NoneType: None" traceback.
        logger.error(msg)
        raise RuntimeError(msg)
    # If `app.debug=False` (which it is for deployments), an uncaught
    # exception will cause the server to automatically return a 500 response
    # with a nice error message and internally log a traceback.
    return response
def es_query(index, **query):
    """
    Queries the configured ElasticSearch instance and returns the
    results as a list of dictionaries.

    :param query: key-value pairs to insert into the ElasticSearch query
    :param str index: the name of the index to query
    :raises RuntimeError: if the response from the ElasticSearch instance
                          loads successfully but can't be understood by
                          dos-azul-lambda
    :rtype: list
    """
    logger.debug("Querying index %s with query %r" % (index, query))
    result = make_es_request(method='GET', data=json.dumps(query),
                             path='/{index}/_search'.format(index=index))
    response = json.loads(result.read())
    try:
        hits = response['hits']['hits']
    except KeyError:
        # Use the already-parsed response here: the original formatted the
        # message with a comma (producing a tuple, not a string) and called
        # result.read() a second time on an already-consumed stream.
        msg = "ElasticSearch returned an unexpected response: %r" % response
        logger.exception(msg)
        raise RuntimeError(msg)
    return hits
def azul_match_field(index, key, val, size=1):
    """
    Wrapper function around :func:`es_query`. Should be used for queries
    where you expect only one result (e.g. GetDataBundle).

    :param str index: the name of the index to query
    :param str key: the key of the field to match against
    :param str val: the value of the field to match against
    :param int size: the amount of results to return
    :raises LookupError: if no results are returned
    :rtype: :class:`AzulDocument`
    """
    dsl = {'bool': {'must': {'term': {key: val}}}}
    hits = es_query(index=index, size=size, query=dsl)
    if not hits:
        # Not logged as an error: a user may simply have requested a
        # document that does not exist.
        raise LookupError("Query returned no results")
    return hits[0]
def azul_match_alias(index, alias, from_=None, size=10):
    """
    Wrapper function around :func:`es_query`. By default, this function
    will return more than one result (intended for usage in
    ListDataObjects, etc.).

    :param str index: the name of the index to query
    :param str alias: the alias value to match against
    :param str from_: starting offset within the result set (page token)
    :param int size: the amount of results to return
    :rtype: list
    """
    # Bug fix: 'from' is a top-level key of the ElasticSearch search body
    # (a sibling of 'query' and 'size'), not part of the query clause; the
    # original inserted it into the term query itself, which ES rejects.
    # This matches how list_data_objects builds its paged query.
    body = {'query': {'term': {'aliases.keyword': alias}}, 'size': size}
    if from_:
        body['from'] = from_
    # es_query will raise a RuntimeError if it doesn't understand the ES
    # response. There isn't really any other exception we can check for here.
    return es_query(index=index, **body)
def azul_get_document(key, val, name, es_index, map_fn, model):
    """
    Queries ElasticSearch for a single document and returns a
    :class:`~chalice.Response` object with the retrieved data. Wrapper
    around :func:`azul_match_field`. Implements lookup functionality used
    in :func:`get_data_object` and :func:`get_data_bundle`.

    :param str key: the key to search for in the given ElasticSearch index
    :param str val: the value to search for in the given ElasticSearch index
    :param str name: the key the document should be returned under
    :param str es_index: the name of the index to query in ElasticSearch
    :param callable map_fn: function mapping the returned Azul document to a
                            DOS format
    :param model: DOS response model (note: shadows the module-level
                  :func:`model` helper within this function)
    :raises NotFoundError: if no matching document exists
    :raises ChaliceViewError: if the ElasticSearch response is not understood
    :rtype: :class:`chalice.Response`
    :returns: the retrieved data or the error state
    """
    try:
        data = azul_match_field(index=es_index, key=key, val=val)
        data = map_fn(data)
        # Double check to verify identity
        if data['id'] != val:
            raise LookupError("ID mismatch in results")
    except LookupError:
        # azul_match_field will also raise a LookupError if no results are returned.
        # This isn't really an error, as a user requesting an object that could
        # not be found is generally not unexpected.
        raise NotFoundError("No results found for type %s and ID %s." % (name, val))
    except RuntimeError:
        # es_query will raise a RuntimeError if it doesn't understand the ES
        # response. It is logged in :func:`es_query`
        raise ChaliceViewError("Received an unexpected response from Azul.")
    except Exception:
        # If anything else happens...
        # Bug fix: `func_name` is Python 2 only; `__name__` is the portable
        # spelling of the function's name.
        logger.exception("Unexpected error attempting to retrieve {name} "
                         "{key}={val} from index {es_index} using transformer"
                         " {fn}".format(name=name, key=key, val=val,
                                        es_index=es_index, fn=map_fn.__name__))
        raise ChaliceViewError("There was a problem communicating with Azul.")
    return Response(model(**{name: data}).marshal(), status_code=200)
@app.route(base_path + '/dataobjects/{data_object_id}', methods=['GET'], cors=True)
def get_data_object(data_object_id):
    """
    Gets a data object by file identifier by making a query against the
    configured data object index and returns the first matching file.

    :param data_object_id: the id of the data object
    :rtype: DataObject
    """
    response_model = dos_client.models.get_model('GetDataObjectResponse')
    return azul_get_document(key='file_id', val=data_object_id,
                             name='data_object', map_fn=azul_to_obj,
                             es_index=INDEXES['data_obj'], model=response_model)
@app.route(base_path + '/databundles/{data_bundle_id}', methods=['GET'], cors=True)
def get_data_bundle(data_bundle_id):
    """
    Gets a data bundle by its identifier by making a query against the
    configured data bundle index. Returns the first matching file.

    :param data_bundle_id: the id of the data bundle
    :rtype: DataBundle
    """
    response_model = dos_client.models.get_model('GetDataBundleResponse')
    return azul_get_document(key='id', val=data_bundle_id,
                             name='data_bundle', map_fn=azul_to_bdl,
                             es_index=INDEXES['data_bdl'], model=response_model)
@app.route(base_path + '/dataobjects', methods=['GET'], cors=True)
def list_data_objects(**kwargs):
    """
    Page through the data objects index and return data objects,
    respecting an alias, checksum, or url request if it is made.

    Query parameters: page_size, page_token, alias, checksum,
    checksum_type, url.
    :rtype: ListDataObjectsResponse
    """
    req_body = app.current_request.query_params or {}
    per_page = int(req_body.get('page_size', 10))
    # Build the query. If multiple criteria are specified, returned objects
    # should match all of the provided criteria (logical AND).
    # size is per_page + 1: the extra hit tells us whether a next page exists.
    query = {'query': {}, 'size': per_page + 1}
    if 'page_token' in req_body: # for paging
        query['from'] = req_body['page_token'] or 0
    if 'alias' in req_body or 'checksum' in req_body or 'url' in req_body:
        query['query']['bool'] = {'filter': []}
        # Azul only stores MD5s so there are no results if checksum_type != md5
        # NOTE(review): this early return has a different shape than the
        # marshaled response below (no next_page_token key), and the
        # checksum_type check is skipped entirely when no alias/checksum/url
        # filter is present -- confirm both are intended.
        if 'checksum_type' in req_body and req_body['checksum_type'].lower() != 'md5':
            return {'data_objects': []}
        if 'alias' in req_body:
            query['query']['bool']['filter'].append({
                'term': {
                    'aliases.keyword': {'value': req_body['alias']}
                }
            })
        if 'checksum' in req_body:
            query['query']['bool']['filter'].append({
                'term': {
                    'fileMd5sum.keyword': {'value': req_body['checksum']}
                }
            })
        if 'url' in req_body:
            query['query']['bool']['filter'].append({
                'term': {
                    'urls.keyword': {'value': req_body['url']}
                }
            })
    else: # if no query parameters are provided
        query['query']['match_all'] = {}
    results = es_query(index=INDEXES['data_obj'], **query)
    response = model('ListDataObjectsResponse')
    # Return at most per_page hits; the extra hit (if any) only signals
    # that another page exists.
    response.data_objects = [azul_to_obj(x) for x in results[:per_page]]
    if len(results) > per_page:
        response.next_page_token = str(int(req_body.get('page_token', 0)) + 1)
    return response.marshal()
@app.route(base_path + '/databundles', methods=['GET'], cors=True)
def list_data_bundles(**kwargs):
    """
    Page through the data bundles index and return data bundles,
    respecting an alias request if it is made.

    Query parameters: page_size, page_token, alias.
    :rtype: ListDataBundlesResponse
    """
    req_body = app.current_request.query_params or {}
    # page_token defaults to int 0 but arrives as a string from the query
    # params; both forms are handled below.
    page_token = req_body.get('page_token', 0)
    per_page = int(req_body.get('page_size', 10))
    # Fetch one extra hit to determine whether another page exists.
    if req_body.get('alias', None):
        results = azul_match_alias(index=INDEXES['data_bdl'],
                                   alias=req_body['alias'], size=per_page + 1,
                                   from_=page_token if page_token != 0 else None)
    else:
        results = es_query(query={}, index=INDEXES['data_bdl'], size=per_page + 1)
    response = model('ListDataBundlesResponse')
    response.data_bundles = [azul_to_bdl(x) for x in results[:per_page]]
    if len(results) > per_page:
        response.next_page_token = str(int(page_token) + 1)
    return response.marshal()
@app.route(base_path + '/dataobjects/{data_object_id}', methods=['PUT'], cors=True)
def update_data_object(data_object_id):
    """
    Updates a data object. The data object must exist.

    :param data_object_id: the id of the data object to update
    :raises UnauthorizedError: if no valid access token was supplied
    :raises NotFoundError: if the data object does not exist
    :raises BadRequestError: if the request body has no data_object
    """
    # Authentication comes first.
    if not check_auth():
        raise UnauthorizedError("You're not authorized to use this service. "
                                "Did you set access_token in the request headers?")
    # The target data object must already exist.
    try:
        source = azul_match_field(index=INDEXES['data_obj'],
                                  key='file_id', val=data_object_id)
    except LookupError:
        raise NotFoundError("Data object not found.")
    # A replacement data object must be supplied in the request body.
    body = app.current_request.json_body
    if not body or not body.get('data_object', None):
        raise BadRequestError("Please add a data_object to the body of your request.")
    # All checks passed; issue the ElasticSearch partial-update request.
    update_path = '/{}/{}/{}/_update'.format(INDEXES['data_obj'],
                                             DOCTYPES['data_obj'],
                                             source['_id'])
    payload = json.dumps({'doc': obj_to_azul(body['data_object'])})
    make_es_request(method='POST', path=update_path, data=payload)
    return model('UpdateDataObjectResponse',
                 data_object_id=data_object_id).marshal()
@app.route('/swagger.json', cors=True)
def swagger():
    """
    An endpoint for returning the Swagger API description.
    """
    spec = ga4gh.dos.schema.from_chalice_routes(app.routes)
    spec['basePath'] = '/api/ga4gh/dos/v1'
    return spec
| StarcoderdataPython |
9762073 | <reponame>Venafi/pytpp<gh_stars>1-10
from pytpp.attributes._helper import IterableMeta, Attribute
from pytpp.attributes.metadata_base import MetadataBaseAttributes
class MetadataListAttributes(MetadataBaseAttributes, metaclass=IterableMeta):
    # TPP configuration class this attribute set corresponds to.
    __config_class__ = "Metadata List"
    # Exposes the "Single" attribute of the Metadata List configuration class.
    single = Attribute('Single')
| StarcoderdataPython |
def parse_points(x_line, y_line):
    """Pair up two whitespace-separated coordinate lines into (x, y) tuples.

    :param str x_line: whitespace-separated x coordinates
    :param str y_line: whitespace-separated y coordinates
    :returns: list of (x, y) float tuples, truncated to the shorter line
    """
    x_nums = list(map(float, x_line.split()))
    y_nums = list(map(float, y_line.split()))
    # zip pairs coordinates positionally, replacing the commented-out
    # index-based loop from the original.
    return list(zip(x_nums, y_nums))


def main():
    """Read two coordinate lines from stdin and print the point list."""
    points = parse_points(input(), input())
    print(points)
    # First point's x coordinate (raises IndexError on empty input, as
    # the original did).
    print(points[0][0])


if __name__ == "__main__":
    main()
5076343 | # ref: https://www.youtube.com/watch?v=O20Y1XR6g0A&list=PLoVvAgF6geYMb029jpxqMuz5dRDtO0ydM&index=4
#import os
from influxdb import InfluxDBClient
from config import HOST, PORT, USERNAME, PASSWORD, DATABASE, TEMPERATURE, HUMIDITY, ROOM1
# following config moved to config.py file
# InfluxDB credentials
#HOST = os.environ.get('INFLUXDB_HOST', 'localhost')
#PORT = os.environ.get('INFLUXDB_PORT', 8086)
#USERNAME = os.environ.get('INFLUXDB_USER', 'influxDBuser')
#PASSWORD = os.environ.get('INFLUXDB_USER_PASSWORD', '<PASSWORD>')
#DATABASE = os.environ.get('INFLUXDB_DB', 'strawberry_factory')
def client():
    """
    Connect to InfluxDB with the configured credentials, create the target
    database, and switch to it.

    :returns: a ready-to-use InfluxDBClient
    """
    db_client = InfluxDBClient(host=HOST, port=int(PORT),
                               username=USERNAME, password=PASSWORD)
    # Create the database (the original does this unconditionally on every
    # connection), then select it for subsequent writes.
    db_client.create_database(DATABASE)
    db_client.switch_database(DATABASE)
    return db_client
def save(db_client, measurement, fields, tags=None):
    """
    Write a single data point to InfluxDB.

    :param db_client: a connected InfluxDB client (anything exposing
        ``write_points``)
    :param str measurement: the measurement (table) name
    :param dict fields: field name -> value mapping for the point
    :param dict tags: optional tag name -> value mapping
    """
    # One-point JSON body. Note: when no tags are given, 'tags' is still
    # included with value None, matching the original active code path.
    json_body = [{'measurement': measurement, 'tags': tags, 'fields': fields}]
    # write / save into a row
    db_client.write_points(json_body)
def send_influxdb(data, measurement='temperature'):
    """
    Save a sensor reading into InfluxDB under the given measurement.

    :param data: the sensor value to store
    :param str measurement: either 'temperature' or 'humidity'
    """
    db_client = client()
    # Map the caller-facing measurement name to the configured measurement
    # constant; both kinds share the same tags/fields shape, so the two
    # near-identical branches of the original are folded into one path.
    measurements = {'temperature': TEMPERATURE, 'humidity': HUMIDITY}
    if measurement not in measurements:
        # Preserved from the original: report and return without writing.
        print("Positional argument (measurement) required!")
        return
    # save @influxdb
    save(db_client,
         measurements[measurement],
         {"value": data},
         tags={"place": ROOM1})
5081380 | <reponame>StawaDev/Estrapy-API<filename>Estrapy/games.py<gh_stars>1-10
from io import BytesIO
from PIL import Image
from .http import get_api, BASE_URL
from .base import Base, ObjectConverter
from typing import Union, Optional
import json
import requests
import random as rd
import time
# Public API of this module (OsuClients and Trivia are presumably defined
# further down in the file).
__all__ = ("Games", "AniGames", "OsuClients", "Trivia")
# Module-level converter instance.
# NOTE(review): not referenced anywhere in this part of the file -- verify
# it is used by code defined later.
Converter = ObjectConverter()
class Games:
    """Collection of game-related API helpers (truth, dare, shipper)."""
    @staticmethod
    async def truth(generate: Optional[int] = None):
        """
        Return a random truth challenge as text.

        When `generate` is given, multiple challenges are produced via
        :meth:`Base.produce`; otherwise a single API call is made.

        Example
        -------
        ```
        async def truth():
            print(await Estrapy.Games.truth())
        ```

        :param generate: optional number of challenges to generate
        :type generate: Optional[int]
        """
        url = "games/truth"
        if generate:
            return await Base.produce(total=generate, full_url=url, type="text")
        return get_api(url)["text"]
    @staticmethod
    async def dare(generate: Optional[int] = None):
        """
        Return a random dare challenge as text.

        When `generate` is given, multiple challenges are produced via
        :meth:`Base.produce`; otherwise a single API call is made.

        Example
        -------
        ```
        async def dare():
            print(await Estrapy.Games.dare())
        ```

        :param generate: optional number of challenges to generate
        :type generate: Optional[int]
        """
        url = "games/dare"
        if generate:
            return await Base.produce(total=generate, full_url=url, type="text")
        return get_api(url)["text"]
    @staticmethod
    async def shipper(
        player: Optional[str],
        player2: Optional[str],
        player_image: Union[str, bytes] = None,
        player2_image: Union[str, bytes] = None,
        background_image: Union[str, bytes] = None,
        background_size: Optional[tuple] = None,
    ):
        """
        Return an edited "ship" image combining two players, with an
        optional custom background.

        Player/background images are passed as URLs. The default background
        size is 1920x1080; the API supports 1920x1080 and 1280x720 for
        custom backgrounds. Pass None for any parameter you don't want to
        customize.

        Example
        -------
        ```
        async def shipper():
            img = await Estrapy.Games.shipper(player="Player1", player2="Player2")
            img.show()
        ```

        :param player: first player's display name
        :param player2: second player's display name
        :param player_image: URL of the first player's image
        :param player2_image: URL of the second player's image
        :param background_image: URL of a custom background image
        :param background_size: optional (width, height) tuple for the
            background; rendered as "WIDTHxHEIGHT" in the request
        """
        url = f"{BASE_URL}/games/shipper/image/?player={player}&player2={player2}&player_image={player_image}&player2_image={player2_image}&background={background_image}"
        # Exactly one of the two branches below runs, so `req` is always bound.
        if background_size is None:
            req = requests.get(url)
        if background_size is not None:
            size = f"{background_size[0]}x{background_size[1]}"
            req = requests.get(f"{url}&background_size={size}")
        # Decode the response bytes into a PIL image.
        a = Image.open(BytesIO(req.content))
        return a
class AniGames:
    """Anime-themed party-game helpers backed by the Estra API."""

    @staticmethod
    async def truth(generate: Optional[int] = None):
        """
        Description
        --------------
        Return a random anime-themed truth challenge as text.

        When ``generate`` is given, delegate to ``Base.produce`` to fetch that
        many challenges at once; otherwise return a single challenge string.
        (The previous ``-> None`` annotation was incorrect: a value is returned.)

        How to use truth [about anime] as function (Examples)
        ----------------------------
        ```
        async def truth():
            print(await Estrapy.AniGames.truth())  # Keep it as function or it will return function type
        ```

        :param generate: optional number of challenges to produce at once
        :type generate: Optional[int]
        """
        url = "anigames/truth"
        if generate:
            return await Base.produce(total=generate, full_url=url, type="text")
        return get_api(url)["text"]

    @staticmethod
    async def dare(generate: Optional[int] = None):
        """
        Description
        --------------
        Return a random anime-themed dare challenge as text.

        How to use dare [about anime] function (Examples)
        ----------------------------
        ```
        async def dare():
            print(await Estrapy.AniGames.dare())  # Keep it as function or it will return function type
        ```

        :param generate: optional number of challenges to produce at once
        :type generate: Optional[int]
        """
        url = "anigames/dare"
        if generate:
            return await Base.produce(total=generate, full_url=url, type="text")
        return get_api(url)["text"]

    @staticmethod
    async def waifu(formatter: bool = False):
        """
        Description
        --------------
        Return the API payload for a random waifu picture.

        ```
        async def waifu():
            print(await Estrapy.AniGames.waifu())
        ```

        :param formatter: pretty-format the payload with ``Base.JSONFormatter``
        :type formatter: bool, default `False`
        """
        # Local renamed from `url`: it holds the API response, not an address.
        data = get_api("anigames/waifu")
        if formatter:
            return await Base.JSONFormatter(data)
        return data

    @staticmethod
    async def husbando(formatter: bool = False):
        """
        Description
        --------------
        Return the API payload for a random husbando picture.

        ```
        async def husbando():
            print(await Estrapy.AniGames.husbando())
        ```

        :param formatter: pretty-format the payload with ``Base.JSONFormatter``
        :type formatter: bool, default `False`
        """
        data = get_api("anigames/husbando")
        if formatter:
            return await Base.JSONFormatter(data)
        return data

    @staticmethod
    async def shipper_waifu(player: str, formatter: bool = False):
        """
        Shipper_Waifu
        --------------
        Return shipper-waifu JSON data for *player* from the Estra API.

        ```
        async def shipper_waifu():
            print(await Estrapy.AniGames.shipper_waifu(player="Stawa"))
        ```

        :param player: player name to ship
        :type player: str
        :param formatter: pretty-format the payload with ``Base.JSONFormatter``
        :type formatter: bool, default `False`
        """
        data = get_api(f"anigames/shipper/waifu/?player={player}")
        if formatter:
            return await Base.JSONFormatter(data)
        return data

    @staticmethod
    async def shipper_husbando(player: str, formatter: bool = False):
        """
        Shipper_Husbando
        --------------
        Return shipper-husbando JSON data for *player* from the Estra API.

        ```
        async def shipper_husbando():
            print(await Estrapy.AniGames.shipper_husbando(player="Stawa"))
        ```

        :param player: player name to ship
        :type player: str
        :param formatter: pretty-format the payload with ``Base.JSONFormatter``
        :type formatter: bool, default `False`
        """
        data = get_api(f"anigames/shipper/husbando/?player={player}")
        if formatter:
            return await Base.JSONFormatter(data)
        return data
class OsuClients:
    """Thin wrapper around the Estra osu! endpoints.

    The configured ``output`` chooses between raw JSON dictionaries
    (``"json"``) and attribute-style objects (``"object"``).
    """

    def __init__(
        self,
        client_id: Union[int, str] = None,
        client_secret: str = None,
        output: str = "json",
    ) -> None:
        # Credentials are forwarded verbatim to the osu! endpoints.
        self.client_id = client_id
        self.client_secret = client_secret
        self.output = output
        # Supported output modes; index 1 ("object") triggers conversion.
        self.output_list = ["json", "object"]

    async def osuprofile(
        self,
        username: Union[int, str] = None,
        formatter: bool = False,
    ):
        """Fetch the osu! profile payload for *username*.

        ```
        async def osuprofile():
            print(await Estrapy.OsuClients.osuprofile(username="Stawa"))
        ```

        :param username: osu! user id or name
        :param formatter: pretty-format the JSON payload with EstraFormatter
        :return: dict, formatted text, or converted object per ``self.output``
        """
        payload = get_api(
            f"osu/?user={username}&client_id={self.client_id}&client_secret={self.client_secret}"
        )
        if self.output == self.output_list[1]:
            return Converter.convert_obj(json.dumps(payload))
        return await Base.JSONFormatter(payload) if formatter else payload

    async def osubeatmap(
        self,
        beatmap_id: int,
        formatter: bool = False,
    ):
        """Fetch the osu! beatmap payload for *beatmap_id*.

        ```
        async def osubeatmap():
            print(await Estrapy.OsuClients.osubeatmap(beatmap_id="2405223"))
        ```

        :param beatmap_id: beatmap identifier
        :param formatter: pretty-format the JSON payload with EstraFormatter
        :return: dict, formatted text, or converted object per ``self.output``
        """
        payload = get_api(
            f"osubeatmap/?id={beatmap_id}&client_id={self.client_id}&client_secret={self.client_secret}"
        )
        if self.output == self.output_list[1]:
            return Converter.convert_obj(json.dumps(payload))
        return await Base.JSONFormatter(payload) if formatter else payload
class Trivia:
    """File-backed trivia engine persisting questions in ``trivia.json``."""

    def __init__(self):
        # Fallback payload used whenever trivia.json is missing or unreadable.
        self.trivia = {"questions": {}}

    def _load(self):
        """Return the question store from trivia.json, or the empty default."""
        try:
            with open("trivia.json", "r", encoding="utf-8") as f:
                return json.load(f)
        except (OSError, ValueError):
            # Missing file or corrupt JSON: start from the in-memory default.
            # (The original crashed with FileNotFoundError on first use.)
            return self.trivia

    @staticmethod
    def _save(trivia):
        """Persist the question store to trivia.json."""
        with open("trivia.json", "w", encoding="utf-8") as f:
            json.dump(trivia, f, indent=4, ensure_ascii=False)

    async def add(
        self,
        question: str,
        answer: str,
        options: dict,
        category: Optional[str] = None,
        difficulty: Optional[str] = None,
    ) -> str:
        """
        Trivia Add
        --------------
        Add a trivia question to the JSON file.  More than one question can be
        stored; numbers freed by `Trivia.remove` are recycled (first free slot).

        ```
        from Estrapy import Trivia

        EstraTrivia = Trivia()

        async def create_question():
            options = {"A": "Stawa", "B": "RandomPerson", "C": "Phone"}
            question = await EstraTrivia.add(
                question="Who is the creator of this packages?",
                answer=list(options)[0],  # Or "A"
                options=options,
            )
            print(question)
        ```

        :param question: question text
        :param answer: expected answer (compared case-insensitively)
        :param options: mapping of option letter -> option text
            (annotation fixed: was ``dir``, the builtin, by mistake)
        :param category: optional category label
        :param difficulty: optional difficulty label
        :return: status message
        """
        trivia = self._load()
        existing = {int(key) for key in trivia["questions"]}
        # First unused question number: fills gaps left by removals,
        # otherwise appends after the current maximum.
        num = min(set(range(1, max(existing) + 2)) - existing) if existing else 1
        for key, entry in trivia["questions"].items():
            if entry["question"] == question:
                return "Trivia: Question (#{}) is already exist on Question (#{})".format(
                    num, key
                )
        # Store under a string key so lookups (which use str(num)) behave the
        # same before and after a JSON round-trip.
        trivia["questions"][str(num)] = {
            "question": question,
            "answer": answer,
            "options": options,
            "difficulty": difficulty,
            "category": category,
        }
        self._save(trivia)
        return "Question (#{}) added".format(num)

    async def remove(self, num: int) -> str:
        """
        Trivia Remove
        --------------
        Remove the question stored under *num* from the JSON file.

        ```
        from Estrapy import Trivia

        EstraTrivia = Trivia()

        async def remove_question():
            print(await EstraTrivia.remove(1))  # Remove question number 1
        ```

        :param num: question number to delete
        :return: status message
        """
        try:
            with open("trivia.json", "r", encoding="utf-8") as f:
                trivia = json.load(f)
        except (OSError, ValueError):
            return "Trivia: No Question Found"
        # Guard against unknown numbers (the original raised KeyError here).
        if str(num) not in trivia["questions"]:
            return "Trivia: No Question Found"
        trivia["questions"].pop(str(num))
        self._save(trivia)
        return "Trivia: Question (#{}) Removed".format(num)

    async def run(self, num: Optional[int] = None, random_pick: bool = True):
        """
        Trivia Run
        --------------
        Fetch one question from the JSON file, either by explicit number
        (pass ``random_pick=False``) or by random pick.  Use together with
        `Trivia.answer`.

        #### Examples available in https://github.com/StawaDev/Estrapy-API/blob/main/Examples/Trivia.py

        :param num: explicit question number; requires ``random_pick=False``
        :param random_pick: pick a random question (default)
        :return: (question, number, answer, formatted options, difficulty,
            category), or an error string, or None when nothing matches
        """
        # Explicit number and random picking are mutually exclusive.  The
        # original guard also rejected num with random_pick=False, then
        # overwrote num with 0 so an explicit number could never be used.
        if num is not None and random_pick:
            return "Please put None on unnecessary parameter or leave it empty"
        with open("trivia.json", "r") as f:
            File = json.load(f)
        Total = len(File["questions"])
        if Total == 0:
            return None
        if num is None:
            num = rd.randint(1, Total) if random_pick else 1
        if not 1 <= int(num) <= Total:
            return None
        entry = File["questions"][str(num)]
        _options = ["{}.{}".format(key, value) for key, value in entry["options"].items()]
        return (
            entry["question"],
            num,
            entry["answer"],
            _options,
            entry["difficulty"],
            entry["category"],
        )

    async def answer(self, run: any, guess: str = None):
        """
        Trivia Answer
        --------------
        Check *guess* (case-insensitively) against the answer of a question
        tuple previously returned by `Trivia.run`.

        :param run: tuple returned by `Trivia.run`
        :param guess: the player's guess; None counts as incorrect
        :return: (correct?, expected answer)
        """
        expected = run[2]
        # The original raised TypeError when guess was left as None.
        if guess is None:
            return False, expected
        return guess.lower() == expected.lower(), expected

    @staticmethod
    async def run_console(random_pick: bool = False) -> None:
        """
        Trivia Run Console
        --------------
        Play every stored question interactively on the console.  Requires
        questions added beforehand via `Trivia.add`.

        #### Examples available in https://github.com/StawaDev/Estrapy-API/blob/main/Examples/Trivia.py

        :param random_pick: shuffle the question order
        """
        # Declared @staticmethod: the original omitted `self`, so instance
        # calls passed the instance as random_pick; a static method keeps both
        # `Trivia.run_console(...)` and `instance.run_console(...)` working.
        score = 0
        with open("trivia.json", "r") as f:
            File = json.load(f)
        Total = len(File["questions"])
        if Total == 0:
            return
        order = list(range(1, Total + 1))
        if random_pick:
            # The original drew one random number and then discarded it;
            # shuffling implements the documented random order.
            rd.shuffle(order)
        for num in order:
            entry = File["questions"][str(num)]
            _options = ["{}.{}".format(key, value) for key, value in entry["options"].items()]
            print("Question (#{}) : {}".format(num, entry["question"]))
            print("Options: {}".format(", ".join(_options)))
            print("Difficulty: {}".format(entry["difficulty"]))
            print("Category: {}".format(entry["category"]))
            answer = input("Answer: ")
            correct = answer.lower() == entry["answer"].lower()
            if correct:
                score += 1
            print("That's correct!" if correct else "That's incorrect!")
            time.sleep(2)
        print(
            "Game over! no more questions! Score: {}%".format(
                int(score / Total * 100)
            )
        )
| StarcoderdataPython |
376476 | <reponame>gtank/blake2
#!/bin/env python3
import json
import sys
from pyblake2 import blake2s, blake2b
def write_blake2s_tests(output_fn):
    """Generate BLAKE2s test vectors and write them to *output_fn* as JSON.

    The array contains, in order:
      - 8 keyed vectors with salts of length 1..8,
      - 8 keyed vectors with personalization strings of length 1..8,
      - 32 unkeyed vectors with digest sizes 1..32.

    :param output_fn: path of the JSON file to (over)write
    """
    key_bytes = bytearray(range(32))
    tests = []
    for i in range(8):
        salt_bytes = bytearray(range(i + 1))
        tests.append({
            "hash": "blake2s",
            "in": "",
            "key": key_bytes.hex(),
            "persona": "",
            "salt": salt_bytes.hex(),
            "out": blake2s(key=key_bytes, salt=salt_bytes).hexdigest(),
        })
    for i in range(8):
        persona_bytes = bytearray(range(i + 1))
        tests.append({
            "hash": "blake2s",
            "in": "",
            "key": key_bytes.hex(),
            "persona": persona_bytes.hex(),
            "salt": "",
            "out": blake2s(key=key_bytes, person=persona_bytes).hexdigest(),
        })
    for length in range(1, 33):
        tests.append({
            "hash": "blake2s",
            "in": "",
            "key": "",
            "persona": "",
            "salt": "",
            "length": length,
            "out": blake2s(digest_size=length).hexdigest(),
        })
    # json.dump guarantees well-formed output; the original hand-rolled the
    # array brackets/commas and leaked the file handle on error (no `with`).
    with open(output_fn, 'w') as fd:
        json.dump(tests, fd, indent=1)
def write_blake2b_tests(output_fn):
    """Generate BLAKE2b test vectors and write them to *output_fn* as JSON.

    The array contains, in order:
      - 8 keyed vectors with salts of length 1..8,
      - 8 keyed vectors with personalization strings of length 1..8,
      - 64 unkeyed vectors with digest sizes 1..64.

    :param output_fn: path of the JSON file to (over)write
    """
    key_bytes = bytearray(range(32))
    tests = []
    for i in range(8):
        salt_bytes = bytearray(range(i + 1))
        tests.append({
            "hash": "blake2b",
            "in": "",
            "key": key_bytes.hex(),
            "persona": "",
            "salt": salt_bytes.hex(),
            "out": blake2b(key=key_bytes, salt=salt_bytes).hexdigest(),
        })
    for i in range(8):
        persona_bytes = bytearray(range(i + 1))
        tests.append({
            "hash": "blake2b",
            "in": "",
            "key": key_bytes.hex(),
            "persona": persona_bytes.hex(),
            "salt": "",
            "out": blake2b(key=key_bytes, person=persona_bytes).hexdigest(),
        })
    for length in range(1, 65):
        tests.append({
            "hash": "blake2b",
            "in": "",
            "key": "",
            "persona": "",
            "salt": "",
            "length": length,
            "out": blake2b(digest_size=length).hexdigest(),
        })
    # json.dump guarantees well-formed output; the original hand-rolled the
    # array brackets/commas and leaked the file handle on error (no `with`).
    with open(output_fn, 'w') as fd:
        json.dump(tests, fd, indent=1)
if __name__ == "__main__":
    # Expect exactly two output paths: blake2s vectors first, blake2b second.
    if len(sys.argv) < 3:
        print("Usage: gen_vectors.py <path to blake2s output file> <path to blake2b output file>")
        sys.exit(1)
    write_blake2s_tests(sys.argv[1])
    write_blake2b_tests(sys.argv[2])
| StarcoderdataPython |
8098525 | import robin_stocks as r
import pytz
import datetime as dt
import re
import holidays
import numpy as np
# NOTE(review): credentials are blank placeholders; load them from environment
# variables or a secrets store rather than committing real values here.
username = ''
password = ''
# Log in once at import time; subsequent robin_stocks calls reuse this session.
login = r.login(username, password)
#rlt = r.load_account_profile()
#rlt = r.load_portfolio_profile()
#rlt = r.get_all_option_positions() #Returns all option positions ever held for the account
#rlt = r.get_open_option_positions() # Returns all open option positions for the account.
#rlt = r.get_markets() # get market mic available "XANS" for nasdaq
#rlt = r.get_market_hours("XNAS", "2020-08-22")
#rlt = r.get_market_today_hours("XNAS")
#rlt = r.build_holdings()
#rlt = r.build_user_profile()
#rlt = r.get_all_positions()
#rlt = r.get_linked_bank_accounts() # bank id: 899c320a-0e80-4489-8b81-51535cd1992e
#rlt = r.get_bank_account_info("899c320a-0e80-4489-8b81-51535cd1992e")
#rlt = r.get_open_stock_positions() # get all open stock positions
#rlt = r.load_phoenix_account()
#rlt = r.get_all_open_option_orders()
#rlt = r.get_all_open_stock_orders()
#print(rlt)
#r.export_completed_option_orders("./", "test")
# a function to adjust the price given a min_ticks, min_ticks look like:
# {'above_tick': '0.05', 'below_tick': '0.01', 'cutoff_price': '3.00'} for example
def adjustPrice(instrument, price, roundup=False):
    """Snap *price* onto the instrument's minimum tick grid.

    ``min_ticks`` looks like ``{'above_tick': '0.05', 'below_tick': '0.01',
    'cutoff_price': '3.00'}``: prices at or above the cutoff move in
    ``above_tick`` increments, prices below it in ``below_tick`` increments.

    :param instrument: option instrument payload (may lack ``min_ticks``)
    :param price: candidate price to adjust
    :param roundup: counter-intuitively, ``True`` floors the price onto the
        grid (and enforces a 0.05 minimum) while ``False`` rounds it up — this
        matches how sell vs. buy order paths call the helper; TODO confirm the
        naming is intentional before renaming.
    :return: the adjusted price, or the input price when no tick data applies
    """
    if roundup:
        # Never quote below a nickel when flooring for a sell order.
        price = max(0.05, price)
    try:
        min_ticks = instrument.get("min_ticks")
        if not min_ticks:
            # No tick constraints published for this instrument.
            return price
        cutoff = round(float(min_ticks["cutoff_price"]), 2)
        above_tick = round(float(min_ticks["above_tick"]), 2)
        below_tick = round(float(min_ticks["below_tick"]), 2)
        tick = above_tick if price >= cutoff else below_tick
        # NOTE: float modulo is approximate; an exact multiple passes through.
        if price % tick == 0:
            return price
        steps = int(price / tick)
        if roundup:
            return round(steps * tick, 2)
        return round((steps + 1) * tick, 2)
    except (AttributeError, KeyError, TypeError, ValueError, ZeroDivisionError):
        # Malformed tick data (previously a bare except): leave price as-is.
        return price
# class dealing with checking market open
class marketTime:
    """Tracks NASDAQ trading-hours state via the robin_stocks market API.

    Attributes set by the constructor:
      is_open        -- whether the market opens at all today
      open_now       -- whether the market is open at this instant
      next_open_date -- ISO date parsed from the API's next_open_hours field
    """
    def __init__(self, fname="log/market_time.log", verbose=True):
        """Initialize timezone/session constants and query current state.

        :param fname: log file path (used only when verbose)
        :param verbose: when True, set up a logger; __del__ closes it
        """
        self.verbose = verbose
        if self.verbose:
            self.logger = r.setup_logger("marketTime", fname)
            self.logger.info("Class marketTime initialized.")
        self.tz = pytz.timezone('US/Eastern')
        # Regular NASDAQ session bounds (US/Eastern).
        self.openTime = dt.time(hour = 9, minute = 30, second = 0)
        self.closeTime = dt.time(hour = 16, minute = 0, second = 0)
        # Cutoff used elsewhere to stop trading shortly before the close.
        self.stopTime = dt.time(hour = 15, minute = 55, second = 0)
        self.mic = "XNAS" # market identifier code for NASDAQ
        self.is_open = False # open today ?
        self.open_now = False # open now ?
        self.next_open_date = ""
        # Populate is_open/open_now/next_open_date immediately.
        self.opennow()
    def __del__(self):
        # Close the logger only if one was created (verbose mode).
        if self.verbose:
            r.close_logger(self.logger)
        else:
            pass
    # a function to check if the market opens at all today
    def opencheck(self):
        """Refresh is_open/next_open_date from today's market hours; return is_open."""
        ret = r.get_market_today_hours(self.mic)
        self.is_open = ret["is_open"]
        # NOTE(review): re.search raises TypeError if next_open_hours is None
        # (possible on holidays) — confirm the API always returns a string.
        match = re.search(r"\d+-\d+-\d+", ret["next_open_hours"])
        if match:
            self.next_open_date = match.group()
        else:
            self.next_open_date = "date not found!"
        return self.is_open
    def checkdate(self, date):
        """Return whether the market is open on *date* (YYYY-MM-DD)."""
        ret = r.get_market_hours(self.mic, date)
        return ret["is_open"]
    def now(self):
        """Return (and cache on self.time) the current US/Eastern datetime."""
        self.time = dt.datetime.now(self.tz)
        return self.time
    # a function to check if the market is open at this instant
    def opennow(self):
        """Combine today's open flag with the session window; update open_now."""
        now = self.now()
        if(not self.opencheck()):
            self.open_now = False
            return False
        if (now.time() >= self.openTime) and (now.time() < self.closeTime):
            self.open_now = True
        else:
            self.open_now = False
        return self.open_now
# class to load currently held stocks
class loadAccount:
    """Snapshot of the Robinhood account: held stocks, open (credit) option
    positions, and pending option orders, loaded once at construction."""
    def __init__(self, fname="log/load_account.log"):
        """Set up logging and eagerly load stocks, options, and open orders."""
        self.logger = r.setup_logger("loadAccount", fname)
        self.logger.info("Class loadAccount initialized.")
        # Redacted bank-account identifier placeholder.
        self.bankid = "<KEY>"
        self.stocks_held = [] # dictionaries, each dict for each stock, "symbol", "shares", "shares_avai", "num_contracts", "avai_contracts", "buy_price", "current_price"
        self.stocks_held100 = [] # stocks with shares greater than 100
        self.stocks_avai100 = [] # stocks with available shares greater than 100
        self.loadstocks()
        self.option_positions = [] # dictionaries, each dict for each open option positions, "symbol" "quantity" "trade_price" "option_id" "exp_date" "strike_price" "price"
        self.loadoptions()
        self.open_orders = [] # list of dictionaries, each dict for each open order, "order_id" "option_id" "quantity" "position_effect" "side" "type"
        self.loadorders()
    def __del__(self):
        # Release the logger handle created in __init__.
        r.close_logger(self.logger)
    def loadstocks(self):
        """Rebuild stocks_held / stocks_held100 / stocks_avai100 from the
        account's open stock positions (one quote request per symbol)."""
        self.logger.info("loading stocks...")
        self.stocks_held = []
        self.stocks_held100 = []
        self.stocks_avai100 = []
        ret = r.get_open_stock_positions()
        for stk in ret:
            dic = {}
            dic["symbol"] = r.get_instrument_by_url(stk["instrument"])["symbol"]
            dic["shares"] = round(float(stk["quantity"]), 3)
            dic["shares_avai"] = int(float(stk["shares_available_for_exercise"]))
            dic["avai_contracts"] = int(dic["shares_avai"]/100)
            # NOTE(review): "num_contracts" reads the same field as
            # "avai_contracts" (shares_available_for_exercise); it likely was
            # meant to derive from total "quantity" — confirm intent.
            dic["num_contracts"] = int(float( stk["shares_available_for_exercise"] )/100)
            dic["buy_price"] = round(float(stk["average_buy_price"]), 2)
            dic["current_price"] = round(float(r.get_stock_quote_by_symbol(dic["symbol"])['last_trade_price']), 2)
            dic["equity"] = round(dic["current_price"] * dic["shares"],2)
            if(dic["shares"]>=100):
                self.stocks_held100.append(dic)
                if dic["shares_avai"]>=100: self.stocks_avai100.append(dic)
            self.stocks_held.append(dic)
    # a function to get the average buy price of a held stock by symbol
    def getStockPrice(self, sym):
        """Return the average buy price for *sym*, or -1 when not held."""
        if(len(self.stocks_held)>0):
            for stk in self.stocks_held:
                if stk["symbol"]==sym:
                    return stk["buy_price"]
            return -1
        else:
            return -1
    # function to load currently open options positions
    def loadoptions(self):
        """Rebuild option_positions from open option positions, keeping only
        positions opened for a credit (average_price <= 0)."""
        self.logger.info("loading options...")
        self.option_positions = []
        ret = r.get_open_option_positions()
        for opt in ret:
            dic = {}
            dic["symbol"] = opt["chain_symbol"]
            dic["quantity"] = int(float(opt["quantity"]))
            dic["trade_price"] = round(float(opt["average_price"]), 2)
            if(dic["trade_price"] > 0): continue # only consider credit opening positions
            # The option id is the second-to-last URL path segment.
            dic["option_id"] = opt["option"].split('/')[-2]
            instrument = r.get_option_instrument_data_by_id(dic["option_id"])
            dic["exp_date"] = instrument["expiration_date"]
            dic["strike_price"] = round(float(instrument["strike_price"]),2)
            quote = r.get_option_market_data_by_id(dic["option_id"])
            dic["price"] = round(float(quote["adjusted_mark_price"]),2)
            dic["stock_price"] = self.getStockPrice(dic["symbol"])
            self.option_positions.append(dic)
    # function to load currently pending orders
    def loadorders(self):
        """Rebuild open_orders from pending option orders; classifies each as
        call/put from its opening or closing strategy string."""
        self.logger.info("loading open orders...")
        self.open_orders = []
        ret = r.get_all_open_option_orders()
        for order in ret:
            dic = {}
            dic["order_id"] = order["id"]
            dic["option_id"] = order["legs"][0]["option"].split('/')[-2]
            dic["quantity"] = int(float(order["quantity"]))
            dic["position_effect"] = order["legs"][0]["position_effect"]
            dic["side"] = order["legs"][0]["side"]
            dic["type"] = "unknown"
            strategy = None
            if not order["opening_strategy"]==None: strategy = order["opening_strategy"]
            elif not order["closing_strategy"]==None: strategy = order["closing_strategy"]
            if (not strategy==None) and "call" in strategy: dic["type"] = "call"
            elif (not strategy==None) and "put" in strategy: dic["type"] = "put"
            self.open_orders.append(dic)
# class to get the optimal option to trade given the stock symbol
class findOption:
    """Searches call-option chains for an attractive covered-call to sell."""
    def __init__(self, fname="log/findOption.log"):
        """Set up logging, the US holiday calendar, and today's Eastern date."""
        self.logger = r.setup_logger("findOption", fname)
        self.logger.info("Class findOption initialized.")
        self.us_holidays = holidays.US()
        self.today = dt.datetime.now(pytz.timezone('US/Eastern'))
    def __del__(self):
        # Release the logger handle created in __init__.
        r.close_logger(self.logger)
    # given an expiration date, find the best strike price for a covered call
    def returnRate(self, sym, price, date, buyin_price):
        """Pick the highest-bid call on *date* whose strike clears safety margins.

        Filters: bid above one cent, strike at least 2x the bid above the spot
        price, strike above 110% of spot, and strike above the holder's buy-in.

        :return: (bid/spot rate, premium per contract in dollars, strike), or
            None when nothing qualifies
        """
        opts = r.find_options_by_expiration(sym, date, "call")
        dicts = []
        for rlt in opts:
            try:
                strike_p = float(rlt['strike_price'])
                bid_price = float(rlt['bid_price'])
                if bid_price>0.01 and (strike_p-price)>2*bid_price and strike_p>1.1*price and strike_p>buyin_price:
                    dic = {}
                    dic["bid_price"] = bid_price
                    dic["strike_price"] = strike_p
                    dicts.append(dic)
            except Exception as e:
                # Skip malformed chain entries (missing/non-numeric fields).
                print(e)
                pass
        dicts.sort(key=lambda x: x["bid_price"], reverse=True)
        if(len(dicts)>0):
            rate = dicts[0]['bid_price']/price
            return rate,dicts[0]['bid_price']*100,dicts[0]['strike_price']
        else:
            return None
    # a function to return a best call option to sell given a stock
    def returnOption(self, sym, buyin_price=-1, nextFriday=False):
        """Compare the next two Friday expirations and return the better sell.

        Prefers the second Friday when the first's per-business-day premium is
        no better than 70% of the second's, unless *nextFriday* forces week one.

        :return: (expiration date, premium per contract, strike, spot price),
            or None when either chain lookup fails
        """
        self.logger.info("looking for a call option to sell for {} ...".format(sym))
        price = float(r.get_stock_quote_by_symbol(sym)['last_trade_price'])
        next_friday = self.today + dt.timedelta( (3 - self.today.weekday()) % 7 + 1) # looking for the next Friday
        # NOTE(review): self-assignment below is a no-op left from editing.
        next_friday = next_friday
        # Shift a holiday Friday back to Thursday for each candidate week.
        if next_friday.strftime("%Y-%m-%d") in self.us_holidays:
            date1 = next_friday - dt.timedelta(1)
        else: date1 = next_friday
        date2 = next_friday + dt.timedelta(7)
        if date2.strftime("%Y-%m-%d") in self.us_holidays: date2 = date2 - dt.timedelta(1)
        # Business days until each expiration, for per-day premium comparison.
        ndays1 = np.busday_count(self.today.strftime("%Y-%m-%d"), date1.strftime("%Y-%m-%d"), holidays=list(self.us_holidays)) + 1
        ndays2 = np.busday_count(self.today.strftime("%Y-%m-%d"), date2.strftime("%Y-%m-%d"), holidays=list(self.us_holidays)) + 1
        try:
            # returnRate may return None; unpacking then raises and lands in
            # the except below, which reports failure for the whole search.
            (p1_rate, p1_bid_price, p1_strike_p) = self.returnRate(sym, price, date1.strftime("%Y-%m-%d"), buyin_price)
            (p2_rate, p2_bid_price, p2_strike_p) = self.returnRate(sym, price, date2.strftime("%Y-%m-%d"), buyin_price)
            print("bid_price {}, date {}".format(p1_bid_price, date1))
            print("bid_price {}, date {}".format(p2_bid_price, date2))
        except Exception as e:
            self.logger.warning("ReturnOption: Fetching option failed: {}".format(e))
            print(e)
            return None
        if (not nextFriday) and (p1_bid_price/ndays1 <= 0.7*p2_bid_price/ndays2):
            return date2, p2_bid_price, p2_strike_p, price
        else:
            return date1, p1_bid_price, p1_strike_p, price
# class to handle with option tranctions
class transaction:
    """Submits and cancels option orders through the robin_stocks API."""
    def __init__(self, fname="log/transaction.log"):
        """Set up logging, the US holiday calendar, and today's Eastern date."""
        self.logger = r.setup_logger("transaction", fname)
        self.logger.info("Class transaction initialized.")
        self.us_holidays = holidays.US()
        self.today = dt.datetime.now(pytz.timezone('US/Eastern'))
    def __del__(self):
        # Release the logger handle created in __init__.
        r.close_logger(self.logger)
    # submit a sell call option given a dict, the dictionary should contain:
    # "sym" "exp_date" "strike_p" "quantity" "current_price"
    # after, the dictionary will contain:
    # "adjusted_price" "status" "order_id" "option_id" "position_effect" "side" "type" "adjusted_price"
    # Note: this is a OPEN effect with credits
    def sellCallOption(self,dic):
        """Submit a sell-to-open call order at the bid/ask midpoint.

        Mutates *dic* in place with order metadata.  Enforces a floor on the
        premium (>= 0.0015 of spot per business day) before tick adjustment.

        :param dic: order request; see the comment block above for keys
        :return: True on submission, False on failure (and None if the API
            returns a falsy response — see NOTE below)
        """
        sym = dic['symbol']
        exp_date = dic['exp_date'].strftime("%Y-%m-%d")
        strike_p = dic['strike_p']
        quant = dic["quantity"]
        price = dic["current_price"]
        self.logger.info("prepare to submit the sell call order, symbol:{}, exp_date:{}, strike_price:{}, quantity: {}, current price:{}".format(sym, exp_date, strike_p, quant, price))
        rlt = r.find_options_by_expiration_and_strike(sym, exp_date, strike_p, "call")
        if (not len(rlt) > 0 ) or (not rlt[0]["tradability"] == "tradable"):
            print("\nFAILED to submit a sell Call option: symbol {}, strike_p {}, exp_date {}\n".format(sym, strike_p ,exp_date))
            self.logger.warning("cannot find available option!")
            dic["adjusted_price"] = -1
            dic["status"] = "Failed"
            return False
        bid_p = float(rlt[0]["bid_price"])
        ask_p = float(rlt[0]["ask_price"])
        adjusted_p = round((bid_p+ask_p)/2, 2)
        # Price floor: require roughly a 3% monthly return, i.e. an amortized
        # 0.0015 of spot per business day until expiration.
        ndays = np.busday_count(self.today.strftime("%Y-%m-%d"), exp_date, holidays=list(self.us_holidays)) + 1
        if(adjusted_p/ndays/price<0.0015):
            new_price = round((price*0.0015*ndays+adjusted_p)/2, 2)
            self.logger.warning("the adjusted price is too low {}, has been re-adjusted to {}".format(adjusted_p, new_price))
            adjusted_p = new_price
        instrument = r.get_option_instrument_data(sym, exp_date, strike_p, "call")
        # roundup=True floors onto the tick grid (see adjustPrice docs).
        adjusted_p = adjustPrice(instrument, adjusted_p, True)
        print("\nSubmiting a sell Call option: symbol {}, strike_p {}, exp_date {}, adjusted_price: {}\n".format(sym, strike_p ,exp_date, adjusted_p))
        dic["adjusted_price"] = adjusted_p
        try:
            self.logger.info("submitting the required order with adjusted price: {}".format(adjusted_p))
            ret = r.order_sell_option_limit("open", "credit", adjusted_p , sym, quant, exp_date, strike_p, "call")
            if ret:
                self.logger.info("Order submitted, the status is: {}".format(ret["state"]))
                print(" Submitted, the order status is: {}".format(ret["state"]))
                dic["status"] = ret["state"]
                dic["order_id"] = ret["id"]
                dic["option_id"] = rlt[0]["id"]
                dic["position_effect"] = "open"
                dic["side"] = "sell"
                dic["type"] = "call"
                dic["adjusted_price"] = adjusted_p
                return True
            # NOTE(review): when ret is falsy control falls through and the
            # method implicitly returns None (not False) — confirm intended.
        except Exception as e:
            print(" Failed: {}".format(e))
            self.logger.error("Submission failed: {}".format(e))
            dic["status"] = "Failed"
            return False
    # submit a buy call option to close the option position given a dict, the dictionary should contain:
    # "symbol", "quantity" "strike_price" "exp_date" "option_id"
    # after, a new order dictionary will be returned
    # Note: this is a CLOSE effect with debits
    def buyCallOption(self,dic):
        """Submit a buy-to-close call order at the adjusted mark price.

        Checks day-trade buying power before submitting.

        :param dic: open position; see the comment block above for keys
        :return: a new order dict on success, None on failure or no funds
        """
        try:
            quote = r.get_option_market_data_by_id(dic["option_id"])
            instrument = r.get_option_instrument_data_by_id(dic["option_id"])
            price = round(float(quote["adjusted_mark_price"]), 2)
            # Default roundup=False rounds the debit price up to the tick.
            price = adjustPrice(instrument, price)
            # Check that enough funds are available before submitting.
            profile = r.load_account_profile()
            balance = round(float(profile["margin_balances"]["day_trade_buying_power"]), 2)
            if price*100 < balance:
                self.logger.info("Submitting to buy a option to close, sym:{}, quant:{}, exp_date:{}, strike_price:{}, price:{}".format(dic["symbol"], dic["quantity"], dic["exp_date"], dic["strike_price"],price))
                ret = r.order_buy_option_limit("close", "debit", price, dic["symbol"], dic["quantity"], dic["exp_date"], dic["strike_price"], "call")
                if ret:
                    opt = {}
                    opt["symbol"] = dic["symbol"]
                    opt["quantity"] = dic["quantity"]
                    opt["exp_date"] = dic["exp_date"]
                    opt["strike_p"] = dic["strike_price"]
                    opt["option_id"] = dic["option_id"]
                    opt["side"] = "buy"
                    opt["type"] = "call"
                    opt["position_effect"] = "close"
                    opt["order_id"] = ret["id"]
                    opt["status"] = ret["state"]
                    opt["adjusted_price"] = price
                    self.logger.info("Buy option order has been submiited, order id: {}, status: {}".format(opt["order_id"], opt["status"]))
                    return opt
                else:
                    self.logger.warning("failed to buy a option!")
                    return None
            else:
                self.logger.warning("trying to buy option but not enough funds, balance:{}, price:{}".format(balance, price*100))
                return None
        except Exception as e:
            self.logger.error(" error occured when trying to close the option: {}".format(e))
            return None
    # function to cancel a option order, the dictionary should contain the "order_id" and the status should be "unconfirmed" or "queued"
    def cancelOrder(self, dic):
        """Cancel a pending option order when it is still cancellable.

        Only orders in state "queued", "unconfirmed", or "confirmed" are
        cancelled; *dic*["status"] is set to "cancelled" on success.

        :param dic: must contain "order_id"
        :return: True when the cancel request was submitted, else False
        """
        if "order_id" not in dic:
            self.logger.warning("cancelOrder: cannot find order_id, returned!")
            return False
        order_id = dic["order_id"]
        ret = r.get_option_order_info(order_id)
        if ret:
            if ret["state"]=="queued" or ret["state"]=="unconfirmed" or ret["state"]=="confirmed":
                rlt = r.cancel_option_order(order_id)
                self.logger.info("cancel option order request submitted.")
                dic["status"] = "cancelled"
                return True
            else:
                self.logger.warning("cannot cancel this order: {}, since the status is: {}".format(order_id, ret["state"]))
                return False
        else:
            self.logger.warning("cancelOrder: cannot get the info about this order, seems order id is not correct: {}".format(order_id))
            return False
if __name__ == '__main__':
    # Ad-hoc smoke test: cancel a specific pending option order.
    # myaccount = loadAccount()
    # print(myaccount.open_orders)
    mytransaction = transaction()
    # Hard-coded order id from a previous session; replace before running.
    dic={"order_id": "a4a06a12-df44-4d58-915d-21e724dc6fed" }
    ret = mytransaction.cancelOrder(dic)
    print(ret)
| StarcoderdataPython |
3532318 | <filename>tests/run_python3_post_pretty.py
import requests

# Smoke test: POST a UTF-8 encoded sentence to the local tagging service with
# pretty-printing enabled, and echo the raw response body.
# NOTE(review): the payload string looks mojibake-encoded ("Fรถrdomen" for
# "Fördomen") — confirm the source file's encoding before fixing the literal.
r = requests.post("http://localhost:8000/tag", data={
    "data": "Fรถrdomen har alltid sin rot i vardagslivet - <NAME>".encode("utf-8"),
    "pretty": 1,
})
print(r.text)
| StarcoderdataPython |
import os

from quotes_api.app import create_app

# WSGI entry point: build the Flask-style app from the configuration named in
# APP_CONFIGURATION, defaulting to the production profile.
app = create_app(configuration=os.getenv("APP_CONFIGURATION", "production"))
| StarcoderdataPython |
4908627 | <reponame>sebimarkgraf/rllib
"""Template for a Model Based Agent.
A model based agent has three behaviors:
- It learns models from data collected from the environment.
- It optimizes policies with simulated data from the models.
- It plans with the model and policies (as guiding sampler).
"""
from itertools import chain
import torch
from torch.optim import Adam
from rllib.agent.abstract_agent import AbstractAgent
from rllib.algorithms.model_learning_algorithm import ModelLearningAlgorithm
from rllib.algorithms.mpc.policy_shooting import PolicyShooting
from rllib.dataset.experience_replay import ExperienceReplay, StateExperienceReplay
from rllib.model import TransformedModel
from rllib.policy.mpc_policy import MPCPolicy
from rllib.policy.random_policy import RandomPolicy
from rllib.util.neural_networks.utilities import DisableGradient
from rllib.util.utilities import tensor_to_distribution
class ModelBasedAgent(AbstractAgent):
"""Implementation of a Model Based RL Agent.
Parameters
----------
policy_learning_algorithm: PolicyLearningAlgorithm.
model_learning_algorithm: ModelLearningAlgorithm
planning_algorithm: MPCSolver.
thompson_sampling: bool.
Flag that indicates whether or not to use posterior sampling for the model.
Other Parameters
----------------
See AbstractAgent.
"""
    def __init__(
        self,
        dynamical_model,
        reward_model,
        termination_model=None,
        num_rollouts=0,
        train_frequency=50,
        num_iter=50,
        exploration_steps=0,
        exploration_episodes=1,
        model_learn_train_frequency=0,
        model_learn_num_rollouts=1,
        model_learn_exploration_steps=None,
        model_learn_exploration_episodes=None,
        policy_learning_algorithm=None,
        model_learning_algorithm=None,
        planning_algorithm=None,
        thompson_sampling=False,
        memory=None,
        batch_size=100,
        clip_grad_val=10.0,
        *args,
        **kwargs,
    ):
        """Initialize a model-based agent.

        :param dynamical_model: transition model (model_kind must be "dynamics").
        :param reward_model: reward model (model_kind must be "rewards").
        :param termination_model: optional termination model
            (model_kind must be "termination").
        :param policy_learning_algorithm: optional policy optimizer; its policy
            is used when given.
        :param model_learning_algorithm: optional ModelLearningAlgorithm.
        :param planning_algorithm: optional MPC solver; wrapped in an MPCPolicy
            when no policy learning algorithm is given.
        :param thompson_sampling: use posterior sampling for the model.
        :param memory: experience replay buffer (a default one is created
            when None).
        Remaining scheduling parameters are forwarded to AbstractAgent.
        """
        self.algorithm = policy_learning_algorithm
        super().__init__(
            num_rollouts=num_rollouts,
            train_frequency=train_frequency,
            num_iter=num_iter,
            exploration_steps=exploration_steps,
            exploration_episodes=exploration_episodes,
            clip_grad_val=clip_grad_val,
            batch_size=batch_size,
            *args,
            **kwargs,
        )
        self.model_learn_train_frequency = model_learn_train_frequency
        self.model_learn_num_rollouts = model_learn_num_rollouts
        # Model-learning exploration schedule defaults to the agent's own
        # schedule (episodes shifted by one so the model trains one episode
        # earlier than the policy).
        if model_learn_exploration_steps is None:
            model_learn_exploration_steps = self.exploration_steps
        if model_learn_exploration_episodes is None:
            model_learn_exploration_episodes = self.exploration_episodes - 1
        self.model_learn_exploration_steps = model_learn_exploration_steps
        self.model_learn_exploration_episodes = model_learn_exploration_episodes
        self.planning_algorithm = planning_algorithm
        self.model_learning_algorithm = model_learning_algorithm
        self.dynamical_model = dynamical_model
        self.reward_model = reward_model
        self.termination_model = termination_model
        # Fail fast on mismatched model kinds.
        assert self.dynamical_model.model_kind == "dynamics"
        assert self.reward_model.model_kind == "rewards"
        if self.termination_model is not None:
            assert self.termination_model.model_kind == "termination"
        # Policy priority: learned policy > MPC over the planner > random.
        if policy_learning_algorithm:
            policy = policy_learning_algorithm.policy
        elif planning_algorithm is not None:
            policy = MPCPolicy(self.planning_algorithm)
        else:
            policy = RandomPolicy(dynamical_model.dim_state, dynamical_model.dim_action)
        self.policy = policy
        self.thompson_sampling = thompson_sampling
        if self.thompson_sampling:
            # Thompson sampling predicts from sampled posterior models.
            self.dynamical_model.set_prediction_strategy("posterior")
        if memory is None:
            memory = ExperienceReplay(max_len=100000, num_steps=0)
        self.memory = memory
        # Stores each episode's initial state (used e.g. to seed simulations).
        self.initial_states_dataset = StateExperienceReplay(
            max_len=1000, dim_state=self.dynamical_model.dim_state
        )
def act(self, state):
"""Ask the agent for an action to interact with the environment.
If the plan horizon is zero, then it just samples an action from the policy.
If the plan horizon > 0, then is plans with the current model.
"""
if isinstance(self.planning_algorithm, PolicyShooting):
if not isinstance(state, torch.Tensor):
state = torch.tensor(state, dtype=torch.get_default_dtype())
policy = tensor_to_distribution(
self.policy(state), **self.policy.dist_params
)
self.pi = policy
action = self.planning_algorithm(state).detach().numpy()
else:
action = super().act(state)
return action.clip(
-self.policy.action_scale.numpy(), self.policy.action_scale.numpy()
)
def observe(self, observation):
"""Observe a new transition.
If the episode is new, add the initial state to the state transitions.
Add the transition to the data set.
"""
super().observe(observation)
if self.training:
self.memory.append(observation)
if self.learn_model_at_observe:
self.model_learning_algorithm.learn(self.logger)
if (
self.train_at_observe
and len(self.memory) > self.batch_size
and self.algorithm is not None
):
self.learn()
def start_episode(self):
"""See `AbstractAgent.start_episode'."""
super().start_episode()
if self.thompson_sampling:
self.dynamical_model.sample_posterior()
def end_episode(self):
"""See `AbstractAgent.end_episode'.
If the agent is training, and the base model is a GP Model, then add the
transitions to the GP, and summarize and sparsify the GP Model.
Then train the agent.
"""
self.initial_states_dataset.append(self.last_trajectory[0].state.unsqueeze(0))
if self.model_learning_algorithm is not None and self.training:
self.model_learning_algorithm.add_last_trajectory(self.last_trajectory)
if self.learn_model_at_end_episode:
self.model_learning_algorithm.learn(self.logger)
if self.train_at_end_episode:
self.learn()
super().end_episode()
def learn(self, memory=None):
"""Learn a policy with the model."""
#
def closure():
"""Gradient calculation."""
if memory is None:
observation, *_ = self.memory.sample_batch(self.batch_size)
else:
observation, *_ = memory.sample_batch(self.batch_size)
self.optimizer.zero_grad()
losses = self.algorithm(observation.clone())
losses.combined_loss.mean().backward()
torch.nn.utils.clip_grad_norm_(
self.algorithm.parameters(), self.clip_gradient_val
)
return losses
with DisableGradient(
self.dynamical_model, self.reward_model, self.termination_model
):
self._learn_steps(closure)
@property
def learn_model_at_observe(self):
"""Raise flag if learn the model after observe."""
return (
self.training
and self.model_learning_algorithm is not None
and self.total_steps >= self.model_learn_exploration_steps
and self.total_episodes >= self.model_learn_exploration_episodes
and self.model_learn_train_frequency > 0
and self.total_steps % self.model_learn_train_frequency == 0
)
@property
def learn_model_at_end_episode(self):
"""Raise flag to learn the model at end of an episode."""
return (
self.training
and self.model_learning_algorithm is not None
and self.total_steps >= self.model_learn_exploration_steps
and self.total_episodes >= self.model_learn_exploration_episodes
and self.model_learn_num_rollouts > 0
and (self.total_episodes + 1) % self.model_learn_num_rollouts == 0
)
@classmethod
def default(
cls,
environment,
dynamical_model=None,
reward_model=None,
termination_model=None,
num_epochs=20,
model_lr=5e-4,
l2_reg=1e-4,
calibrate=True,
*args,
**kwargs,
):
"""Get a default model-based agent."""
if dynamical_model is None:
dynamical_model = TransformedModel.default(environment, *args, **kwargs)
if reward_model is None:
try:
reward_model = environment.env.reward_model()
except AttributeError:
reward_model = TransformedModel.default(
environment,
model_kind="rewards",
transformations=dynamical_model.forward_transformations,
)
if termination_model is None:
try:
termination_model = environment.env.termination_model()
except AttributeError:
pass
params = list(chain(dynamical_model.parameters(), reward_model.parameters()))
if len(params):
model_optimizer = Adam(params, lr=model_lr, weight_decay=l2_reg)
model_learning_algorithm = ModelLearningAlgorithm(
dynamical_model=dynamical_model,
reward_model=reward_model,
termination_model=termination_model,
num_epochs=num_epochs,
model_optimizer=model_optimizer,
calibrate=calibrate,
)
else:
model_learning_algorithm = None
return super().default(
environment,
dynamical_model=dynamical_model,
reward_model=reward_model,
termination_model=termination_model,
model_learning_algorithm=model_learning_algorithm,
*args,
**kwargs,
)
| StarcoderdataPython |
3591448 | """
This problem was asked by Facebook.
Given a 32-bit integer, return the number with its bits reversed.
For example, given the binary number
1111 0000 1111 0000 1111 0000 1111 0000,
return
0000 1111 0000 1111 0000 1111 0000 1111.
"""
# Take one bit at a time from the number and append it to the reversed result.
# The implementation below checks whether the rightmost bit of the number is 1
# by AND'ing the number with 1; if the result is 1 it is added as 1 to the
# result (otherwise 0), using the OR operation.
def reverse_bits(number, width=None):
    """Return ``number`` with its bits reversed.

    By default only the significant bits are reversed (leading zeros of the
    input are dropped), preserving this function's original behavior.  Pass
    ``width=32`` to reverse a full fixed-width value, as the 32-bit problem
    statement asks.

    Args:
        number (int): non-negative integer whose bits to reverse.
        width (int | None): optional fixed bit width to reverse over.

    Returns:
        int: the bit-reversed value (0 for ``number <= 0`` when ``width`` is None).
    """
    res = 0  # accumulates the reversed bits, MSB-first from the input's LSBs
    if width is None:
        # Variable width: stop once every significant bit is consumed.
        while number > 0:
            # Shift the result left and append the input's lowest bit.
            res = (res << 1) | (number & 1)
            number >>= 1
    else:
        # Fixed width: consume exactly `width` bits, so leading zeros of the
        # input become trailing zeros of the result.
        for _ in range(width):
            res = (res << 1) | (number & 1)
            number >>= 1
    return res
# Simpler variant: if AND'ing the number with 1 yields 1,
# set the low bit of the result; otherwise leave it 0.
def reverse_bits_redux(number):
    """Reverse the significant bits of a non-negative integer.

    Same contract as ``reverse_bits``: bits are emitted LSB-first, so
    leading zeros of the input are dropped from the result.

    Args:
        number (int): non-negative integer whose bits to reverse.

    Returns:
        int: the input with its significant bits reversed (0 for inputs <= 0).
    """
    result = 0
    # `number > 0` (not mere truthiness) guarantees termination: for a
    # negative value, `number >> 1` never reaches 0 in Python.
    while number > 0:
        result = result << 1
        if number & 1 == 1:
            result = result | 1
        number = number >> 1
    return result
if __name__ == '__main__':
    # Smoke tests: both implementations must agree with the hand-reversed
    # expectations (significant bits only, leading zeros dropped).
    pattern = int("11110000111100001111000011110000", 2)
    expected = int("00001111000011110000111100001111", 2)
    assert reverse_bits(pattern) == expected
    assert reverse_bits(int("110001", 2)) == int("100011", 2)
    assert reverse_bits_redux(pattern) == expected
    assert reverse_bits_redux(int("110001", 2)) == int("100011", 2)
| StarcoderdataPython |
4963271 | <gh_stars>1-10
import re
def extract_libraries(files):
    """Extracts a list of imports that were used in the files

    Parameters
    ----------
    files : []string
        Full paths to files that need to be analysed

    Returns
    -------
    dict
        imports that were used in the provided files, mapped against the language
    """
    # Patterns for two- and three-segment package prefixes, for both plain
    # and static imports.  Standard ``java.*`` imports are excluded via a
    # negative lookahead.
    segment = r'[a-zA-Z0-9]*'
    patterns = [
        re.compile(r'import ((?!java)%s\.%s)' % (segment, segment), re.IGNORECASE),
        re.compile(r'import ((?!java)%s\.%s\.%s)' % (segment, segment, segment), re.IGNORECASE),
        re.compile(r'import static ((?!java)%s\.%s)' % (segment, segment), re.IGNORECASE),
        re.compile(r'import static ((?!java)%s\.%s\.%s)' % (segment, segment, segment), re.IGNORECASE),
    ]
    found = []
    for path in files:
        with open(file=path, mode='r', errors='ignore') as handle:
            contents = ' '.join(handle.readlines())
            hits = []
            for pattern in patterns:
                hits.extend(pattern.findall(contents))
            if hits:
                found.extend(hits)
    # remove duplicates
    found = list(set(found))
    return {"Java": found}
4940076 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import struct
from pcc.compiler.assembler import Assembler, ProcessorRegister, ShiftMode
# http://ref.x86asm.net/coder64.html
# https://www.amd.com/system/files/TechDocs/24594.pdf
# page 74 for
# integer calling register order:
# rdi - rsi - rdx - rcx - r8 - r9 - rest on stack
def get_register_encoding(register):
    """Return the 3-bit x86 encoding for ``register``.

    General-purpose registers are resolved inline; argument and floating
    point registers are delegated to the helper lookups.

    Raises:
        NotImplementedError: if the register has no known encoding.
    """
    general_purpose = {
        ProcessorRegister.frame_pointer: 4,
        ProcessorRegister.base_pointer: 5,
        ProcessorRegister.accumulator: 0,
        ProcessorRegister.counter: 1,
        ProcessorRegister.data: 2,
    }
    if register in general_purpose:
        return general_purpose[register]
    # Helpers return -1 for "not mine"; the first non-negative hit wins.
    for lookup in (process_input_regs, process_floating_regs):
        encoding = lookup(register)
        if encoding >= 0:
            return encoding
    raise NotImplementedError
def process_input_regs(register):
    """Return the encoding of an integer-argument register, or -1 if not one."""
    encodings = {
        ProcessorRegister.integer_argument_0: 7,
        ProcessorRegister.integer_argument_1: 6,
        ProcessorRegister.integer_argument_2: 2,
        ProcessorRegister.integer_argument_3: 1,
    }
    return encodings.get(register, -1)
def process_floating_regs(register):
    """Return the encoding of a scalar SSE register, or -1 if not one."""
    encodings = {
        ProcessorRegister.single_scalar_0: 0,
        ProcessorRegister.single_scalar_1: 1,
        ProcessorRegister.double_scalar_0: 0,
        ProcessorRegister.double_scalar_1: 1,
    }
    return encodings.get(register, -1)
def is_single_scalar_reg(register):
    """Check if the register is a single scalar register

    Args:
        register (ProcessorRegister): The register to check

    Returns:
        bool: True if the register is a single scalar register, else False
    """
    return register in (ProcessorRegister.single_scalar_0,
                        ProcessorRegister.single_scalar_1)
def is_double_scalar_reg(register):
    """Check if the register is a double scalar register

    Args:
        register (ProcessorRegister): The register to check

    Returns:
        bool: True if the register is a double scalar register, else False
    """
    return register in (ProcessorRegister.double_scalar_0,
                        ProcessorRegister.double_scalar_1)
class X64Assembler(Assembler):
    """Assembler that emits x86-64 machine code for the supported operations."""
    def __init__(self):
        # Fixed: `super(X64Assembler).__init__()` created an *unbound* super
        # object and initialized it, never calling the base-class __init__.
        super().__init__()
    def push_to_stack(self, register):
        """Push a register on stack.

        Args:
            register (ProcessorRegister): the register to push on stack

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        register_encoding = get_register_encoding(register)
        value.append(0x50 + register_encoding)
        # 0x50 == push instruction,
        # the register to push is encoded and added
        return value
    def pop_from_stack(self, register):
        """Pop a register from stack.

        Args:
            register (ProcessorRegister): the register to push on stack

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        register_encoding = get_register_encoding(register)
        value.append(0x58 + register_encoding)
        # (0x58 == pop) + the register to pop to
        return value
    def copy_from_reg_to_reg(self, source, destination):
        """Copy the value from one register to another one.

        Args:
            source (ProcessorRegister): the source register
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        # 0x48 REX prefix with W flag set (64 bit operands)
        # 0x89 MOV instruction
        # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and
        # REG destination
        mod = 0b11
        reg = get_register_encoding(source)
        rm = get_register_encoding(destination)
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.extend([0x48, 0x89, modr_byte])
        return value
    def copy_value_to_reg(self, imm_value, destination):
        """Copy the value to a register.

        Args:
            imm_value (int): the value to copy
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        if is_single_scalar_reg(destination):
            # mov the single scalar to eax
            value += bytearray([0xb8])
            packed = struct.pack("f", imm_value)
            value += bytearray(packed)
            # movd eax to xmm0
            value += bytearray([0x66, 0x0f, 0x6e])
            register_encoding = get_register_encoding(destination)
            value += bytearray([0xc0 + (register_encoding << 3)])
        elif is_double_scalar_reg(destination):
            # mov the double scalar to rax
            value += bytearray([0x48, 0xb8])
            packed = struct.pack("d", imm_value)
            value += bytearray(packed)
            # movq rax to xmm0
            value += bytearray([0x66, 0x48, 0x0f, 0x6e])
            register_encoding = get_register_encoding(destination)
            value += bytearray([0xc0 + (register_encoding << 3)])
        else:
            register_encoding = get_register_encoding(destination)
            value.append(0xb8 + register_encoding)
            # (0xb8 == mov imm) + the register to move to
            value += bytearray(struct.pack("i", imm_value))
        return value
    def return_to_caller(self):
        """Return to the caller routine.

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        value.append(0xc3)  # ret instruction
        return value
    def nop(self):
        """No operation.

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        value.append(0x90)  # nop
        return value
    def push_value_to_stack(self, value_array, stack_offset):
        """Pushes a value on stack.

        Args:
            value_array (bytearray): the value to push on stack
            stack_offset (int): the offset from the stack pointer

        Returns:
            bytearray: the machine code

        Raises:
            ValueError: if the value_array is not correct
        """
        if len(value_array) > 4:
            raise ValueError("array too long")
        value = bytearray()
        value.append(0xc7)  # mov
        # Table 2-2. 32-Bit Addressing Forms with the ModR/M Byte
        # indirect addressing with byte displacement
        mod = 0b01
        destination = ProcessorRegister.base_pointer
        rm = get_register_encoding(destination)
        reg = 0  # don't care
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        encoded_offset = struct.pack("b", stack_offset)
        value += encoded_offset
        value.extend(value_array)
        if len(value_array) < 4:
            padding = 4 - len(value_array)
            for _ in range(padding):
                value.append(0)
        return value
    def copy_stack_to_reg(self, stack_offset, register):
        """Copy the contents of the stack to the register

        Args:
            stack_offset (int): the stack offset
            register (ProcessorRegister): the register to copy to

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        if is_single_scalar_reg(register):
            value.extend([0xF3, 0x0F, 0x10])  # movss
        elif is_double_scalar_reg(register):
            value.extend([0xF2, 0x0F, 0x10])  # movsd
        else:
            value.append(0x8b)  # mov
        # Table 2-2. 32-Bit Addressing Forms with the ModR/M Byte
        # indirect addressing with byte displacement
        mod = 0b01
        destination = ProcessorRegister.base_pointer
        rm = get_register_encoding(destination)
        reg = get_register_encoding(register)
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        encoded_offset = struct.pack("b", stack_offset)
        value += encoded_offset
        return value
    def copy_reg_to_stack(self, stack_offset, register):
        """Copy the contents of the register to the stack

        Args:
            stack_offset (int): the stack offset
            register (ProcessorRegister): the register to copy from

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        if is_single_scalar_reg(register):
            value.extend([0xF3, 0x0F, 0x11])  # movss
        elif is_double_scalar_reg(register):
            value.extend([0xF2, 0x0F, 0x11])  # movsd
        else:
            value.append(0x89)  # mov
        # Table 2-2. 32-Bit Addressing Forms with the ModR/M Byte
        # indirect addressing with byte displacement
        mod = 0b01
        destination = ProcessorRegister.base_pointer
        rm = get_register_encoding(destination)
        reg = get_register_encoding(register)
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        encoded_offset = struct.pack("b", stack_offset)
        value += encoded_offset
        return value
    def add(self, source, destination):
        """Add the value of the source to the destination.

        Args:
            source (ProcessorRegister): the source register
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        if is_single_scalar_reg(source):
            value.extend([0xF3, 0x0F, 0x58])  # addss
            rm = get_register_encoding(source)
            reg = get_register_encoding(destination)
        elif is_double_scalar_reg(source):
            value.extend([0xF2, 0x0F, 0x58])  # addsd
            rm = get_register_encoding(source)
            reg = get_register_encoding(destination)
        else:
            value.append(0x01)  # ADD
            rm = get_register_encoding(destination)
            reg = get_register_encoding(source)
        # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and
        # REG destination
        mod = 0b11
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        return value
    def sub(self, source, destination):
        """Subtract the value of the source from the destination.

        Args:
            source (ProcessorRegister): the source register
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        if is_single_scalar_reg(source):
            value.extend([0xF3, 0x0F, 0x5c])  # subss
            rm = get_register_encoding(source)
            reg = get_register_encoding(destination)
        elif is_double_scalar_reg(source):
            value.extend([0xF2, 0x0F, 0x5c])  # subsd
            rm = get_register_encoding(source)
            reg = get_register_encoding(destination)
        else:
            value.extend([0x48, 0x29])  # sub
            rm = get_register_encoding(destination)
            reg = get_register_encoding(source)
        # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and
        # REG destination
        mod = 0b11
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        return value
    def div(self, source, destination):
        """Divide the value of the source by the destination.

        Store the result in the dividend register.

        Args:
            source (ProcessorRegister): the source register
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        dividend = destination
        divider = source
        if is_single_scalar_reg(divider):
            value.extend([0xF3, 0x0F, 0x5E])  # divss
            mod = 0b11
            rm = get_register_encoding(divider)
            reg = get_register_encoding(dividend)
            modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
            value.append(modr_byte)
        elif is_double_scalar_reg(divider):
            value.extend([0xF2, 0x0F, 0x5E])  # divsd
            mod = 0b11
            rm = get_register_encoding(divider)
            reg = get_register_encoding(dividend)
            modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
            value.append(modr_byte)
        else:
            # idiv eax = edx:eax / divider
            if divider == ProcessorRegister.accumulator:
                tmp_reg = ProcessorRegister.data
                value += self.copy_from_reg_to_reg(destination=divider,
                                                   source=tmp_reg)
                divider = tmp_reg
                # so dividend is no accumulator
                tmp_reg = ProcessorRegister.accumulator
                value += self.copy_from_reg_to_reg(destination=dividend,
                                                   source=tmp_reg)
            tmp_reg = ProcessorRegister.counter
            value += self.copy_from_reg_to_reg(destination=divider,
                                               source=tmp_reg)
            divider = tmp_reg
            src = ProcessorRegister.accumulator
            value += self.copy_from_reg_to_reg(destination=dividend,
                                               source=src)
            # mov eax -> edx
            src = ProcessorRegister.accumulator
            dest = ProcessorRegister.data
            value += self.copy_from_reg_to_reg(destination=dest,
                                               source=src)
            # shift edx by 31 -> contains the highest bits of the dividend,
            # eax the lowest 31 bits
            value += self.shift(ProcessorRegister.data,
                                ShiftMode.right_arithmetic,
                                amount=31)
            value.append(0xf7)  # idiv
            mod = 0b11
            rm = get_register_encoding(divider)
            reg = 7  # F7 /7 -> 7 in the reg field
            modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
            value.append(modr_byte)
            # the result is stored in the acc register, so copy it to the
            # correct result register if needed
            if destination != ProcessorRegister.accumulator:
                register = ProcessorRegister.accumulator
                value += self.copy_from_reg_to_reg(register, dividend)
        return value
    def mul(self, destination, source):
        """Multiply the value of the source by the destination.

        destination = source * destination

        Args:
            source (ProcessorRegister): the source register
            destination (ProcessorRegister): the source register

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        if is_single_scalar_reg(destination):
            value.extend([0xF3, 0x0F, 0x59])  # mulss
            mod = 0b11
            reg = get_register_encoding(destination)
            rm = get_register_encoding(source)
            modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
            value.append(modr_byte)
        elif is_double_scalar_reg(destination):
            value.extend([0xF2, 0x0F, 0x59])  # mulsd
            mod = 0b11
            reg = get_register_encoding(destination)
            rm = get_register_encoding(source)
            modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
            value.append(modr_byte)
        else:
            value.extend([0x0F, 0xAF])  # imul
            mod = 0b11
            reg = get_register_encoding(destination)
            rm = get_register_encoding(source)
            modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
            value.append(modr_byte)
        return value
    def shift(self, register, mode, amount):
        """Shift the register.

        Args:
            register (ProcessorRegister): the register to shift
            mode (ShiftMode): the mode to shift
            amount (int): the shift amount

        Returns:
            bytearray: the machine code

        Raises:
            NotImplementedError: if the mode is not yet implemented
        """
        value = bytearray()
        if mode == ShiftMode.right_arithmetic:
            # SAR r/m32, imm8
            value.append(0xC1)
            mod = 0b11
            rm = get_register_encoding(register)
            reg = 7  # C1 /7 ib -> 7 in reg field
            modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
            value.append(modr_byte)
            encoded_amount = struct.pack("b", amount)
            value += encoded_amount
        elif mode == ShiftMode.left_arithmetic:
            # SAL r/m32, imm8
            value.append(0xC1)
            mod = 0b11
            rm = get_register_encoding(register)
            reg = 4  # C1 /4 ib -> 4 in reg field
            modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
            value.append(modr_byte)
            encoded_amount = struct.pack("b", amount)
            value += encoded_amount
        else:
            raise NotImplementedError
        return value
    def cmp(self, register_1, register_2):
        """Compare the 2 registers.

        Args:
            register_1 (ProcessorRegister): the first register
            register_2 (ProcessorRegister): the second register

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        # CMP r/m32, r32
        value.append(0x39)
        mod = 0b11
        rm = get_register_encoding(register_1)
        reg = get_register_encoding(register_2)
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        return value
    def cmp_against_const(self, register, const):
        """Compare the register against a constant.

        Args:
            register (ProcessorRegister): the register
            const (int): the const value

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        # CMP r/m32, imm32
        value.append(0x81)
        mod = 0b11
        rm = get_register_encoding(register)
        reg = 7
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        encoded_const = struct.pack("i", const)
        value += encoded_const
        return value
    def je(self, jump_distance):
        """Jump if the equals flag is set.

        Args:
            jump_distance (int): the distance to jump in bytes

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        # JE rel8
        value.append(0x74)
        encoded_amount = struct.pack("b", jump_distance)
        value += encoded_amount
        return value
    def jne(self, jump_distance):
        """Jump if the equals flag is not set.

        Args:
            jump_distance (int): the distance to jump in bytes

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        # 0F 85 cd JNE rel32
        value.extend([0x0F, 0x85])
        encoded_amount = struct.pack("i", jump_distance)
        value += encoded_amount
        return value
    def jge(self, jump_distance):
        """Jump if the greater or equal flags are set.

        Args:
            jump_distance (int): the distance to jump in bytes

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        # 0F 8D cd JGE rel32
        value.extend([0x0F, 0x8D])
        encoded_amount = struct.pack("i", jump_distance)
        value += encoded_amount
        return value
    def jle(self, jump_distance):
        """Jump if the less or equal flags are set.

        Args:
            jump_distance (int): the distance to jump in bytes

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        # 0F 8E cw JLE rel32
        value.extend([0x0F, 0x8E])
        encoded_amount = struct.pack("i", jump_distance)
        value += encoded_amount
        return value
    def jg(self, jump_distance):
        """Jump if the greater flags are set.

        Args:
            jump_distance (int): the distance to jump in bytes

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        # 0F 8F cd JG rel32
        value.extend([0x0F, 0x8F])
        encoded_amount = struct.pack("i", jump_distance)
        value += encoded_amount
        return value
    def jl(self, jump_distance):
        """Jump if the less flags are set.

        Args:
            jump_distance (int): the distance to jump in bytes

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        # 0F 8C cd JL rel32
        value.extend([0x0F, 0x8C])
        encoded_amount = struct.pack("i", jump_distance)
        value += encoded_amount
        return value
    def jmp(self, jump_distance):
        """Jump.

        Args:
            jump_distance (int): the distance to jump in bytes

        Returns:
            bytearray: the machine code #noqa I202
        """
        value = bytearray()
        # JMP rel32
        value.append(0xe9)
        encoded_amount = struct.pack("i", jump_distance)
        value += encoded_amount
        return value
    def bitwise_and(self, source, destination):
        """Bitwise and the value of the source to the destination.

        Args:
            source (ProcessorRegister): the source register
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code #noqa I202
        """
        value = bytearray()
        value.append(0x21)  # AND r/m32, r32
        rm = get_register_encoding(destination)
        reg = get_register_encoding(source)
        # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and
        # REG destination
        mod = 0b11
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        return value
    def bitwise_or(self, source, destination):
        """Bitwise or the value of the source to the destination.

        Args:
            source (ProcessorRegister): the source register
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code #noqa I202
        """
        value = bytearray()
        value.append(0x09)  # OR r/m32, r32
        rm = get_register_encoding(destination)
        reg = get_register_encoding(source)
        # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and
        # REG destination
        mod = 0b11
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        return value
    def bitwise_xor(self, source, destination):
        """Bitwise xor the value of the source to the destination.

        Args:
            source (ProcessorRegister): the source register
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code #noqa I202
        """
        value = bytearray()
        value.append(0x31)  # XOR r/m32, r32
        rm = get_register_encoding(destination)
        reg = get_register_encoding(source)
        # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and
        # REG destination
        mod = 0b11
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        return value
    def bitwise_not(self, destination):
        """Bitwise not the value of the destination.

        Args:
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code #noqa I202
        """
        value = bytearray()
        value.append(0xf7)  # F7 /2 NOT r/m32
        rm = get_register_encoding(destination)
        reg = 2  # F7 /2 NOT r/m32
        # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and
        # REG destination
        mod = 0b11
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        return value
    def logical_and(self, source, destination):
        """Logical and the value of the source to the destination.

        Args:
            source (ProcessorRegister): the source register
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        value.append(0x85)  # TEST r/m32, r32
        rm = get_register_encoding(destination)
        reg = get_register_encoding(source)
        # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and
        # REG destination
        mod = 0b11
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        # clean the destination register, and only if the zero flag is set
        # set the bits in the destination register
        value += self.copy_value_to_reg(0, destination)
        # the zero flag will be set if the and was zero
        value += self.setnz(destination)
        value += self.movzx(destination, destination)
        return value
    def setnz(self, destination):
        """Set destination if the zero flag is not set.

        Args:
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        value.extend([0x0F, 0x95])  # SETNZ r/m8
        rm = get_register_encoding(destination)
        reg = 0  # don't care
        # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and
        # REG destination
        mod = 0b11
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        return value
    def mov_to_displacement(self, register, displacement):
        """Move the value from the register to the displacement.

        Args:
            register (ProcessorRegister): the destination register
            displacement (int): the displacement offset

        Returns:
            bytearray: the machine code
            int: the displacement offset in the machine code
                for the displacement
        """
        value = bytearray()
        # 89 /r MOV r/m32,r32
        value.append(0x89)
        rm = 5  # disp32
        reg = get_register_encoding(register)
        mod = 0b00
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        displacement_offset = len(value)
        encoded_displacement = struct.pack("i", displacement)
        value += encoded_displacement
        return value, displacement_offset
    def mov_from_displacement(self, register, displacement):
        """Move the value from the displacement to the register.

        Args:
            register (ProcessorRegister): the destination register
            displacement (int): the displacement offset

        Returns:
            bytearray: the machine code
            int: the displacement offset in the machine code
                for the displacement
        """
        value = bytearray()
        # 8B /r MOV r32,r/m32
        value.append(0x8b)
        rm = 5  # disp32
        reg = get_register_encoding(register)
        mod = 0b00
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        displacement_offset = len(value)
        encoded_displacement = struct.pack("i", displacement)
        value += encoded_displacement
        return value, displacement_offset
    def movzx(self, source, destination):
        """Move from source to destination with sign extend.

        Args:
            source (ProcessorRegister): the source register
            destination (ProcessorRegister): the destination register

        Returns:
            bytearray: the machine code
        """
        value = bytearray()
        value.extend([0x0F, 0xB6])  # MOVZX r32, r/m8
        rm = get_register_encoding(source)
        reg = get_register_encoding(destination)
        # ModR_byte encoded operands ( ModR/M Byte) MOD 11, RM source and
        # REG destination
        mod = 0b11
        modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
        value.append(modr_byte)
        return value
    def call(self, displacement):
        """call the symbol with the specified displacement

        Args:
            displacement (int): the displacement offset

        Returns:
            bytearray: the machine code
            int: the displacement offset in the machine code
                for the displacement
        """
        value = bytearray()
        # E8 cd CALL rel32
        value.append(0xe8)
        displacement_offset = len(value)
        encoded_displacement = struct.pack("i", displacement)
        value += encoded_displacement
        return value, displacement_offset
| StarcoderdataPython |
3227084 | <filename>sfa/methods/Remove.py
from sfa.util.faults import *
from sfa.util.xrn import Xrn
from sfa.util.method import Method
from sfa.util.parameter import Parameter, Mixed
from sfa.trust.credential import Credential
class Remove(Method):
    """Registry call that deletes an object from the registry.

    If the object represents a PLC object, the PLC records are removed as
    well.

    @param cred credential string
    @param type record type
    @param xrn human readable name of record to remove (hrn or urn)

    @return 1 if successful, faults otherwise
    """
    interfaces = ['registry']
    accepts = [
        Parameter(str, "Human readable name of slice to instantiate (hrn or urn)"),
        Mixed(Parameter(str, "Credential string"),
              Parameter(type([str]), "List of credentials")),
        Mixed(Parameter(str, "Record type"),
              Parameter(None, "Type not specified")),
        ]
    returns = Parameter(int, "1 if successful")
    def call(self, xrn, creds, type):
        record_xrn = Xrn(xrn, type=type)
        # Authorization: the credentials must be valid for "remove" and the
        # caller must hold rights over the target object.
        valid_creds = self.api.auth.checkCredentials(creds, "remove")
        self.api.auth.verify_object_permission(record_xrn.get_hrn())
        # Audit trail: log who asked to remove what, on which interface.
        caller_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
        self.api.logger.info("interface: %s\tmethod-name: %s\tcaller-hrn: %s\ttarget-urn: %s"%(
            self.api.interface, self.name, caller_hrn, record_xrn.get_urn()))
        # Delegate the actual deletion to the interface manager.
        return self.api.get_interface_manager().remove(self.api, record_xrn)
| StarcoderdataPython |
6541650 | <filename>sordini.py<gh_stars>0
from kafka import KafkaProducer
import zlib
import os
# ``os.getenv`` returns None (it does not raise) when a variable is unset,
# so the original try/except warnings could never fire — and the second
# handler reset ``defaultKafkaBroker`` instead of ``defaultKafkaTopic``.
# Check for None explicitly instead.  The warnings are printed inline
# because kErr() is defined further down in this module.
defaultKafkaBroker = os.getenv("SORDINI_BROKER")
if defaultKafkaBroker is None:
    print("SORDINI_BROKER env variable not set - any calls without the broker specified will fail")
defaultKafkaTopic = os.getenv("SORDINI_TOPIC")
if defaultKafkaTopic is None:
    print("SORDINI_TOPIC env variable not set - any calls without the topic specified will fail")
def kErr(errMessage):
    """Print a Kafkaesque error banner followed by the error message.

    Args:
        errMessage (str): human-readable description of what went wrong.
    """
    print("KAFKAESQUE ERROR (you know there's an error, but you don't know who made it, why you're here, or what that strange castle is)")
    # The original did `" - ".join(errMessage)`, which interleaves " - "
    # between every *character* of the string; print the message as-is.
    print(errMessage)
def k | StarcoderdataPython |
5096480 | from .models import Movie, Task
from rest_framework import serializers
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
def getFields(model):
    """Return the attribute names of all of *model*'s meta fields.

    Dynamic alternative to hard-coding the field list or using '__all__'.
    """
    return [field.attname for field in model._meta.get_fields()]
class MovieSerializer(serializers.HyperlinkedModelSerializer):
    """ Movie Serializer: exposes every concrete field of Movie. """
    class Meta:
        model = Movie
        # Enumerate all field names dynamically rather than using '__all__'.
        fields = getFields(Movie)
class Ipserializer(serializers.Serializer):
    """ IP Lookup Serializer: the queried address plus the raw lookup payload. """
    ip = serializers.IPAddressField()
    response = serializers.JSONField()
class UserSerializer(serializers.ModelSerializer):
    """User registration serializer with password confirmation.

    Expects two matching password fields (``password1``/``password2``); on
    create, the pair is collapsed into the single ``password`` argument
    expected by ``create_user``.  (The original source had these names
    replaced by ``<PASSWORD>`` redaction placeholders, which was not even
    valid Python; this restores the evident intent.)
    """
    # Write-only: passwords must never be readable through the API.
    password1 = serializers.CharField(write_only=True)
    password2 = serializers.CharField(write_only=True)

    def validate(self, data):
        """Reject payloads whose two password fields do not match."""
        if data['password1'] != data['password2']:
            raise serializers.ValidationError('Passwords must match.')
        return data

    def create(self, validated_data):
        """Create the user, mapping password1/password2 -> password."""
        data = {
            key: value for key, value in validated_data.items()
            if key not in ('password1', 'password2')
        }
        data['password'] = validated_data['password1']
        return self.Meta.model.objects.create_user(**data)

    class Meta:
        model = get_user_model()
        fields = (
            'id', 'username', 'password1', 'password2',
            'first_name', 'last_name', 'email',
        )
        read_only_fields = ('id',)
class TaskSerializer(serializers.ModelSerializer):
    """ Task Serializer """
    # owner = serializers.ReadOnlyField(source='owner.username') # Show username instead of User ID
    class Meta:
        # ordering = ['-id']
        model = Task
        fields = '__all__'
        # Server-managed columns must not be writable through the API.
        read_only_fields = ('id', 'ident', 'created', 'updated',)
class ReadOnlyTaskSerializer(serializers.ModelSerializer):
    """Task serializer that embeds the full owner object instead of its pk."""
    owner = UserSerializer()
    class Meta:
        model = Task
        fields = '__all__'
| StarcoderdataPython |
1981238 | import pytest
class TestUserPermissions:
    """Object-level permission checks between users and their teammates."""

    @pytest.mark.django_db
    def test_user(self, user_factory):
        # A user holds every object permission on their own account.
        jimmy = user_factory()
        for perm in ('view_user', 'change_user', 'delete_user'):
            assert jimmy.has_perm(perm, jimmy)

    @pytest.mark.django_db
    def test_user_in_team(self, user_factory, team_factory):
        # Teammates may view, but not modify or delete, each other.
        jimmy = user_factory()
        bobby = user_factory()
        team_factory(members=[jimmy, bobby])
        for first, second in ((jimmy, bobby), (bobby, jimmy)):
            assert first.has_perm('view_user', second)
            assert not first.has_perm('change_user', second)
            assert not first.has_perm('delete_user', second)

    @pytest.mark.django_db
    def test_user_other_team(self, user_factory):
        # Users with no shared team hold no permissions on one another.
        jimmy = user_factory()
        bobby = user_factory()
        for first, second in ((jimmy, bobby), (bobby, jimmy)):
            for perm in ('view_user', 'change_user', 'delete_user'):
                assert not first.has_perm(perm, second)
| StarcoderdataPython |
399690 | #!/usr/bin/python
"""
Configure and run tools
"""
from subprocess import call
import os
import sys
ENV_RESOURCES_PATH = os.getenv("RESOURCES_PATH", "/resources")
ENV_WORKSPACE_TYPE = os.getenv("WORKSPACE_TYPE", "cpu")
ENV_WORKSPACE_HOME = os.getenv("WORKSPACE_HOME", "/workspace")  # NOTE(review): currently unused in this script
ENV_WORKSPACE_BASE_URL = os.getenv("WORKSPACE_BASE_URL", "/")

# Normalise the base URL so the concatenations below always form valid URLs.
if not ENV_WORKSPACE_BASE_URL.startswith("/"):
    ENV_WORKSPACE_BASE_URL = "/" + ENV_WORKSPACE_BASE_URL

DESKTOP_PATH = os.getenv("HOME", "/root") + "/Desktop"


def _shortcut_metadata(name, icon, url):
    """Return the body of a freedesktop .desktop 'Link' entry."""
    return ('[Desktop Entry]\nVersion=1.0\nType=Link\nName=' + name +
            '\nComment=\nCategories=Development;\nIcon=' + icon + '\nURL=' + url)


def _install_shortcut(metadata, target_path):
    """Write a .desktop entry to *target_path* and mark it executable."""
    call('printf "' + metadata + '" > ' + target_path, shell=True)
    call('chmod +x ' + target_path, shell=True)


# Jupyter Notebook shortcut: on the desktop and in the applications menu.
jupyter_metadata = _shortcut_metadata(
    'Jupyter Notebook',
    ENV_RESOURCES_PATH + '/icons/jupyter-icon.png',
    'http://localhost:8091' + ENV_WORKSPACE_BASE_URL)
_install_shortcut(jupyter_metadata, DESKTOP_PATH + '/jupyter.desktop')
_install_shortcut(jupyter_metadata, '/usr/share/applications/jupyter.desktop')

# Jupyter Lab shortcut: applications menu only.
jupyterlab_metadata = _shortcut_metadata(
    'Jupyter Lab',
    ENV_RESOURCES_PATH + '/icons/jupyterlab-icon.png',
    'http://localhost:8091' + ENV_WORKSPACE_BASE_URL + "lab")
_install_shortcut(jupyterlab_metadata, '/usr/share/applications/jupyterlab.desktop')

# start the tools we want to offer in Jupyter
SCRIPTS_DIR = ENV_RESOURCES_PATH + "/scripts"
#call(SCRIPTS_DIR + "/start_ungit.sh " + str(8051) + " &", shell=True)
#call(SCRIPTS_DIR + "/start_glances.sh " + str(8053) + " &", shell=True)
#call(SCRIPTS_DIR + "/start_vscode.sh " + str(8054) + " &", shell=True)

# The tool execution must be in this order, however,
# because otherwise netdata raises an 'Insufficient Permissions' error.
# I guess, some other tool messes with the permissions.
if ENV_WORKSPACE_TYPE == 'gpu':
    # TODO is this really needed? - fix permission in dockerfile
    # The 'find' command is only relevant for the GPU container.
    # Quote the glob so the shell does not expand *nvidia* against the
    # current directory before `find` ever sees the pattern.
    call("find / -name '*nvidia*' -exec sudo chmod -R --quiet a+rwx {} +", shell=True)

# Start netdata with provided netdata config on port 8050
# call("/usr/sbin/netdata", shell=True)
| StarcoderdataPython |
3277070 | from typing import Iterator, List
from dataclasses import dataclass, field
from pathlib import Path
import random
import tempfile
import unittest
from rlbot.training.training import Pass, Fail, FailDueToExerciseException
from rlbottraining.common_graders.timeout import FailOnTimeout
from rlbottraining.exercise_runner import run_playlist
from rlbottraining.history.exercise_result import ExerciseResult
class CommonExercisesTest(unittest.TestCase):
    '''
    This test runs common exercises with their default configs
    and checks that they perform as expected.
    '''

    def assertGrades(self, result_iter: Iterator[ExerciseResult], want_grades: List[str]):
        """Drain *result_iter* and compare the grade class names to *want_grades*."""
        got_grades = []
        for result in result_iter:
            if isinstance(result.grade, FailDueToExerciseException):
                self.fail(str(result.grade))
                break
            got_grades.append(result.grade.__class__.__name__)
        self.assertEqual(got_grades, want_grades)

    # def test_ball_prediction(self):
    #     from rlbottraining.common_exercises.ball_prediction import make_default_playlist
    #     results = list(run_playlist(make_default_playlist()))
    #     self.assertEqual(len(results), 2)
    #     for result in results:
    #         self.assertIsInstance(result.grade, Pass)

    def test_bakkes_mod_import(self):
        from rlbottraining.common_exercises.bakkesmod_import.bakkesmod_importer import make_default_playlist
        playlist = make_default_playlist()
        assert len(playlist) > 2
        playlist = playlist[:2]  # for making tests run quicker
        results = list(run_playlist(playlist))
        self.assertEqual(len(results), 2)
        for result in results:
            # All of these exercises are too advanced for SimpleBot.
            self.assertIsInstance(result.grade, Fail)

    def assertNextGrade(self, result_iter: Iterator[ExerciseResult], expected_grade_class: type):
        """Pull one result from the iterator and check the type of its grade.

        Bug fixes vs. the original: ``self.Fail`` (no such TestCase method)
        is now ``self.fail``, and the failure message referenced an undefined
        ``expected`` variable instead of ``expected_grade_class``.
        """
        result = next(result_iter)
        if not isinstance(result.grade, expected_grade_class):
            self.fail(f'exercise "{result.exercise}" got a "{result.grade}" grade; want a {expected_grade_class.__name__} grade.')

    def test_dribbling(self):
        from rlbottraining.common_exercises.dribbling import make_default_playlist
        self.assertGrades(
            run_playlist(make_default_playlist()),
            ['FailDueToTimeout']
        )

    def test_bronze_goalie(self):
        from rlbottraining.common_exercises.bronze_goalie import make_default_playlist
        self.assertGrades(
            run_playlist(make_default_playlist()),
            ['Pass']
        )

    def test_bronze_striker(self):
        from rlbottraining.common_exercises.bronze_striker import make_default_playlist
        result_iter = run_playlist(make_default_playlist())

        result = next(result_iter)
        self.assertEqual(result.exercise.name, 'Facing ball')
        self.assertIsInstance(result.grade, Pass)

        result = next(result_iter)
        self.assertEqual(result.exercise.name, 'Rolling Shot')
        self.assertIsInstance(result.grade, Pass)

        result = next(result_iter)
        self.assertEqual(result.exercise.name, 'Facing directly away from ball')
        self.assertIsInstance(result.grade, Fail)  # SimpleBot isn't smart enough.

        result = next(result_iter)
        self.assertEqual(result.exercise.name, 'Facing away from ball 1')
        self.assertIsInstance(result.grade, Pass)

        result = next(result_iter)
        self.assertEqual(result.exercise.name, 'Facing away from ball 2')
        self.assertIsInstance(result.grade, Pass)

        result = next(result_iter)
        self.assertEqual(result.exercise.name, 'Facing away from opponents goal')
        self.assertIsInstance(result.grade, FailOnTimeout.FailDueToTimeout)

        with self.assertRaises(StopIteration):
            next(result_iter)
        with self.assertRaises(StopIteration):
            next(result_iter)

    def test_silver_goalie(self):
        from rlbottraining.common_exercises.silver_goalie import make_default_playlist
        self.assertGrades(
            run_playlist(make_default_playlist()),
            ['WrongGoalFail', 'WrongGoalFail', 'WrongGoalFail']
        )

    def test_silver_striker(self):
        from rlbottraining.common_exercises.silver_striker import make_default_playlist
        self.assertGrades(
            run_playlist(make_default_playlist()),
            ['FailDueToTimeout']
        )

    def test_wall_play(self):
        from rlbottraining.common_exercises.wall_play import make_default_playlist
        self.assertGrades(
            run_playlist(make_default_playlist()),
            ['FailDueToTimeout']
        )

    def test_kickoff(self):
        from rlbottraining.common_exercises.kickoff_exercise import make_default_playlist
        self.assertGrades(
            run_playlist(make_default_playlist()),
            # There are five kickoffs, one for each position, in the default playlist.
            ['PassDueToMovedBall','PassDueToMovedBall','PassDueToMovedBall','PassDueToMovedBall','PassDueToMovedBall']
        )

    # # Commented out because RLBot has a bug where it doesn't like chaning the number of players.
    # def test_versus_line_goalie(self):
    #     from rlbottraining.common_exercises.versus_line_goalie import make_default_playlist
    #     self.assertGrades(
    #         run_playlist(make_default_playlist()),
    #         ['FailDueToTimeout', 'FailDueToTimeout']
    #     )
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
131851 | <gh_stars>0
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from .models import Post, Comment
from .forms import CommentForm, PostForm, LoginForm
import json
# Create your views here.
def index(request):
    """Render the blog landing page with the five most recent posts."""
    recent_posts = Post.objects.order_by('-created')[:5]
    return render(request, 'blog/index.html', {'posts': recent_posts})
def view(request, post_id):
    """Display one post with its published comments and a comment form.

    On POST, validate and attach a new comment to the post.  Note that
    `comments` is fetched before the new comment is saved, so a freshly
    submitted comment only appears on the next page load (original
    behaviour preserved).
    """
    post = get_object_or_404(Post, pk=post_id)
    comments = post.get_post_comments_published()
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            # Reuse the post fetched above instead of querying it a second time.
            comment.post = post
            comment.save()
    else:
        form = CommentForm()
    return render(request, 'blog/view.html', {
        'post': post,
        'comments': comments,
        'commentform': form
    })
def profile(request):
    """Show the profile page for the logged-in user."""
    return render(request, 'blog/profile.html', {'user': request.user})
def newpost(request):
    """Render the new-post form and create a post owned by the current user."""
    if request.method == 'POST':
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            # request.user already is the authenticated User; the original
            # re-fetched it with User.objects.get(pk=user.id) for no benefit.
            post.owner = request.user
            post.save()
    else:
        form = PostForm()
    return render(request, 'blog/newpost.html', {
        'postform': form
    })
def edit_post(request, post_id):
    """Edit an existing post; `success` tells the template whether it saved."""
    post = get_object_or_404(Post, pk=post_id)
    form = PostForm(request.POST or None, instance=post)
    saved = form.is_valid()
    if saved:
        form.save()
    return render(request, 'blog/edit_post.html', {
        'editform': form,
        'success': saved,
        'post_id': post_id
    })
def postcomments(request):
    """List all comments (any status) made on the current user's posts.

    `comments` is a list of per-post comment querysets, parallel to `posts`.
    """
    posts = Post.objects.filter(owner__id__exact=request.user.id)
    comments = [post.get_post_comments_all() for post in posts]
    return render(request, 'blog/postcomments.html', {
        'comments': comments,
        'posts': posts
    })
def posts_by_tag(request, slug):
    """List posts carrying the tag named *slug* (case-insensitive match)."""
    tagged_posts = Post.objects.filter(tags__name__iexact=slug)
    return render(request, 'blog/posts_by_tag.html', {
        'posts': tagged_posts,
        'slug': slug
    })
def user_login(request):
    """Authenticate and log a user in, or render the login form."""
    if request.method == 'POST':
        user = authenticate(username=request.POST['username'],
                            password=request.POST['password'])
        if user is not None:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect('/blog')
            # Authenticated but deactivated accounts may not log in.
            return HttpResponse('Your account is not activated')
    # GET, or failed authentication: show a fresh login form.
    return render(request, 'blog/login.html', {'loginform': LoginForm()})
def user_logout(request):
    """End the current session and return to the blog index."""
    logout(request)
    return HttpResponseRedirect('/blog')
def user_posts(request):
    """List the posts owned by the logged-in user."""
    own_posts = Post.objects.filter(owner__id__exact=request.user.id)
    return render(request, 'blog/user_posts.html', {'posts': own_posts})
def change_comment_status(request):
    """AJAX endpoint: set a comment's moderation status.

    Returns JSON ``{'status': '1'}`` after a successful POST update and
    ``{'status': '0'}`` for any other request method.
    """
    payload = {'status': '0'}
    if request.method == 'POST':
        comment = Comment.objects.get(pk=request.POST.get('comment_id'))
        comment.status = request.POST.get('status')
        comment.save()
        payload = {'status': '1'}
    return HttpResponse(json.dumps(payload))
| StarcoderdataPython |
4999164 | <reponame>thefstock/FirstockPy
"""
Request and response models for save fcm token
"""
from typing import Optional
from datetime import datetime
from pydantic import BaseModel
from ....common.enums import ResponseStatus
from ....utils.decoders import build_loader, datetime_decoder
__all__ = ['SaveFCMTokenRequestModel', 'SaveFCMTokenResponseModel']
class SaveFCMTokenRequestModel(BaseModel):
    """
    The request model for the save-FCM-token endpoint
    (the original docstring said "logout endpoint" -- a copy-paste slip).
    """
    uid: str
    """The user id of the login user"""
    fcmtkn: str
    """FCM token collected from device"""
class SaveFCMTokenResponseModel(BaseModel):
    """
    The response model for the save-FCM-token endpoint
    (the original docstring said "logout endpoint" -- a copy-paste slip).
    """
    stat: ResponseStatus
    """The save-FCM-token success or failure status"""
    request_time: Optional[datetime]
    """It will be present only on successful response."""
    emsg: Optional[str]
    """Error message if the request failed"""
    class Config:
        """model configuration"""
        # Decode the server's request_time string into a datetime on load.
        json_loads = build_loader({
            "request_time": datetime_decoder()
        })
5162945 | <reponame>john04047210/mira_wepy_server
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2018 QiaoPeng.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
"""weixin program develop"""
# TODO: This is an example file. Remove it if your package does not use any
# extra configuration variables.
WXPY_INDEX_DEFAULT_VALUE = 'foobar'
"""Default value for the application."""
WXPY_INDEX_BASE_TEMPLATE = 'wxpy_index/base.html'
"""Default base template for the demo page."""
CACHE_TYPE = 'redis'
CACHE_REDIS_URL = 'redis://wepy_redis:6379/1'
"""Redis location and database."""
CACHE_DEFAULT_TIMEOUT = 3600
WXPY_APPID = {
'wxa55f028bafde4230': {
'appsecret': '<KEY>',
'with_ticket': False
},
'wxb4dc1e6c1d807494': {
# wepy.shilikaif.com/mall
'appsecret': '<KEY>',
'with_ticket': True
},
'wx23aa0f72539f1dbc': {
# lingweimeizhuang
'appsecret': 'cf835a2c2464811ad3c588e2786588fb',
'with_ticket': True
}
}
WXPY_APPID_DEF = 'wxa55f028bafde4230' # ๆฅๆฌ่ช็ๅญฆ็ฟ
WXPY_SCHEMA = 'https:'
WXPY_BASE_URL = WXPY_SCHEMA + '//api.weixin.qq.com'
WXPY_CODE2SESSION_URL = WXPY_BASE_URL + '/sns/jscode2session'
WXPY_GET_TOKEN_URL = WXPY_BASE_URL + '/cgi-bin/token'
WXPY_GET_TICKET_URL = WXPY_BASE_URL + '/cgi-bin/ticket/getticket'
| StarcoderdataPython |
3570016 | <reponame>scottwedge/OpenStack-Stein
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from searchlight.api.middleware import context
import searchlight.context
import searchlight.tests.utils as test_utils
class TestContextMiddleware(test_utils.BaseTestCase):
    """Checks the context middleware's response header and admin-project flag."""

    def test_response(self):
        middleware = context.ContextMiddleware(None)
        request = webob.Request.blank('/')
        request.context = searchlight.context.RequestContext()
        expected_id = request.context.request_id

        response = webob.Response()
        response.request = request
        middleware.process_response(response)

        header_id = response.headers['x-openstack-request-id']
        self.assertEqual(expected_id, header_id)
        # The id must carry exactly one 'req-' prefix, never a doubled one.
        self.assertFalse(header_id.startswith('req-req-'))
        self.assertTrue(header_id.startswith('req-'))

    def test_is_admin_project(self):
        middleware = context.ContextMiddleware(None)

        # Without the header, the context defaults to an admin project.
        ctx = middleware._get_authenticated_context(webob.Request.blank('/'))
        self.assertTrue(ctx.is_admin_project)

        # The X-Is-Admin-Project header drives the flag explicitly.
        for header_value, expect_admin in (('True', True), ('False', False)):
            req = webob.Request.blank(
                '/', headers={'X-Is-Admin-Project': header_value})
            ctx = middleware._get_authenticated_context(req)
            if expect_admin:
                self.assertTrue(ctx.is_admin_project)
            else:
                self.assertFalse(ctx.is_admin_project)
| StarcoderdataPython |
1697119 | <reponame>DouglasYuen/LocalisationCleaner<gh_stars>0
import sys
## Main function, reads from the filename passed in and writes to the output file specified
## The expected number of command line arguments has a size of 3, so don't run if not all parameters are present
def Main():
    """Entry point: dispatch to convertFile when both file names are given.

    Expects exactly two command line arguments (input and output name,
    without the .strings extension); otherwise prints usage help.
    """
    if len(sys.argv) != 3:
        print("Please ensure you have entered the command line arguments as follows: \n python3 LocalisationConverter <input file name> <output file name>")
        print("Note that the extension *.strings is not required.")
        return
    convertFile(sys.argv[1], sys.argv[2])
print("Note that the extension *.strings is not required.")
## Reads from the file with the inputName and writes the results to the outputName
## On success, print that the file with the name specified was created
## Otherwise, if the input file cannot be found, halt execution and print an error for the user
def convertFile(inputName, outputName):
    """Read <inputName>.strings, blank every string value, write <outputName>.strings.

    Lines of the form ``"KEY" = "value";`` have everything right of the first
    '=' replaced with an empty string; all other lines are copied unchanged.
    """
    inputFileName = inputName + ".strings"
    outputFileName = outputName + ".strings"
    try:
        # `with` guarantees the handle is closed even if reading fails
        # (the original leaked the handle on any error).
        with open(inputFileName, "r") as inputFile:
            lineArray = []
            for line in inputFile:
                # Split on the FIRST '=' only, so values that themselves
                # contain '=' (URLs, formulas) are still blanked; the
                # original split on every '=' and passed such lines through.
                parts = line.split("=", 1)
                if len(parts) == 2:
                    lineArray.append(parts[0] + "= \"\"; \n")
                else:
                    lineArray.append(line)
    except FileNotFoundError:
        # Only a missing input file maps to this message; other I/O errors
        # now propagate instead of being mislabelled by a bare `except`.
        print("Error: no file {name} with the .strings extension was found.".format(name = inputName))
        return
    with open(outputFileName, "w") as outputFile:
        outputFile.writelines(lineArray)
    print("Success: your output file was written to {name}.strings".format(name = outputName))
## Run the program by calling the Main() function
## Run the program only when executed as a script, not when imported.
if __name__ == '__main__':
    Main()
6537766 | <filename>federal/2013/code/budget2013_common.py
# coding: utf-8
import codecs
import pickle
import json
import re
import argparse
def join_lines(lines):
    """Strip each line and glue the results together with single spaces."""
    stripped = (line.strip() for line in lines)
    return " ".join(stripped)
# Absolute tolerance used when comparing floating point budget figures.
epsilon = 1e-4

def numbers_equal(n1, n2):
    """Return True when *n1* and *n2* differ by less than ``epsilon``."""
    difference = n1 - n2
    return -epsilon < difference < epsilon
def parse_value(s):
    """Parse a Russian-formatted number: ',' decimal comma, spaces as group separators."""
    normalized = s.replace(" ", "").replace(",", ".")
    return float(normalized)
class JsonSimpleEncoder(json.JSONEncoder):
    """Serialize arbitrary objects by falling back to their attribute dict."""
    def default(self, o):
        # vars(o) is equivalent to o.__dict__ for instances with a dict.
        return vars(o)
def write_pickle_document(document, output_file_name):
    """Pickle *document* into the given file (binary mode)."""
    with open(output_file_name, "wb") as sink:
        pickle.dump(document, sink)
def write_text_document(document, output_file_name, write_func):
    """Open a UTF-8 text file and delegate the actual writing to *write_func*."""
    with codecs.open(output_file_name, "w", encoding="utf-8") as sink:
        write_func(sink, document)
def write_json_document(document, output_file_name, json_encoder_class):
    """Dump *document* as compact JSON using the supplied encoder class."""
    with open(output_file_name, "w") as sink:
        json.dump(document, sink, cls=json_encoder_class)
def write_json_pretty_document(document, output_file_name, json_encoder_class):
    """Dump *document* as indented JSON with \\uXXXX escapes decoded to real characters."""
    pretty = json.dumps(document, cls=json_encoder_class, indent=4)
    with codecs.open(output_file_name, "w", encoding="utf-8") as sink:
        sink.write(replace_json_unicode_symbols(pretty))
def replace_json_unicode_symbols(s):
    """Turn literal \\uXXXX escape sequences in *s* back into real characters.

    Uses chr() rather than unichr(): unichr is Python-2-only and raises
    NameError on Python 3, where this module otherwise runs fine.
    """
    replaced = re.sub(r"\\u([0-9a-fA-F]{4})", lambda m: chr(int(m.group(1), 16)), s)
    return replaced
def get_default_argument_parser():
    """Build the command-line parser shared by the budget-processing scripts."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", dest="input_file_name", required=True,
                        help="Path to the input text file")
    # Optional output sinks; each one is independent of the others.
    output_options = [
        ("--output-pickle", "output_pickle_file_name", "Path to the output pickle file"),
        ("--output-text", "output_text_file_name", "Path to the output text file"),
        ("--output-json", "output_json_file_name", "Path to the output JSON file"),
        ("--output-json-pretty", "output_json_pretty_file_name",
         "Path to the output JSON file with indents and russian letters"),
    ]
    for flag, dest, help_text in output_options:
        parser.add_argument(flag, dest=dest, help=help_text)
    return parser
| StarcoderdataPython |
1797372 | from optparse import OptionParser
import os
import sys
################################################################################
# generate_activations.py
#
# Create a tsv activations file for each of the flanks found in the flanks
# fasta file.
################################################################################
################################################################################
# main
################################################################################
def main():
    """Write a tsv activations file: one all-zero row per flank in the fasta."""
    usage = 'usage: %prog [options] <flanks_fa_file>'
    parser = OptionParser(usage)
    parser.add_option('-o', dest='act_output', default='encode_roadmap_flanks_act.txt', help='output file name. [Default: %default]')
    # type='int' so a command-line value is parsed as a number; without it,
    # options.output_dim arrived as a string and range() raised a TypeError.
    parser.add_option('-l', dest='output_dim', default=164, type='int', help='output dimension of the activations for each peak [Default: %default]')
    (options,args) = parser.parse_args()

    if len(args) != 1:
        parser.error('Must provide fasta file providing BED-style sequences file paths.')
    else:
        flanks_fa_file = args[0]

    #################################################################
    # read in flanks fasta, generate & write output to act file
    #################################################################
    # `with` closes both files even if an error occurs mid-stream.
    with open(options.act_output, 'w') as outfile:
        with open(flanks_fa_file, 'r') as f:
            for flank in f:
                # Only fasta header lines ('>name') define a flank entry.
                if not flank.startswith('>'):
                    continue
                flank_name = flank.lstrip('>').rstrip()
                flank_act_output = flank_name + '\t' + make_flank_acts(options.output_dim)
                print(flank_act_output, file=outfile)
def make_flank_acts(length):
    """Return *length* zeros joined by tabs (one blank activation row)."""
    return '\t'.join(['0'] * length)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| StarcoderdataPython |
6569606 | <reponame>kevintmcdonnell/stars
from io import StringIO
from os import path as p
import unittest
import unittest.mock as mock
import sbumips
'''
https://github.com/sbustars/STARS
Copyright 2020 <NAME>, <NAME>, and <NAME>
Developed by <NAME> (<EMAIL>),
<NAME> (<EMAIL>),
and <NAME> (<EMAIL>)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
class MyTestCase(unittest.TestCase):
    """End-to-end simulator tests: run each <op>_test.asm through the CLI
    entry point and compare captured stdout against the expected output.

    The original defined an __init__ that only forwarded to super(); it added
    nothing and has been removed (unittest.TestCase's own __init__ suffices).
    """

    def execute_file(self, op):
        """Run <op>_test.asm via the command-line script and return its stdout."""
        with mock.patch("sys.stdout", new_callable=StringIO) as mock_stdout:
            sbumips.main(["--noGui", f'{p.dirname(__file__)}/{op}_test.asm'])
            output = mock_stdout.getvalue()
        return output

    def execute_test(self, op, expected_output):
        """Assert that running <op>_test.asm prints exactly *expected_output*."""
        output = self.execute_file(op)
        self.assertEqual(expected_output, output)

    # The actual tests start here
    def test_abs(self):
        self.execute_test('abs', '30000 30000')

    def test_neg(self):
        self.execute_test('neg', '-30000 30000 0')

    def test_seq_sne(self):
        self.execute_test('seq_sne', '1001')

    def test_sge_sgeu(self):
        self.execute_test('sge_sgeu', '1101')

    def test_sgt_sgtu(self):
        self.execute_test('sgt_sgtu', '0001')

    def test_sle_sleu(self):
        self.execute_test('sle_sleu', '1110')

    def test_not(self):
        self.execute_test('not', '0xf0f0f0f0 0x0f0f0f0f')

    # I guess you could say we're on a 'rol'
    def test_rol_ror(self):
        self.execute_test('rol_ror', '0x4abcd123 0xbcd1234a')

    def test_rolv_rorv(self):
        self.execute_test('rolv_rorv', '0x4abcd123 0xbcd1234a')

    def test_move_li(self):
        self.execute_test('move_li', '300 -300 3000000 -3000000')

    def test_beqz_bnez(self):
        self.execute_test('beqz_bnez', '1001')

    def test_bgt_bgtu(self):
        self.execute_test('bgt_bgtu', '1110')

    def test_bge_bgeu(self):
        self.execute_test('bge_bgeu', '1110')

    def test_blt_bltu(self):
        self.execute_test('blt_bltu', '0001')

    def test_ble_bleu(self):
        self.execute_test('ble_bleu', '1011')

    def test_b(self):
        self.execute_test('b', '1')

    def test_load_store(self):
        self.execute_test('load_store', '42 11790 216220320')
| StarcoderdataPython |
1947669 | <filename>src/parameters.py
###################################################################################################
# Repository: https://github.com/lgervasoni/urbansprawl
# MIT License
###################################################################################################
# Root folder for all generated geo-data extracts.
storage_folder = 'data'
images_folder = 'images'
# Format for load/save the geo-data ['geojson','shp']
geo_format = 'geojson' # 'shp'
# Fiona/OGR driver name matching geo_format above.
geo_driver = 'GeoJSON' # 'ESRI Shapefile'
### Files referring to population gridded data
files = {}
# Geometries for INSEE population data
files["insee_shapefile"] = "../200m-carreaux-metropole/carr_pop4326.shp"
# dbf file with attributes
files["insee_data_file"] = "../200m-carreaux-metropole/car_m.dbf"
# World-wide gridded population data: Converted to shapefile (EPSG 4326)
files["gpw_population_world"] = "../gpwv4/gpw-v4.shp"
def get_dataframes_filenames(city_ref_file):
    """
    Get data frame file names for input city

    Parameters
    ----------
    city_ref_file : string
        name of input city

    Returns
    ----------
    [ string, string, string ]
        returns filenames for buildings, building parts, and points of interest
    """
    import os
    # exist_ok avoids the isdir/makedirs race when several processes start together.
    os.makedirs(storage_folder, exist_ok=True)
    geo_poly_file = storage_folder+"/"+city_ref_file+"_poly."+geo_format
    geo_poly_parts_file = storage_folder+"/"+city_ref_file+"_poly_parts."+geo_format
    geo_point_file = storage_folder+"/"+city_ref_file+"_poi."+geo_format
    return geo_poly_file, geo_poly_parts_file, geo_point_file
def get_population_extract_filename(city_ref_file, data_source):
    """
    Get data population extract filename for input city

    Parameters
    ----------
    city_ref_file : string
        name of input city
    data_source : string
        desired population data source

    Returns
    ----------
    string
        returns the population extract filename
    """
    import os
    # exist_ok avoids the isdir/makedirs race when several processes start together.
    os.makedirs(storage_folder + "/" + data_source, exist_ok=True)
    return storage_folder + "/" + data_source + "/" + city_ref_file + "_population.shp"
def get_population_urban_features_filename(city_ref_file, data_source):
    """
    Get population urban features extract filename for input city
    Force GeoJSON format: Shapefiles truncate column names

    Parameters
    ----------
    city_ref_file : string
        name of input city
    data_source : string
        desired population data source

    Returns
    ----------
    string
        returns the population extract filename
    """
    import os
    # exist_ok avoids the isdir/makedirs race when several processes start together.
    os.makedirs(storage_folder + "/" + data_source, exist_ok=True)
    return storage_folder + "/" + data_source + "/" + city_ref_file + "_urban_features." + geo_format
def get_population_training_validating_filename(city_ref_file, data_source="training"):
    """
    Get population normalised urban features extract and population densities filename for input city
    Stored in Numpy.Arrays

    Parameters
    ----------
    city_ref_file : string
        name of input city
    data_source : string
        sub-folder name for the data source (default "training"); the
        original docstring omitted this parameter

    Returns
    ----------
    string
        returns the numpy stored/storing filename
    """
    import os
    # exist_ok avoids the isdir/makedirs race when several processes start together.
    os.makedirs(storage_folder + "/" + data_source, exist_ok=True)
    return storage_folder + "/" + data_source + "/" + city_ref_file + "_X_Y.npz"
260870 | <reponame>jason-168/MLCode
# Code from Chapter 14 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2008, 2014
# Examples of using the k-means and SOM algorithms on the Iris dataset
import pylab as pl
import numpy as np
# ---------------------------------------------------------------------------
# Load the Iris data and normalise it:
#  - centre the four feature columns on their means
#  - scale each feature by its absolute extreme into roughly [-1, 1]
# NOTE(review): this is Python 2 code (print statements, shuffling a range()
# list) and uses the deprecated pylab.find; it will not run unmodified on
# Python 3 / current matplotlib.
iris = np.loadtxt('../3 MLP/iris_proc.data',delimiter=',')
iris[:,:4] = iris[:,:4]-iris[:,:4].mean(axis=0)
imax = np.concatenate((iris.max(axis=0)*np.ones((1,5)),iris.min(axis=0)*np.ones((1,5))),axis=0).max(axis=0)
iris[:,:4] = iris[:,:4]/imax[:4]
# Column 4 holds the class label (0, 1 or 2).
target = iris[:,4]
# Shuffle the samples (range() returns a list on Python 2, so it is mutable).
order = range(np.shape(iris)[0])
np.random.shuffle(order)
iris = iris[order,:]
target = target[order]
# Split: every 2nd row for training, a quarter each for validation and test.
train = iris[::2,0:4]
traint = target[::2]
valid = iris[1::4,0:4]
validt = target[1::4]
test = iris[3::4,0:4]
testt = target[3::4]
#print train.max(axis=0), train.min(axis=0)
# --- k-means clustering: 3 clusters on the training set ---
import kmeansnet
#import kmeans as kmeansnet
net = kmeansnet.kmeans(3,train)
net.kmeanstrain(train)
cluster = net.kmeansfwd(test)
# Compare cluster assignments with the true test labels (Python 2 prints).
print 1.*cluster
print iris[3::4,4]
# --- Self-organising map: 6x6 grid trained on the same data ---
import som
net = som.som(6,6,train)
net.somtrain(train,400)
# Best-matching map unit for every training sample.
best = np.zeros(np.shape(train)[0],dtype=int)
for i in range(np.shape(train)[0]):
    best[i],activation = net.somfwd(train[i,:])
# Figure 1: SOM grid with markers shaped/coloured by true class (training data).
pl.plot(net.map[0,:],net.map[1,:],'k.',ms=15)
where = pl.find(traint == 0)
pl.plot(net.map[0,best[where]],net.map[1,best[where]],'rs',ms=30)
where = pl.find(traint == 1)
pl.plot(net.map[0,best[where]],net.map[1,best[where]],'gv',ms=30)
where = pl.find(traint == 2)
pl.plot(net.map[0,best[where]],net.map[1,best[where]],'b^',ms=30)
pl.axis([-0.1,1.1,-0.1,1.1])
pl.axis('off')
# Figure 2: the same visualisation for the held-out test data.
pl.figure(2)
best = np.zeros(np.shape(test)[0],dtype=int)
for i in range(np.shape(test)[0]):
    best[i],activation = net.somfwd(test[i,:])
pl.plot(net.map[0,:],net.map[1,:],'k.',ms=15)
where = pl.find(testt == 0)
pl.plot(net.map[0,best[where]],net.map[1,best[where]],'rs',ms=30)
where = pl.find(testt == 1)
pl.plot(net.map[0,best[where]],net.map[1,best[where]],'gv',ms=30)
where = pl.find(testt == 2)
pl.plot(net.map[0,best[where]],net.map[1,best[where]],'b^',ms=30)
pl.axis([-0.1,1.1,-0.1,1.1])
pl.axis('off')
pl.show()
| StarcoderdataPython |
3543650 | """States are containers for occupancy values. States hold a
number of numpy arrays which are all of the same length.
state.q_val holds a numpy array of quantity values
These are the most important values in the system, they
show the quantity of material (in moles) of a given species
in a given compartment.
The rest of the arrays are useful for making selections:
state.species is a numpy (string) array of species IDs
state.compartment is a numpy (string) array of compartment IDs
state.x_pos is a numpy array of x_positions of the compartment centers
state.y_pos is a numpy array of y_positions of the compartment centers
state.z_pos is a numpy array of z_positions of the compartment centers
Note that species values for a given compartment will only be created
if there is either:
1) a Reaction in that compartment which involves that Species
or 2) a connection from that compartment which involves that Species
When initializing the state, an index is constructed to easily
determine an index given a compartment and a species:
index = state.index[compID][specID]
"""
from openrxn import unit
from openrxn.model import FlatModel
import numpy as np
import pandas as pd
class State(object):
    """Container of occupancy values for a flattened reaction model.

    Holds parallel numpy arrays, all of the same length: ``q_val`` (quantity
    of material, in moles, for each compartment/species pair), ``species``
    and ``compartment`` ID arrays, and optional x/y/z compartment-center
    positions.  ``index[compartment_id][species_id]`` maps a pair to its
    position in those arrays.
    """

    def __init__(self, model=None, dataframe=None, units=None):
        """State objects can be initialized using either a
        FlatModel or a dataframe object. At minimum, the
        dataframe needs to have "species" and "compartment"
        columns.

        units: per-axis length units for stored positions; defaults to
        nanometers on all three axes.
        """
        self.index = {}
        # Build the default unit list per instance instead of using a mutable
        # default argument (which would be shared across all States).
        self.units = [unit.nanometer]*3 if units is None else units
        if model is not None:
            assert isinstance(model,FlatModel), "Error! A state object needs a FlatModel to initialize."
            self._init_from_model(model)
        elif dataframe is not None:
            if 'species' not in dataframe.columns or 'compartment' not in dataframe.columns:
                raise ValueError("Error! dataframe must contain columns for 'species' and 'compartment'")
            self._init_from_df(dataframe)
        # NOTE(review): if neither model nor dataframe is given,
        # self.compartment is never set and the next line raises
        # AttributeError — confirm callers always pass one of the two.
        self.size = len(self.compartment)
        self.q_val = np.zeros((self.size))

    def _init_from_model(self, model):
        """Populate the arrays and index from a FlatModel.

        A species entry is created for a compartment only if some reaction or
        connection of that compartment involves the species.
        """
        big_species_list = []
        big_comp_list = []
        big_x_list = []
        big_y_list = []
        big_z_list = []
        running_index = 0
        for c_tag, c in model.compartments.items():
            self.index[c_tag] = {}
            # figure out which species are associated with this compartment
            spec = []
            for other_c, conn in c.connections.items():
                spec += list(conn[1].species_rates.keys())
            for rxn in c.reactions:
                spec += rxn.reactant_IDs
                spec += rxn.product_IDs
            spec_set = set(spec)
            # NOTE(review): iteration order of a set is arbitrary, so the
            # species ordering within a compartment is not deterministic.
            for i,s in enumerate(spec_set):
                self.index[c_tag][s] = running_index + i
            running_index += len(spec_set)
            big_species_list += list(spec_set)
            big_comp_list += [c.ID]*len(spec_set)
            # for x, y and z, average the boundary values
            x = [None,None,None]
            for i in range(len(c.pos)):
                x[i] = 0.5*(c.pos[i][0]+c.pos[i][1]).to(self.units[i]).magnitude
            big_x_list += [x[0]]*len(spec_set)
            big_y_list += [x[1]]*len(spec_set)
            big_z_list += [x[2]]*len(spec_set)
        self.species = np.array(big_species_list)
        self.compartment = np.array(big_comp_list)
        self.x_pos = np.array(big_x_list)
        self.y_pos = np.array(big_y_list)
        self.z_pos = np.array(big_z_list)

    def _init_from_df(self, df):
        """Populate the arrays and index from a dataframe.

        Position arrays are only created for the position columns present in
        the dataframe.
        """
        # assign columns to self arrays
        self.species = np.array(df['species'])
        self.compartment = np.array(df['compartment'])
        if 'x_pos' in df.columns:
            self.x_pos = np.array(df['x_pos'])
        if 'y_pos' in df.columns:
            self.y_pos = np.array(df['y_pos'])
        if 'z_pos' in df.columns:
            self.z_pos = np.array(df['z_pos'])
        # building self.index dictionary
        # NOTE(review): rows are addressed positionally, which assumes the
        # dataframe has a default RangeIndex (0..n-1) — confirm for callers
        # that pass filtered/reindexed frames.
        for i in range(len(df['species'])):
            if df['compartment'][i] not in self.index:
                self.index[df['compartment'][i]] = {}
            self.index[df['compartment'][i]][df['species'][i]] = i

    def to_dataframe(self):
        """Return the state as a pandas DataFrame (one row per array entry)."""
        df = pd.DataFrame()
        df['species'] = self.species
        df['compartment'] = self.compartment
        df['q_val'] = self.q_val
        if hasattr(self,'x_pos'):
            df['x_pos'] = self.x_pos
        if hasattr(self,'y_pos'):
            df['y_pos'] = self.y_pos
        if hasattr(self,'z_pos'):
            df['z_pos'] = self.z_pos
        return df

    def to_csv(self, filename):
        """Write the full state (including q_val) to a CSV file."""
        df = self.to_dataframe()
        df.to_csv(filename)

    def to_csv_no_q(self, filename):
        """Write the state to CSV, omitting the q_val column."""
        df = self.to_dataframe()
        cols = [c for c in df.columns if c != 'q_val']
        df.to_csv(filename,columns=cols)
| StarcoderdataPython |
8043448 | <gh_stars>0
import pandas as pd
import matplotlib.pyplot as plt
def calculate(model):
    """Load '<model>.csv' and append Accuracy/Precision/Recall/F1 columns.

    Metrics are derived from the correctPredictions, totalPredictions and
    totalAnswers columns; Accuracy here is the Jaccard-style ratio
    correct / (predicted + answered - correct).
    """
    df = pd.read_csv(model + '.csv')
    correct = df['correctPredictions']
    predicted = df['totalPredictions']
    answered = df['totalAnswers']
    df['Accuracy'] = correct / (predicted + answered - correct)
    df['Precision'] = correct / answered
    df['Recall'] = correct / predicted
    df['F1'] = 2 * correct / (predicted + answered)
    return df
def show_plot(statistic, dfs, all_models):
    """Bar-plot the mean of *statistic* for each model.

    dfs maps model name -> dataframe containing a *statistic* column; one
    bar per entry of *all_models*, annotated with the rounded mean value.
    """
    plt.figure()
    plt.ylim(0, 1)
    plt.title(statistic)
    values = []
    for mod in all_models:
        val = dfs[mod][statistic].mean()
        values.append(val)
        plt.text(mod, val + 0.02, round(val, 3), ha='center')
    # Bug fix: previously plotted against the module-level ``models`` list
    # instead of the ``all_models`` parameter, so calling this function with
    # any other model list silently plotted the wrong bars (or raised).
    plt.bar(all_models, values)
    plt.show()
# Compute the metric dataframes for each model, then render one bar chart
# per metric (expects '<model>.csv' files in the working directory).
dataframes = {}
models = ['CNN14', 'ResNet22', 'DaiNet19', 'MobileNetV1']
for model in models:
    dataframes[model] = calculate(model)
show_plot('Accuracy', dataframes, models)
show_plot('Precision', dataframes, models)
show_plot('Recall', dataframes, models)
show_plot('F1', dataframes, models)
# Scatter plot: mean accuracy vs. total prediction time (converted to minutes).
plt.figure()
plt.title('Accuracy compared to time taken to process 1000 wav files')
plt.ylim(0, 1)
plt.ylabel('Accuracy')
plt.xlabel('Minutes')
values = []
time = []
for model in models:
    model_time = dataframes[model]['prediction_time_seconds'].sum() / 60
    model_acc = dataframes[model]['Accuracy'].mean()
    time.append(model_time)
    values.append(model_acc)
    # Label each point with its model name, slightly above the marker.
    plt.text(model_time, model_acc + 0.01, model)
plt.plot(time, values, 'ro')
plt.show()
| StarcoderdataPython |
4950904 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import time
import requests
round_delay = 20
def log(msg):
    """Write *msg* to stdout followed by a newline, flushing immediately."""
    sys.stdout.write("{}\n".format(msg))
    sys.stdout.flush()
def look_for_my_change(gate, cid):
    """Scan the pipeline's change queues for change number *cid*.

    *cid* is compared against the change id without its patchset suffix
    (the part before the comma).  Returns the matching change dict, or
    None when the change is not queued.
    """
    for queue in gate['change_queues']:
        if not queue['heads']:
            continue
        for head in queue['heads']:
            for change in head:
                if change['id'].split(',')[0] != cid:
                    continue
                log("Found change in shared queue: %s" % queue['name'])
                return change
def check_jobs_status(my_change):
    """Summarise job states for *my_change*.

    Returns a dict mapping job name -> 0 (success), 1 (failure) or
    2 (still running).  The job whose name equals the module-level
    *myname* (this job itself) is excluded from the summary.
    """
    status = {}
    for job in my_change['jobs']:
        name = job['name']
        if name == myname:
            continue
        if not job['end_time']:
            log("Job: %s still running" % name)
            status[name] = 2
        else:
            log("Job: %s terminated with status: %s" % (
                name, job['result']))
            status[name] = 0 if job['result'] == 'SUCCESS' else 1
    return status
def fetch_get_pipeline_status(host):
    """Download Zuul's status.json from *host* and return the 'gate' pipeline."""
    log("Fetching Zuul status")
    payload = requests.get("%s/status.json" % host).json()
    gates = [p for p in payload['pipelines'] if p['name'] == 'gate']
    # Raises IndexError when no 'gate' pipeline exists, as before.
    return gates[0]
def check_non_voting(status, my_change):
    """Return True iff every failed job in *status* is non-voting.

    *status* maps job name -> state code (1 means failed); the voting flag
    is looked up in my_change['jobs'].
    """
    failed_names = [name for name, state in status.items() if state == 1]
    for name in failed_names:
        job = [j for j in my_change['jobs'] if j['name'] == name][0]
        if job['voting']:
            log("Job: %s is voting !" % name)
            return False
    return True
if __name__ == "__main__":
    # Poll the Zuul gate pipeline until every other job for this change has
    # finished, then exit 0 on success (or all failures non-voting) and 1
    # when the change is missing or a voting job failed.
    host = os.environ['ZUUL_URL'].rstrip('/p')
    myname = os.environ['JOB_NAME']
    change = os.environ['ZUUL_CHANGE']
    while True:
        log("")
        gate = fetch_get_pipeline_status(host)
        my_change = look_for_my_change(gate, change)
        if not my_change:
            log("Error. Change does not exists !")
            sys.exit(1)
        # Only evaluate job results once the change is at the head of its queue.
        if my_change['item_ahead'] is None:
            log("Found current jobs running along with me")
            status = check_jobs_status(my_change)
            # -1 accounts for this watcher job, excluded from the status dict.
            if len([v for v in status.values() if v == 0]) == \
                len(my_change['jobs']) - 1:
                log("All jobs succeed for this change")
                break
            elif len([v for v in status.values() if v == 2]):
                log("At least one job is in progress. Waiting ...")
                time.sleep(round_delay)
                continue
            else:
                if check_non_voting(status, my_change):
                    log("All jobs in failure are non voting")
                    break
                else:
                    log("Jobs finished but at least one voting job failed")
                    sys.exit(1)
        else:
            log("Change is not ahead of the shared queue. waiting ...")
            time.sleep(round_delay)
| StarcoderdataPython |
6450847 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def dice_loss(pred, target):
    """Pairwise soft-Dice distance between two stacks of masks.

    pred: (M, H, W) array; target: (N, H, W) array.
    Returns an (M, N) matrix whose (i, j) entry is 1 - Dice(pred[i], target[j]),
    with a +1 smoothing term in numerator and denominator to avoid 0/0.
    """
    smooth = 1.0
    p = pred.reshape(pred.shape[0], 1, -1)
    t = target.reshape(1, target.shape[0], -1)
    overlap = np.sum(p * t, axis=-1)
    denom = np.sum(p * p, axis=-1) + np.sum(t * t, axis=-1) + smooth
    return 1 - (2.0 * overlap + smooth) / denom
def fuse_mask(tracks, dets, cost_matrics, lambda_=0.8):
    """Blend a (num_tracks, num_dets) cost matrix with mask Dice distances.

    Stacks the ``mask`` attribute of every track and detection, computes the
    pairwise Dice distance, and returns
    lambda_ * cost + (1 - lambda_) * mask_dist.
    """
    track_masks = np.stack([trk.mask for trk in tracks])
    det_masks = np.stack([det.mask for det in dets])
    mask_dists = dice_loss(track_masks, det_masks)
    return lambda_ * cost_matrics + (1.0 - lambda_) * mask_dists
def oks_dist(predict, anno, delta=0.05):
    """Pairwise (1 - OKS) distance between two sets of 2-D keypoint skeletons.

    predict: (M, K, 2); anno: (N, K, 2).  Returns an (M, N) matrix.  The
    Gaussian falloff of each pair is scaled by the area of the bounding box
    enclosing both skeletons.
    """
    num_pred, kp_pred = predict.shape[:2]
    num_anno, kp_anno = anno.shape[:2]
    pred_grid = predict.reshape(num_pred, 1, kp_pred, -1).repeat(num_anno, axis=1)
    anno_grid = anno.reshape(1, num_anno, kp_anno, -1).repeat(num_pred, axis=0)
    # Joint coordinate extents of each (prediction, annotation) pair.
    xs = np.concatenate((anno_grid[:, :, :, 0], pred_grid[:, :, :, 0]), axis=-1)
    ys = np.concatenate((anno_grid[:, :, :, 1], pred_grid[:, :, :, 1]), axis=-1)
    area = (xs.max(axis=-1) - xs.min(axis=-1)) * (ys.max(axis=-1) - ys.min(axis=-1))
    scale = area.reshape(num_pred, num_anno, 1)
    sq_dist = np.sum((anno_grid - pred_grid) ** 2, axis=-1)
    oks = np.mean(np.exp(-sq_dist / 2 / scale / delta ** 2), axis=-1)
    return 1 - oks
def fuse_pose(tracks, dets, cost_matrics, lambda_=0.8):
    """Blend a cost matrix with pairwise (1 - OKS) keypoint distances.

    Each track/detection exposes ``hp``, a flat keypoint sequence reshaped
    here to (K, 2).  Returns lambda_ * cost + (1 - lambda_) * pose_dist.
    """
    track_kps = np.stack([np.array(trk.hp).reshape(-1, 2) for trk in tracks])
    det_kps = np.stack([np.array(det.hp).reshape(-1, 2) for det in dets])
    pose_dists = oks_dist(track_kps, det_kps)
    return lambda_ * cost_matrics + (1.0 - lambda_) * pose_dists
def fuse_depth(tracks, dets, cost_matrics, lambda_=0.8):
    """Blend a cost matrix with normalised squared depth differences.

    Depth is read from ``backup[10]`` of every track and detection.  The
    squared pairwise depth gaps are scaled into [0, 1] by their maximum
    before mixing: lambda_ * cost + (1 - lambda_) * depth_dist.
    """
    tracks_depth = np.array([track.backup[10] for track in tracks]).reshape(-1, 1)
    dets_depth = np.array([det.backup[10] for det in dets]).reshape(1, -1)
    depth_dists = (tracks_depth - dets_depth) ** 2
    max_dist = np.max(depth_dists)
    # Guard: when every depth is identical the max is 0 and the original
    # unconditional division produced a matrix of NaNs; treat all depth
    # distances as zero instead.
    if max_dist > 0:
        depth_dists = depth_dists / max_dist
    cost_matrics = lambda_ * cost_matrics + (1 - lambda_) * depth_dists
    return cost_matrics
9701733 | # -*- coding: utf-8 -*-
"""Command line interface for Axonius API Client."""
from ...context import CONTEXT_SETTINGS, click
from ...options import AUTH, add_options
from .grp_common import EXPORT, handle_export
# Reusable click options for the "update user" command.
USER_NAME = click.option(
    "--name",
    "-n",
    "name",
    help="Name of user",
    required=True,
    show_envvar=True,
    show_default=True,
)
PASSWORD = click.option(
    "--password",
    "-p",
    "password",
    # Bug fix: help text was a copy-paste of USER_NAME ("Name of user").
    help="Password of user",
    required=False,
    show_envvar=True,
    show_default=True,
)
FIRST_NAME = click.option(
    "--first-name",
    "-f",
    "first_name",
    help="First name of user",
    required=False,
    show_envvar=True,
    show_default=True,
)
LAST_NAME = click.option(
    "--last-name",
    "-l",
    "last_name",
    help="Last name of user",
    required=False,
    show_envvar=True,
    show_default=True,
)
EMAIL = click.option(
    "--email",
    "-e",
    "email",
    help="Email address of user",
    required=False,
    show_envvar=True,
    show_default=True,
)
ROLE_NAME = click.option(
    "--role-name",
    "-r",
    "role_name",
    help="Role to assign to user",
    required=False,
    show_envvar=True,
    show_default=True,
)
# Full option set for the command below: auth + export + user fields.
OPTIONS = [*AUTH, EXPORT, FIRST_NAME, LAST_NAME, EMAIL, USER_NAME, PASSWORD, ROLE_NAME]
@click.command(name="update", context_settings=CONTEXT_SETTINGS)
@add_options(OPTIONS)
@click.pass_context
def cmd(ctx, url, key, secret, export_format, name, **kwargs):
    """Update a user."""
    # The docstring above is the CLI help text shown by click.
    client = ctx.obj.start_client(url=url, key=key, secret=secret)
    with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
        # Remaining options (password, names, email, role_name) arrive via kwargs.
        data = client.system.users.update(name=name, **kwargs)
        ctx.obj.echo_ok(f"Updated user {name!r}")
    handle_export(ctx=ctx, data=data, export_format=export_format, **kwargs)
class Casa:
    """A house with a single wall whose glazed area can be queried."""

    def __init__(self, pared):
        # pared: a Pared whose ``area`` attribute holds its glazed area.
        self.pared = pared

    def superficie_acristalada(self):
        """Return the glazed surface of the house.

        Bug fix: ``Pared.area`` is a single number (the window area), so the
        previous ``sum(self.pared.area)`` raised TypeError — a number is not
        iterable.  The area is now returned directly.
        """
        return self.pared.area
class Pared:
    """A wall holding one window; caches the window's area on creation."""

    def __init__(self, ventana, orientacion):
        self.ventana = ventana
        self.orientacion = orientacion
        # Snapshot of the window area taken at construction time.
        self.area = ventana.get_area()
class Cristal:
    """A pane of glass characterised by its width."""

    def __init__(self, anchuracristal):
        self.anchuracristal = anchuracristal

    def get_anchuracristal(self):
        """Return the pane width."""
        return self.anchuracristal
class Ventana(Cristal):
    """A window: a glass pane (inherited width) plus its own area."""

    def __init__(self, anchuracristal, area):
        # Let Cristal store the pane width.
        super().__init__(anchuracristal)
        self.area = area

    def get_area(self):
        """Return the window area."""
        return self.area
# Build a house: one window on the left wall (ventana2 is created but unused).
ventana1 = Ventana(2,3)
ventana2 = Ventana(1,4)
pared1 = Pared(ventana1, "izquierda")
casita = Casa(pared1)
# NOTE(review): as defined above, superficie_acristalada() calls sum() on a
# plain number (Pared.area is an int here), which raises TypeError.
casita.superficie_acristalada()
3591509 | from .db import db
class CookingList(db.Model):
    """Join table linking a user to a recipe saved on their cooking list."""
    __tablename__ = 'cooking_lists'
    id = db.Column(db.Integer, primary_key=True)
    # Owning user and saved recipe; both required.
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
    recipe_id = db.Column(db.Integer, db.ForeignKey(
        "recipes.id"), nullable=False)
    # Timestamps maintained by the database server.
    created_at = db.Column(db.DateTime, server_default=db.func.now())
    updated_at = db.Column(db.DateTime, server_default=db.func.now(),
                           server_onupdate=db.func.now())
    users = db.relationship("User", back_populates="cooking_lists")
    recipes = db.relationship("Recipe", back_populates="cooking_lists")
    def to_dict(self):
        """Return a plain dict of this row's column values."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
| StarcoderdataPython |
3261722 | from django_evolution.mutations import ChangeField
# Django Evolution migration: widen Repository.bug_tracker to 256 characters.
MUTATIONS = [
    ChangeField('Repository', 'bug_tracker', max_length=256),
]
| StarcoderdataPython |
3464998 | from sklearn.svm import LinearSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from sklearn.model_selection import train_test_split
# Leaf-species classification: train four classifiers on train.csv, compare
# their accuracy on a held-out split, then label test.csv with the best one.
# Reading training data
train_data = pd.read_csv("train.csv")
train_data.head()
# Reading testing data
test_data = pd.read_csv("test.csv")
test_data.head()
# Encoding Data
labelencoder = LabelEncoder()
train_data["species"] = labelencoder.fit_transform(train_data["species"])
train_data.head()
# Defining dependent and Independent data
X = train_data.iloc[:, 1:]
Y = train_data["species"]
# Train-test data split
# NOTE(review): test_size=20 is an absolute sample count, not a proportion.
train_x, test_x, train_y, test_y = train_test_split(
    X, Y, random_state=5, test_size=20)
# 1. Random Forest Classifier
ran_class = RandomForestClassifier(n_estimators=100)
ran_class.fit(train_x, train_y)
predict_rf = ran_class.predict(test_x)
# NOTE(review): "Radom" typo in the printed label below is left as-is here.
print("Radom Forest Accuracy Score : ",
      metrics.accuracy_score(predict_rf, test_y))
# 2. Decision Tree Classifier
dec_class = DecisionTreeClassifier()
dec_class.fit(train_x, train_y)
predict_dt = dec_class.predict(test_x)
print("Decision Tree Accuracy Score : ",
      metrics.accuracy_score(predict_dt, test_y))
# 3. Naive Bayes Classifier
gn_class = GaussianNB()
gn_class.fit(train_x, train_y)
predict_gn = gn_class.predict(test_x)
print("Naive Bayes Accuracy Score : ",
      metrics.accuracy_score(predict_gn, test_y))
# 4. SVM Classifier
svm_class = LinearSVC()
svm_class.fit(train_x, train_y)
predict_svm = svm_class.predict(test_x)
print("SVM Accuracy Score : ", metrics.accuracy_score(predict_svm, test_y))
# Best Classifier - Naive Bayes
# NOTE(review): test_data is passed with all its columns, whereas training
# used train_data.iloc[:, 1:] — confirm test.csv has no leading id column.
predict_test = gn_class.predict(test_data)
# labelencoder.inverse_transform(predict_test) : Inverse the encoding
test_data["species"] = labelencoder.inverse_transform(predict_test)
test_data.head()
# save predicted data
test_data.to_csv("Output.csv", index=True)
| StarcoderdataPython |
1738231 | <filename>setup.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from setuptools import setup, find_packages
def load_requirements(fname):
    """Parse a pip requirements file and return its package lines.

    Blank lines and lines whose first non-space characters are '#' or '--'
    (comments and pip options) are skipped; remaining lines are stripped.
    """
    # Raw string for the regex: the previous plain "^\s*(#|--).*" relies on
    # the invalid escape sequence "\s", a SyntaxWarning on modern Python.
    is_comment = re.compile(r"^\s*(#|--).*").match
    with open(fname) as fo:
        return [line.strip() for line in fo if not is_comment(line) and line.strip()]
# Long description is README + HISTORY (with the changelog marker removed).
with open("README.rst", "rt") as f: readme = f.read()
with open("HISTORY.rst", "rt") as f: history = f.read().replace(".. :changelog:", "")
with open("arff_utils/__init__.py") as f: version_file_contents = f.read()
requirements = load_requirements("requirements.txt")
requirements_tests = load_requirements("requirements_tests.txt")
# Execute the package __init__ to extract VERSION without importing it
# (avoids importing dependencies at build time).
ver_dic = {}
exec(compile(version_file_contents, "arff_utils/__init__.py", "exec"), ver_dic)
setup(
    name="arff_utils",
    version=ver_dic["VERSION"],
    description="Library for reading and writing ARFF files and converting from ARFF to Pandas or Numpy data structures",
    long_description=readme + "\n\n" + history,
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/rbrecheisen/arff_utils",
    packages=find_packages(),
    include_package_data=True,
    install_requires=requirements,
    license="LGPL v3",
    zip_safe=False,
    keywords="arff_utils",
    classifiers=[
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Natural Language :: English",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Topic :: Scientific/Engineering"
    ],
    test_suite="tests",
    tests_require=requirements_tests
)
| StarcoderdataPython |
30115 | from typing import Tuple
from abc import abstractmethod
from torch import Tensor
from torch.nn import Module
class BaseDiscriminator(Module):
    """Abstract base for discriminator modules.

    Subclasses implement :meth:`forward_`; :meth:`forward` simply delegates
    to it, so torch's ``Module.__call__`` machinery works unchanged.
    """

    @abstractmethod
    def forward_(self, z: Tensor) -> Tuple[Tensor, Tensor]:
        """Compute the discriminator outputs for latent batch ``z``."""
        raise NotImplementedError

    def forward(self, z: Tensor) -> Tuple[Tensor, Tensor]:  # pylint: disable=arguments-differ
        # Delegate to the subclass hook.
        return self.forward_(z=z)
| StarcoderdataPython |
6665035 | import chainer
import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.frontend.chainer.converter import ChainerConverter
@wrap_template
def template(n=2, c_in=4, h_in=6, w_in=8, c_out=10, ksize=3, stride=1, pad=0, nobias=True, EPS=1e-5, description=""):
    """Build one Convolution2D test case: run chainer forward, convert the
    graph with ChainerConverter, and emit a kernel test comparing outputs."""
    link = chainer.links.Convolution2D(c_in, c_out, ksize=ksize, stride=stride, pad=pad, nobias=nobias)
    # Randomize weights so the comparison is non-trivial.
    link.W.data = np.random.rand(*link.W.shape).astype(np.float32)
    vx = chainer.Variable(np.random.rand(*(n, c_in, h_in, w_in)).astype(np.float32))
    vy = link(vx)
    graph = ChainerConverter().convert([vx], [vy])
    x = graph.inputs[0]
    y = graph.outputs[0]
    generate_kernel_test_case(
        description=f"[chainer] L.Convolution2D {description}",
        graph=graph,
        inputs={x: vx.data},
        expected={y: vy.data},
        EPS=EPS
    )
# Each case exercises one Convolution2D configuration via template() above.
# NOTE(review): nobias=True is already the template default, so test() and
# test_nobias() run the same configuration — possibly the default was meant
# to be False.
def test():
    template()
def test_nobias():
    template(nobias=True)
def test_nopadding():
    template(pad=0)
def test_irregular_kernel_size():
    template(ksize=(3, 4))
def test_irregular_stride_size():
    template(stride=(2, 3))
def test_irregular_padding_size1():
    template(pad=(1, 2))
def test_irregular_padding_size2():
    template(pad=2)
def test_irregular_padding_size3():
    template(pad=2, ksize=5)
def test_irregular_padding_size4():
    template(pad=(1, 0))
def test_irregular_size():
    template(ksize=(3, 5), stride=(2, 3), pad=(1, 3))
def test_special_size():
    # https://github.com/mil-tokyo/webdnn/issues/525
    # In case that the max position index (=n*c_in*h_in*w_in*ksize*ksize) > 1<<23
    template(n=1, c_in=1 << 6, h_in=1 << 7, w_in=1 << 7, c_out=3, ksize=(1 << 2) + 1, pad=1 << 1)
| StarcoderdataPython |
200235 | <reponame>karolinyoliveira/leetcode-ebbinghaus-practice
from typing import List
def threeSum(nums: List[int]) -> List[List[int]]:
    """Return all unique triplets in *nums* that sum to zero.

    Sorts *nums* in place, then for each anchor element runs a two-pointer
    scan over the remaining suffix; duplicate anchors and duplicate
    left-pointer values are skipped so each triplet appears exactly once.
    """
    nums.sort()
    triplets = []
    count = len(nums)
    for anchor_idx in range(count):
        anchor = nums[anchor_idx]
        # Skip repeated anchor values to avoid duplicate triplets.
        if anchor_idx and anchor == nums[anchor_idx - 1]:
            continue
        lo, hi = anchor_idx + 1, count - 1
        while lo < hi:
            total = anchor + nums[lo] + nums[hi]
            if total < 0:
                lo += 1
            elif total > 0:
                hi -= 1
            else:
                triplets.append([anchor, nums[lo], nums[hi]])
                lo += 1
                # Advance past equal left values; the pair is already recorded.
                while lo < hi and nums[lo] == nums[lo - 1]:
                    lo += 1
    return triplets
5160360 | <reponame>newfacade/machine-learning-notes
# EM
## Mixtures of Gaussians
given a training set $\left\{x^{(1)},...,x^{(n)}\right\}$ with no labels. each point $x^{(i)}$ has a latent variable $z^{(i)}$, the data is specified by a joint probability $p(x^{(i)}, z^{(i)}) = p(x^{(i)}| z^{(i)})p(z^{(i)})$
mixtures of gaussians assumes $z^{(i)}\sim Multinomial(\phi)$ (where $\phi_{j} \ge 0, \sum_{j=1}^{k}\phi_{j}=1$, $\phi_{j}$ gives $p(z^{(i)}=j)$), and $x^{(i)}|z^{(i)}=j \sim \mathcal{N}(\mu_{j}, \Sigma_{j})$. we let $k$ denote the number of values $z^{(i)}$'s can take on.
thus our model posits that each $x^{(i)}$ was generated by randomly choosing $z^{(i)}$ from $\{1,...,k\}$, and $x^{(i)}$ was drawn from one of $k$ gaussians depending on $z^{(i)}$.
the parameters of our model are $\phi, \mu, \Sigma$, to estimate them, we can write down the likelihood of our data:
$$
\begin{equation}
\begin{split}
l(\phi, \mu, \Sigma) =& \sum_{i=1}^{n}log\,p(x^{(i)};\phi,\mu,\Sigma)\\
=& \sum_{i=1}^{n}log\sum_{z^{(i)}=1}^{k}p(x^{(i)}|z^{(i)}; \mu,\Sigma)p(z^{(i)};\phi)
\end{split}
\end{equation}
$$
it is not possible to find the maximum likelihood estimate of the parameters in closed form.
on the other hand, if we knew what $z^{(i)}$'s were, the maximum likelihood problem would have been easy.
specifically, we could then write down the likelihood as:
$$l(\phi, \mu, \Sigma) = \sum_{i=1}^{n}log\,p(x^{(i)}|z^{(i)};\mu,\Sigma) + log\,p(z^{(i)}; \phi)$$
maximizing this with respect to $\phi, \mu, \Sigma$ gives the parameters:
$$\phi_{j} = \frac{1}{n}\sum_{i=1}^{n}1\{z^{(i)} = j\}$$
$$\mu_{j} = \frac{\sum_{i=1}^{n}1\{z^{(i)} = j\}x^{(i)}}{\sum_{i=1}^{n}1\{z^{(i)} = j\}}$$
$$\Sigma_{j} = \frac{\sum_{i=1}^{n}1\{z^{(i)} = j\}(x^{(i)} - \mu_{j})(x^{(i)} - \mu_{j})^{T}}{\sum_{i=1}^{n}1\{z^{(i)} = j\}}$$
it is identical to the gaussian discriminant analysis(GDA).
however, in our density estimation problems, $z^{(i)}$'s are not known, what can we do?
the EM algorithm is an iterative algorithm that has two main steps:
**1.the E-step, it tries to "guess" the values of the $z^{(i)}$'s.**
**2.the M-step, it updates the parameters of our model based on our guesses.**
since in the M-step we are pretending that the guesses in the first part were correct, the maximization becomes easy, here's the algorithm:
(E-step) for each $i, j$ set
$$w_{j}^{(i)} := p(z^{i}=j|x^{(i)}; \phi,\mu,\Sigma)$$
(M-step) update the parameters:
$$\phi_{j} := \frac{1}{n}\sum_{i=1}^{n}w_{j}^{(i)}$$
$$\mu_{j} := \frac{\sum_{i=1}^{n}w_{j}^{(i)}x^{(i)}}{\sum_{i=1}^{n}w_{j}^{(i)}}$$
$$\Sigma_{j} := \frac{\sum_{i=1}^{n}w_{j}^{(i)}(x^{(i)} - \mu_{j})(x^{(i)} - \mu_{j})^{T}}{\sum_{i=1}^{n}w_{j}^{(i)}}$$
the EM algorithm is similar to the K-means algorithm,
except that instead of the "hard" cluster assignments $c(i)$, we have the "soft" assignments $w_{j}^{(i)}$.
from sklearn.mixture import GaussianMixture
from sklearn.datasets import make_blobs
import numpy as np
X1, y1 = make_blobs(n_samples=1000, centers=((4, -4), (0, 0)), random_state=42)
X1 = X1.dot(np.array([[0.374, 0.95], [0.732, 0.598]]))
X2, y2 = make_blobs(n_samples=250, centers=1, random_state=42)
X2 = X2 + [6, -8]
X = np.r_[X1, X2]
y = np.r_[y1, y2]
import matplotlib.pyplot as plt
def plot_clusters(X, y=None):
    """Scatter-plot 2-D points in X, optionally coloured by labels y."""
    plt.scatter(X[:, 0], X[:, 1], c=y, s=1)
    plt.xlabel("$x_1$", fontsize=14)
    plt.ylabel("$x_2$", fontsize=14, rotation=0)
gm = GaussianMixture(n_components=3, n_init=10, random_state=42)
gm.fit(X)
gm.weights_
gm.means_
gm.covariances_
gm.converged_, gm.n_iter_
gm.predict(X), gm.predict_proba(X)
X_new, y_new = gm.sample(7)
gm.score_samples(X_new)
gm.bic(X), gm.aic(X)
## The EM algorithm
here comes the general view of EM.
like the assumption before, we have the training set $\left\{x^{(1)},...,x^{(n)}\right\}$, each point $x^{(i)}$ has a latent variable $z^{(i)}$.
denote $\theta$ as the distribution parameters($\phi,\mu,\Sigma$ in mixtures of gaussian), then density of $x$ can be obtained by:
$$p(x;\theta) = \sum_{z}p(x,z;\theta)$$
the log-likelihood of the data:
$$
\begin{equation}
\begin{split}
l(\theta) =& \sum_{i=1}^{n}log\,p(x^{(i)}; \theta) \\
=& \sum_{i=1}^{n}log\sum_{z^{(i)}}p(x^{(i)}, z^{(i)}; \theta)
\end{split}
\end{equation}
$$
instead of solving the maximum-likelihood directly(usually very hard), the EM algorithm repeatedly construct a lower-bound on $l$(E-step), and then optimize that lower-bound(M-step).
we first consider optimizing the likelihood $log\,p(x)$ for a single example $x$, optimize $log\,p(x;\theta)$:
$$log\,p(x;\theta) = log\sum_{z}p(x,z;\theta)$$
let $Q$ be a distribution over the possible values of $z$, then:
$$
\begin{equation}
\begin{split}
log\,p(x;\theta) =& log\sum_{z}p(x,z;\theta)\\
=& log\sum_{z}Q(z)\frac{p(x,z;\theta)}{Q(z)}\\
\ge& \sum_{z}Q(z)log\frac{p(x,z;\theta)}{Q(z)}
\end{split}
\end{equation}
$$
the last step uses Jensen's inequality, for $(log\,x)'' = -1/x^{2} < 0$ so that strictly concave.
for any distribution $Q$, the above formula gives a lower-bound on $log\,p(x;\theta)$.
to make the bound tight for a particular value of $\theta$, we want Jensen's inequality to hold with equality. it is sufficient that the expectation be taken over a "constant"-valued random variable, i.e.:
$$\frac{p(x,z;\theta)}{Q(z)} = c$$
this leads to:
$$
\begin{equation}
\begin{split}
Q(z) =& \frac{p(x,z;\theta)}{\sum_{z}p(x,z;\theta)}\\
=& \frac{p(x,z;\theta)}{p(x;\theta)}\\
=& p(z|x;\theta)
\end{split}
\end{equation}
$$
thus, we simply set $Q$ to be the posterior distribution of $z$'s given $x$ and $\theta$.
for convenience, we call the bound as **evidence lower bound(ELBO)**:
$$ELBO(x;Q,\theta) = \sum_{z}Q(z)log\frac{p(x,z;\theta)}{Q(z)}$$
the estimation now can re-write as:
$$\forall Q,\theta,x,\quad log\,p(x;\theta) \ge ELBO(x;Q,\theta)$$
Intuitively, the EM algorithm alternatively update $Q$ and $\theta$ by:
a.setting $Q(z) = p(z|x;\theta)$ so that $ELBO(x;Q,\theta) = log\,p(x;\theta)$
b.maximizing $ELBO(x;Q,\theta)$ with respect to $\theta$.
now consider multiple examples $\left\{x^{(1)},...,x^{(n)}\right\}$, note the optimal choice of $Q$ is $p(z|x;\theta)$, and it depends on the particular example $x$.
therefore we introduce $n$ distributions $Q_{1},...,Q_{n}$, one for each example $x^{(i)}$, for each example:
$$log\,p(x^{(i)};\theta) \ge ELBO(x^{(i)};Q_{i},\theta) = \sum_{z^{(i)}}Q_{i}(z^{(i)})log\frac{p(x^{(i)},z^{(i)};\theta)}{Q_{i}(z^{(i)})}$$
taking sum over all examples, we obtain a lower bound for the log-likelihood:
$$
\begin{equation}
\begin{split}
l(\theta) \ge& \sum_{i}ELBO(x^{(i)};Q_{i},\theta)\\
=& \sum_{i}\sum_{z^{(i)}}Q_{i}(z^{(i)})log\frac{p(x^{(i)},z^{(i)};\theta)}{Q_{i}(z^{(i)})}
\end{split}
\end{equation}
$$
the above inequality holds for **any** distributions $Q_{1},...,Q_{n}$. equality holds when:
$$Q_{i}(z^{(i)}) = p(z^{(i)}|x^{(i)};\theta)$$
we now come to the definition of EM algorithm:
(E-step) for each $i$, set:
$$Q_{i}(z^{(i)}) := p(z^{(i)}|x^{(i)};\theta)$$
(M-step) for fixed $Q_{i}$'s, set:
$$
\begin{equation}
\begin{split}
\theta :=& \underset{\theta}{argmax}\sum_{i=1}^{n}ELBO(x^{(i)};Q_{i},\theta)\\
=& \underset{\theta}{argmax}\sum_{i=1}^{n}\sum_{z^{(i)}}Q_{i}(z^{(i)})log\frac{p(x^{(i)},z^{(i)};\theta)}{Q_{i}(z^{(i)})}
\end{split}
\end{equation}
$$
## Convergence of EM algorithm
suppose $\theta^{(t)}$ and $\theta^{(t+1)}$ are the parameters from two successive iterations of EM, we will now prove that $l(\theta^{(t)}) \le l(\theta^{(t+1)})$.
$$
\begin{equation}
\begin{split}
l(\theta^{(t+1)}) \ge& \sum_{i=1}^{n}ELBO(x^{(i)};Q_{i}^{(t)};\theta^{(t+1)})\quad (\mbox{by Jensen's inequality})\\
\ge& \sum_{i=1}^{n}ELBO(x^{(i)};Q_{i}^{(t)};\theta^{(t)})\quad (\mbox{by M-step}) \\
=& l(\theta^{(t)})\quad (\mbox{by E-step})
\end{split}
\end{equation}
$$
if we define:
$$ELBO(Q, \theta) = \sum_{i=1}^{n}ELBO(x^{(i)}; Q_{i}, \theta)$$
then we know $l(\theta) \ge ELBO(Q, \theta)$.
the EM algorithm can be viewed an alternating maximization algorithm on $ELBO(Q, \theta)$:
the E-step maximizes it with respect to $Q$.
the M-step maximizes it with respect to $\theta$.
## Bayesian Gaussian Mixture Models
from sklearn.mixture import BayesianGaussianMixture
bgm = BayesianGaussianMixture(n_components=10, n_init=10)
bgm.fit(X)
np.round(bgm.weights_, 2)
## Excercises
### 1.clustering
from sklearn.datasets import fetch_olivetti_faces
olivetti = fetch_olivetti_faces()
print(olivetti.DESCR)
olivetti.target
from sklearn.model_selection import StratifiedShuffleSplit
strat_split = StratifiedShuffleSplit(n_splits=1, test_size=40, random_state=42)
train_valid_idx, test_idx = next(strat_split.split(olivetti.data, olivetti.target))
X_train_valid = olivetti.data[train_valid_idx]
y_train_valid = olivetti.target[train_valid_idx]
X_test = olivetti.data[test_idx]
y_test = olivetti.target[test_idx]
strat_split = StratifiedShuffleSplit(n_splits=1, test_size=80, random_state=43)
train_idx, valid_idx = next(strat_split.split(X_train_valid, y_train_valid))
X_train = X_train_valid[train_idx]
y_train = y_train_valid[train_idx]
X_valid = X_train_valid[valid_idx]
y_valid = y_train_valid[valid_idx]
print(X_train.shape, y_train.shape)
print(X_valid.shape, y_valid.shape)
print(X_test.shape, y_test.shape)
from sklearn.decomposition import PCA
pca = PCA(0.99)
X_train_pca = pca.fit_transform(X_train)
X_valid_pca = pca.transform(X_valid)
X_test_pca = pca.transform(X_test)
pca.n_components_
from sklearn.cluster import KMeans
k_range = range(5, 150, 5)
kmeans_per_k = []
for k in k_range:
print("k={}".format(k))
kmeans = KMeans(n_clusters=k, random_state=42).fit(X_train_pca)
kmeans_per_k.append(kmeans)
from sklearn.metrics import silhouette_score
silhouette_scores = [silhouette_score(X_train_pca, model.labels_)
for model in kmeans_per_k]
best_index = np.argmax(silhouette_scores)
best_k = k_range[best_index]
best_score = silhouette_scores[best_index]
plt.figure(figsize=(8, 3))
plt.plot(k_range, silhouette_scores, "bo-")
plt.xlabel("$k$", fontsize=14)
plt.ylabel("Silhouette score", fontsize=14)
plt.plot(best_k, best_score, "rs")
plt.show()
best_k
inertias = [model.inertia_ for model in kmeans_per_k]
best_inertia = inertias[best_index]
plt.figure(figsize=(8, 3.5))
plt.plot(k_range, inertias, "bo-")
plt.xlabel("$k$", fontsize=14)
plt.ylabel("Inertia", fontsize=14)
plt.plot(best_k, best_inertia, "rs")
plt.show()
best_model = kmeans_per_k[best_index]
def plot_faces(faces, labels, n_cols=5):
    """Show 64x64 face images in a grid of n_cols columns, titled by label."""
    faces = faces.reshape(-1, 64, 64)
    # Enough rows to hold every face.
    n_rows = (len(faces) - 1) // n_cols + 1
    plt.figure(figsize=(n_cols, n_rows * 1.1))
    for index, (face, label) in enumerate(zip(faces, labels)):
        plt.subplot(n_rows, n_cols, index + 1)
        plt.imshow(face, cmap="gray")
        plt.axis("off")
        plt.title(label)
    plt.show()
# Visually inspect each cluster found by the best model.
for cluster_id in np.unique(best_model.labels_):
    print("Cluster", cluster_id)
    in_cluster = best_model.labels_==cluster_id
    faces = X_train[in_cluster]
    labels = y_train[in_cluster]
    plot_faces(faces, labels)

### 2.clustering + classification
# Baseline: random forest on the raw PCA features.
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=150, random_state=42)
clf.fit(X_train_pca, y_train)
clf.score(X_valid_pca, y_valid)  # notebook-style expression: displays the score

# Replace the features by the distances to the k-means centroids.
X_train_reduced = best_model.transform(X_train_pca)
X_valid_reduced = best_model.transform(X_valid_pca)
X_test_reduced = best_model.transform(X_test_pca)
clf = RandomForestClassifier(n_estimators=150, random_state=42)
clf.fit(X_train_reduced, y_train)
clf.score(X_valid_reduced, y_valid)

# Sweep the number of clusters inside a kmeans -> forest pipeline.
from sklearn.pipeline import Pipeline
for n_clusters in k_range:
    pipeline = Pipeline([
        ("kmeans", KMeans(n_clusters=n_clusters, random_state=42)),
        ("forest_clf", RandomForestClassifier(n_estimators=150, random_state=42))
    ])
    pipeline.fit(X_train_pca, y_train)
    print(n_clusters, pipeline.score(X_valid_pca, y_valid))

# Append the centroid distances to the PCA features instead of replacing them.
X_train_extended = np.c_[X_train_pca, X_train_reduced]
X_valid_extended = np.c_[X_valid_pca, X_valid_reduced]
X_test_extended = np.c_[X_test_pca, X_test_reduced]
clf = RandomForestClassifier(n_estimators=150, random_state=42)
clf.fit(X_train_extended, y_train)
clf.score(X_valid_extended, y_valid)

### 3.Gaussian Mixture Model
from sklearn.mixture import GaussianMixture
gm = GaussianMixture(n_components=40, random_state=42)
y_pred = gm.fit_predict(X_train_pca)

# Sample new faces from the mixture and map them back to pixel space.
n_gen_faces = 20
gen_faces_reduced, y_gen_faces = gm.sample(n_samples=n_gen_faces)
gen_faces = pca.inverse_transform(gen_faces_reduced)
plot_faces(gen_faces, y_gen_faces)

# Build deliberately corrupted faces: rotated, flipped and darkened copies.
n_rotated = 4
rotated = np.transpose(X_train[:n_rotated].reshape(-1, 64, 64), axes=[0, 2, 1])
rotated = rotated.reshape(-1, 64*64)
y_rotated = y_train[:n_rotated]
n_flipped = 3
flipped = X_train[:n_flipped].reshape(-1, 64, 64)[:, ::-1]
flipped = flipped.reshape(-1, 64*64)
y_flipped = y_train[:n_flipped]
n_darkened = 3
darkened = X_train[:n_darkened].copy()
darkened[:, 1:-1] *= 0.3  # dim all but the first and last flattened pixel
y_darkened = y_train[:n_darkened]
X_bad_faces = np.r_[rotated, flipped, darkened]
y_bad = np.concatenate([y_rotated, y_flipped, y_darkened])
plot_faces(X_bad_faces, y_bad)

# Corrupted faces should score far lower under the mixture than real ones.
X_bad_faces_pca = pca.transform(X_bad_faces)
gm.score_samples(X_bad_faces_pca)
gm.score_samples(X_train_pca[:10])

### 4.Using Dimensionality Reduction Techniques for Anomaly Detection
X_train_pca  # notebook-style expression: displays the array
def reconstruction_errors(pca, X):
    """Per-sample mean squared reconstruction error of ``X`` under ``pca``.

    Projects ``X`` into the reduced space, maps it back with
    ``inverse_transform``, and returns the MSE of each row between the
    round-tripped data and the original.
    """
    round_trip = pca.inverse_transform(pca.transform(X))
    return np.square(round_trip - X).mean(axis=-1)
# Real faces reconstruct well under a PCA fitted on clean data;
# the corrupted ones should show a clearly higher mean error.
reconstruction_errors(pca, X_train).mean()
reconstruction_errors(pca, X_bad_faces).mean()
plot_faces(X_bad_faces, y_bad)
# Show what the corrupted faces look like after the PCA round trip.
X_bad_faces_reconstructed = pca.inverse_transform(X_bad_faces_pca)
plot_faces(X_bad_faces_reconstructed, y_bad)
| StarcoderdataPython |
56084 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import unittest
from analysis.linear import feature
from analysis.linear.feature import ChangedFile
from analysis.linear.feature import MetaFeatureValue
from analysis.linear.linear_testcase import Feature0
from analysis.linear.linear_testcase import Feature1
from analysis.linear.linear_testcase import Feature2
from analysis.linear.linear_testcase import Feature3
from analysis.linear.linear_testcase import Feature4
from analysis.linear.linear_testcase import LinearTestCase
import libs.math.logarithms as lmath
from libs.meta_object import MetaDict
_MAXIMUM = 50.
class ChangelistFeatureTest(unittest.TestCase):
    """Unit tests for the scalar scaling helpers in ``feature``.

    ``LinearlyScaled`` maps 0 -> 1 and values >= the maximum -> 0;
    ``LogLinearlyScaled`` is its log-domain counterpart.
    """

    def testLinearlyScaledIsZero(self):
        """Test that ``LinearlyScaled`` takes 0 to 1."""
        self.assertEqual(1., feature.LinearlyScaled(0., _MAXIMUM))

    def testLinearlyScaledMiddling(self):
        """Test that ``LinearlyScaled`` takes middling values to middling values."""
        self.assertEqual((_MAXIMUM - 42.) / _MAXIMUM,
                         feature.LinearlyScaled(42., _MAXIMUM))

    def testLinearlyScaledIsOverMax(self):
        """Test that ``LinearlyScaled`` takes values over the max to 0."""
        self.assertEqual(0., feature.LinearlyScaled(42., 10.))

    def testLogLinearlyScaledIsZero(self):
        """Test that ``LogLinearlyScaled`` takes log(0) to log(1)."""
        self.assertEqual(lmath.LOG_ONE, feature.LogLinearlyScaled(0., _MAXIMUM))

    def testLogLinearlyScaledMiddling(self):
        """Test that ``LogLinearlyScaled`` works on middling values."""
        self.assertEqual(
            lmath.log((_MAXIMUM - 42.) / _MAXIMUM),
            feature.LogLinearlyScaled(42., _MAXIMUM))

    def testLogLinearlyScaledIsOverMax(self):
        """Test that ``LogLinearlyScaled`` takes values over the max to log(0)."""
        self.assertEqual(lmath.LOG_ZERO, feature.LogLinearlyScaled(42., 10.))
class MetaFeatureValueTest(unittest.TestCase):
    """Tests for ``MetaFeatureValue`` equality, length and aggregation."""

    def setUp(self):
        super(MetaFeatureValueTest, self).setUp()
        # A meta value wrapping three dummy feature values keyed by name.
        self.feature = MetaFeatureValue(
            'dummy', {feature.name: feature(3)(False)
                      for feature in [Feature0(), Feature1(), Feature3()]})

    # NOTE(review): method name has a typo ("Eqaul"); renaming it to
    # ``testEqual`` would still be discovered by unittest.
    def testEqaul(self):
        """Tests overriding ``__eq__`` and ``__ne__``."""
        copy_meta_feature = copy.deepcopy(self.feature)
        self.assertTrue(self.feature == copy_meta_feature)
        copy_meta_feature._name = 'dummy2'
        self.assertTrue(self.feature != copy_meta_feature)

    def testLen(self):
        """Tests overriding ``__len__``."""
        self.assertEqual(len(self.feature), 3)

    def testFormatReasons(self):
        """Tests that ``reason`` returns a flattened dict of formatted reasons,
        including reasons from nested ``MetaFeatureValue`` instances."""
        feature0 = Feature0()
        feature1 = Feature1()
        feature2 = Feature2()
        meta_feature = MetaFeatureValue(
            'dummy',
            {feature0.name: feature0(1)(False),
             'meta': MetaFeatureValue(
                 'meta',
                 {feature1.name: feature1(2)(True),
                  feature2.name: feature2(3)(True)})})
        self.assertEqual(meta_feature.reason, {'Feature0': 'reason0',
                                               'Feature1': 'reason1',
                                               'Feature2': 'reason2'})
        # The public property must expose the cached private value.
        self.assertEqual(meta_feature.reason, meta_feature._reason)

    def testAggregateChangedFilesAggregates(self):
        """Test that ``AggregateChangedFiles`` does aggregate reasons per file.

        In the main/inner loop of ``AggregateChangedFiles``: if multiple
        features all blame the same file change, we try to aggregate those
        reasons so that we only report the file once (with all reasons). None
        of the other tests here actually check the case where the same file
        is blamed multiple times, so we check that here.

        In particular, we provide the same ``FeatureValue`` twice, and
        hence the same ``ChangedFile`` twice; so we should get back a single
        ``ChangedFile`` but with the ``reasons`` fields concatenated.
        """
        self.assertListEqual(self.feature.changed_files,
                             [ChangedFile(name='a.cc',
                                          blame_url=None,
                                          reasons=['file_reason0']),
                              ChangedFile(name='b.cc',
                                          blame_url=None,
                                          reasons=['file_reason0',
                                                   'file_reason1'])])
        self.assertEqual(self.feature.changed_files,
                         self.feature._changed_files)
class WrapperMetaFeatureTest(LinearTestCase):
    """Checks that the wrapper meta-feature composes independent features."""

    def testWrapperMetaFeatureWrapsIndependentFeatures(self):
        # For every (x, y) pair the wrapped meta-feature must equal a
        # hand-built MetaFeatureValue that evaluates each feature
        # independently (including the nested wrapper of Feature3/Feature4).
        for x in self._X:
            for y in self._Y(x):
                self.assertTrue(
                    self._meta_feature(x)(y) ==
                    MetaFeatureValue('WrapperFeature',
                                     {'Feature0': Feature0()(x)(y),
                                      'Feature1': Feature1()(x)(y),
                                      'Feature2': Feature2()(x)(y),
                                      'WrapperFeature': MetaFeatureValue(
                                          'WrapperFeature',
                                          {'Feature3': Feature3()(x)(y),
                                           'Feature4': Feature4()(x)(y)})}))
| StarcoderdataPython |
5071106 | <filename>mrjob/aws.py
# -*- coding: utf-8 -*-
# Copyright 2013 Lyft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General information about Amazon Web Services, such as region-to-endpoint
mappings.
"""
from __future__ import with_statement
### EC2 Instances ###

# map from instance type to number of compute units
# from http://aws.amazon.com/ec2/instance-types/
EC2_INSTANCE_TYPE_TO_COMPUTE_UNITS = {
    't1.micro': 2,
    'm1.small': 1,
    'm1.large': 4,
    'm1.xlarge': 8,
    'm2.xlarge': 6.5,
    'm2.2xlarge': 13,
    'm2.4xlarge': 26,
    'c1.medium': 5,
    'c1.xlarge': 20,
    'cc1.4xlarge': 33.5,
    'cg1.4xlarge': 33.5,
}

# map from instance type to GB of memory
# from http://aws.amazon.com/ec2/instance-types/
EC2_INSTANCE_TYPE_TO_MEMORY = {
    't1.micro': 0.6,
    'm1.small': 1.7,
    'm1.large': 7.5,
    'm1.xlarge': 15,
    'm2.xlarge': 17.5,
    'm2.2xlarge': 34.2,
    'm2.4xlarge': 68.4,
    'c1.medium': 1.7,
    'c1.xlarge': 7,
    'cc1.4xlarge': 23,
    'cg1.4xlarge': 22,
}

### EMR ###

# EMR's hard limit on number of steps in a job flow
MAX_STEPS_PER_JOB_FLOW = 256

### Regions ###

# Based on http://docs.aws.amazon.com/general/latest/gr/rande.html
# See Issue #658 for why we don't just let boto handle this.

# The %(region)s placeholders below are filled in by the *_for_region
# helpers further down in this module.

# where to connect to EMR. The docs say
# elasticmapreduce.<region>.amazonaws.com, but the SSL certificates,
# they tell a different story. See Issue #621.

# where the AWS docs say to connect to EMR
_EMR_REGION_ENDPOINT = 'elasticmapreduce.%(region)s.amazonaws.com'

# the host that currently works with EMR's SSL certificate
_EMR_REGION_SSL_HOST = '%(region)s.elasticmapreduce.amazonaws.com'

# the regionless endpoint doesn't have SSL issues
_EMR_REGIONLESS_ENDPOINT = 'elasticmapreduce.amazonaws.com'

# where to connect to S3
_S3_REGION_ENDPOINT = 's3-%(region)s.amazonaws.com'
_S3_REGIONLESS_ENDPOINT = 's3.amazonaws.com'

# us-east-1 doesn't have its own endpoint or need bucket location constraints
_S3_REGIONS_WITH_NO_LOCATION_CONSTRAINT = ['us-east-1']
# "EU" is an alias for the eu-west-1 region
_ALIAS_TO_REGION = {
'eu': 'eu-west-1',
}
# The region to assume if none is specified
_DEFAULT_REGION = 'us-east-1'
def _fix_region(region):
"""Convert "EU" to "eu-west-1", None to '', and convert to lowercase."""
region = (region or '').lower()
return _ALIAS_TO_REGION.get(region) or region
def emr_endpoint_for_region(region):
    """Get the host for Elastic MapReduce in the given AWS region.

    Falls back to the regionless endpoint when no region is given.
    """
    region = _fix_region(region)
    return (_EMR_REGION_ENDPOINT % {'region': region} if region
            else _EMR_REGIONLESS_ENDPOINT)
def emr_ssl_host_for_region(region):
    """Get the host for Elastic MapReduce that matches their SSL cert
    for the given region. (See Issue #621.)

    Falls back to the regionless endpoint when no region is given.
    """
    region = _fix_region(region)
    return (_EMR_REGION_SSL_HOST % {'region': region} if region
            else _EMR_REGIONLESS_ENDPOINT)
def s3_endpoint_for_region(region):
    """Get the host for S3 in the given AWS region.

    Regions without their own endpoint (e.g. us-east-1) and the empty
    region use the regionless endpoint.
    """
    region = _fix_region(region)
    if region and region not in _S3_REGIONS_WITH_NO_LOCATION_CONSTRAINT:
        return _S3_REGION_ENDPOINT % {'region': region}
    return _S3_REGIONLESS_ENDPOINT
def s3_location_constraint_for_region(region):
    """Get the location constraint an S3 bucket needs so that other AWS
    services can connect to it in the given region.

    Returns ``''`` for the empty region and for regions that need no
    constraint (e.g. us-east-1).
    """
    region = _fix_region(region)
    if region and region not in _S3_REGIONS_WITH_NO_LOCATION_CONSTRAINT:
        return region
    return ''
| StarcoderdataPython |
11248581 | <filename>AtCoder/ABC132/B.py
# AtCoder ABC132 B: count "middle" elements of consecutive triples.
n = int(input())
heights = list(map(int, input().split()))

# An element is counted when it is strictly between both neighbours,
# i.e. neither the min nor the max of its consecutive triple.
answer = 0
for i in range(n - 2):
    left, mid, right = heights[i], heights[i + 1], heights[i + 2]
    if left < mid < right or left > mid > right:
        answer += 1
print(answer)
| StarcoderdataPython |
3339962 | <filename>test/point_source_tests.py
import numpy as np
import unittest
from paltas.PointSource.point_source_base import PointSourceBase
from paltas.PointSource.single_point_source import SinglePointSource
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.PointSource.point_source import PointSource
from lenstronomy.ImSim.image_model import ImageModel
from lenstronomy.Data.imaging_data import ImageData
from lenstronomy.Util.simulation_util import data_configure_simple
from lenstronomy.Util.data_util import magnitude2cps
from lenstronomy.Data.psf import PSF
class PointSourceBaseTests(unittest.TestCase):
    """Tests for the ``PointSourceBase`` parameter container."""

    def setUp(self):
        # Start from an empty parameter dict so each test sees clean state.
        self.c = PointSourceBase(point_source_parameters=dict())

    def test_update_parameters(self):
        # test passing None: must leave the parameters untouched
        self.c.update_parameters(None)
        self.assertDictEqual(self.c.point_source_parameters, dict())
        # test passing an element that wasn't there yet
        self.c.update_parameters({'radius':1.})
        self.assertDictEqual(self.c.point_source_parameters, {'radius':1.})

    def test_draw_point_source(self):
        # Just test that the not implemented error is raised.
        with self.assertRaises(NotImplementedError):
            self.c.draw_point_source()
class SinglePointSourceTests(PointSourceBaseTests):
    """Tests for ``SinglePointSource`` using a lenstronomy image pipeline."""

    def setUp(self):
        # Minimal valid parameter set for a single point source.
        self.point_source_parameters=dict(
            x_point_source=0.001,
            y_point_source=0.001,
            magnitude=22,
            output_ab_zeropoint=25,
            compute_time_delays=False
        )
        self.c = SinglePointSource(
            point_source_parameters=self.point_source_parameters)

    def test_check_parameterization(self):
        # test that the base class actually checks for missing parameters
        failed_parameters = dict(x_point_source=0.001,y_point_source=0.001,
            magnitude=22)
        with self.assertRaises(ValueError):
            SinglePointSource(point_source_parameters=failed_parameters)

    def test_update_parameters(self):
        # test a parameter originally set in setUp
        self.point_source_parameters['magnitude'] = 10
        self.c.update_parameters(self.point_source_parameters)
        self.assertEqual(self.c.point_source_parameters['magnitude'], 10)

    def test_draw_point_source(self):
        list_model, list_kwargs = self.c.draw_point_source()
        # test list_model for ['SOURCE_POSITION']
        self.assertTrue('SOURCE_POSITION' in list_model)
        # test that all needed parameters are in list_kwargs
        params = ('ra_source', 'dec_source', 'point_amp')
        for p in params:
            self.assertTrue(p in list_kwargs[0].keys())

        # now, test with no lens mass
        list_ps_model, list_ps_kwargs = self.c.draw_point_source()
        # set up lens, source light, point source models
        # (theta_E=0 / gamma=0 makes the lens a no-op)
        lens_model = LensModel(['SPEP'])
        lens_kwargs = [{'theta_E': 0.0, 'e1': 0., 'e2': 0., 'gamma': 0.,
            'center_x': 0, 'center_y': 0}]
        source_light_model = LightModel(['SERSIC_ELLIPSE'])
        source_kwargs = [{'amp':70, 'R_sersic':0.1, 'n_sersic':2.5,
            'e1':0., 'e2':0., 'center_x':0.01, 'center_y':0.01}]
        point_source_model = PointSource(list_ps_model)
        # define PSF class, data class
        n_pixels = 64
        pixel_width = 0.08
        psf_class = PSF(psf_type='NONE')
        data_class = ImageData(**data_configure_simple(numPix=n_pixels,
            deltaPix=pixel_width))
        # draw image with point source
        complete_image_model = ImageModel(data_class=data_class,
            psf_class=psf_class,lens_model_class=lens_model,
            source_model_class=source_light_model,
            point_source_class=point_source_model)
        image_withPS = complete_image_model.image(kwargs_lens=lens_kwargs,
            kwargs_source=source_kwargs, kwargs_ps=list_ps_kwargs)
        # draw image without point source
        complete_image_model = ImageModel(data_class=data_class,
            psf_class=psf_class,lens_model_class=lens_model,
            source_model_class=source_light_model,
            point_source_class=None)
        image_noPS = complete_image_model.image(kwargs_lens=lens_kwargs,
            kwargs_source=source_kwargs, kwargs_ps=None)
        # take difference to isolate point source
        im_diff = image_withPS - image_noPS
        # make sure we get a nonzero image out
        self.assertTrue(np.sum(im_diff) > 0)
        # make sure the flux is what we expect
        flux_true = magnitude2cps(self.c.point_source_parameters['magnitude'],
            self.c.point_source_parameters['output_ab_zeropoint'])
        flux_image = np.sum(im_diff)
        self.assertAlmostEqual(flux_true,flux_image)
        # make sure light is in the center of the image 128 x 128 image
        # NOTE(review): the comment says 128x128 but n_pixels is 64 -- the
        # 30:34 window is the centre of a 64x64 image; confirm intent.
        self.assertTrue(np.sum(im_diff[30:34,30:34]) == flux_image)
        # test draw image with mag_pert
        self.point_source_parameters['mag_pert'] = [1, 1, 1, 1, 1]
        self.c.update_parameters(self.point_source_parameters)
        list_model, list_kwargs = self.c.draw_point_source()
        # make sure mag_pert is passed to lenstronomy
        self.assertTrue('mag_pert' in list_kwargs[0].keys())
| StarcoderdataPython |
4954239 | <filename>neo/Core/State/UnspentCoinState.py
import sys
from .StateBase import StateBase
from .CoinState import CoinState
from neo.IO.BinaryReader import BinaryReader
from neo.IO.MemoryStream import MemoryStream, StreamManager
class UnspentCoinState(StateBase):
    """Per-transaction record of the ``CoinState`` flags of each output.

    ``Items[i]`` holds the bit flags (see ``CoinState``) of output ``i``.
    """

    Items = None

    def __init__(self, items=None):
        """
        Args:
            items (list of int): CoinState flags, one entry per output.
        """
        if items is None:
            self.Items = []
        else:
            self.Items = items

    @staticmethod
    def FromTXOutputsConfirmed(outputs):
        """Build a state with every output flagged ``CoinState.Confirmed``."""
        uns = UnspentCoinState()
        # Simplified from a [0]*n allocation followed by a fill loop.
        uns.Items = [CoinState.Confirmed] * len(outputs)
        return uns

    def Size(self):
        """Approximate serialized size in bytes."""
        return super(UnspentCoinState, self).Size() + sys.getsizeof(self.Items)

    def IsAllSpent(self):
        """Return True when every tracked output has its Spent bit set.

        Bug fix: the previous implementation returned False whenever it
        found an output WITH the Spent bit set (``item & CoinState.Spent
        > 0``), i.e. it actually answered "is no coin spent?".  An output
        keeps this state alive while its Spent bit is *clear*, so that is
        the condition that must return False.
        """
        for item in self.Items:
            if item & CoinState.Spent == 0:
                return False
        return True

    def OrEqValueForItemAt(self, index, value):
        """OR ``value`` into ``Items[index]``, growing the list with zero
        entries as needed so that ``index`` is addressable."""
        while len(self.Items) < index + 1:
            self.Items.append(0)
        self.Items[index] |= value

    def Deserialize(self, reader):
        """Read the state: base fields, then one flag byte per output."""
        super(UnspentCoinState, self).Deserialize(reader)
        blen = reader.ReadVarInt()
        self.Items = [0] * blen
        for i in range(0, blen):
            self.Items[i] = reader.ReadByte()

    @staticmethod
    def DeserializeFromDB(buffer):
        """Deserialize an ``UnspentCoinState`` from raw database bytes."""
        m = StreamManager.GetStream(buffer)
        reader = BinaryReader(m)
        uns = UnspentCoinState()
        uns.Deserialize(reader)
        StreamManager.ReleaseStream(m)
        return uns

    def Serialize(self, writer):
        """Write the state: base fields, then one flag byte per output."""
        super(UnspentCoinState, self).Serialize(writer)
        writer.WriteVarInt(len(self.Items))
        # Plain loop instead of a list comprehension used for side effects.
        for item in self.Items:
            writer.WriteByte(item)
276070 | <filename>deep_gw_pe_followup/__init__.py
import os
# Absolute directory containing this package's data files.
DIR = os.path.dirname(__file__)


def get_mpl_style():
    """Return the path of the bundled matplotlib style sheet."""
    style_file = "plotting.mplstyle"
    return os.path.join(DIR, style_file)
| StarcoderdataPython |
# User-facing message templates for a weather-forecast CLI.

# Shown when the HTTP request to the forecast API fails.
API_REQUEST_ERROR = "\nSomething went wrong. Check your network connection.\n"
# Shown when the city lookup returns no exact match.
CITIES_COUNT_ERROR = (
    "\nUnfortunately, no such city has been found. "
    "You should enter the exact name of the city "
    "to get an accurate forecast.\n"
)
# Shown when the API has no forecast entries for tomorrow.
NO_FORECASTS_ERROR = "\nThere is no forecast for tomorrow for this city.\n"
# NOTE(review): the trailing characters below look like a mojibake-encoded
# rain emoji; left byte-identical on purpose.
RAINY_ANSWER = "\nIn {city} it will be rainy tomorrow. ๐ง๏ธ\n"
NOT_RAINY_ANSWER = "\nIn {city} it won't be rainy tomorrow.\n"
399003 | import heapq
class PriorityQueue:
    """Minimal min-priority queue backed by a binary heap.

    Entries are stored as ``(priority, item)`` tuples; the smallest
    priority is popped first.
    """

    def __init__(self):
        self.elements = []

    def empty(self):
        """Return True when no items remain."""
        return not self.elements

    def put(self, item, priority):
        """Insert ``item`` with the given ``priority``."""
        heapq.heappush(self.elements, (priority, item))

    def get(self):
        """Remove and return the item with the smallest priority."""
        _priority, item = heapq.heappop(self.elements)
        return item
# Neighbour generators for 4-connected grid movement.  Each entry maps a
# cell (x, y) to the adjacent cell in one direction.  The list order
# (up, down, left, right) fixes the neighbour-expansion order of the
# search, which in turn affects tie-breaking in the predecessor map.
dirs_motion = [
    lambda x, y: (x-1, y),  # up
    lambda x, y: (x+1, y),  # down
    lambda x, y: (x, y - 1),  # left
    lambda x, y: (x, y + 1),  # right
]
def heuristic(a, b):
    """Manhattan (L1) distance between grid cells ``a`` and ``b``."""
    ax, ay = a
    bx, by = b
    return abs(ax - bx) + abs(ay - by)
def a_star_search(grid, start, goal):
    """A* search over a 2-D occupancy grid with unit step cost.

    Cells with ``grid[x, y] > 0`` are traversable (the ``grid[x, y]``
    indexing suggests a NumPy array -- TODO confirm).  Returns
    ``(came_from, cost_so_far)``: the predecessor map and the cheapest
    known cost to each visited cell; ``came_from[start]`` is None.

    Fix: the neighbour variable was named ``next``, shadowing the
    builtin; renamed to ``neighbor`` (no behavior change).
    """
    frontier = PriorityQueue()
    frontier.put(start, 0)
    came_from = {}
    cost_so_far = {}
    came_from[start] = None
    cost_so_far[start] = 0

    while not frontier.empty():
        current = frontier.get()
        if current == goal:
            break
        for step in dirs_motion:
            x, y = step(current[0], current[1])
            # check for bounds and that the cell is traversable
            if 0 <= x < len(grid) and 0 <= y < len(grid[0]) and grid[x, y] > 0:
                neighbor = (x, y)
                # making all travel as cost 1
                new_cost = cost_so_far[current] + 1
                if neighbor not in cost_so_far or new_cost < cost_so_far[neighbor]:
                    cost_so_far[neighbor] = new_cost
                    priority = new_cost + heuristic(goal, neighbor)
                    frontier.put(neighbor, priority)
                    came_from[neighbor] = current
    return came_from, cost_so_far
def getPath(grid, start, goal):
    """Return the A* path from ``goal`` back to ``start`` (both inclusive).

    Note the ordering: the returned list starts at ``goal`` and ends at
    ``start``; callers must reverse it for start-to-goal order.

    NOTE(review): if ``goal`` is unreachable, ``came_from`` lacks the key
    and this raises KeyError -- confirm callers only pass reachable goals.

    (Commented-out debug prints from the original were removed.)
    """
    start = tuple(start)
    goal = tuple(goal)
    came_from, _cost_so_far = a_star_search(grid, start, goal)
    path = [goal]
    pointer = goal
    while pointer != start:
        pointer = came_from[pointer]
        path.append(pointer)
    return path
| StarcoderdataPython |
8030013 | from flask import session, redirect, request, url_for, flash
from functools import wraps
def check_session(f):
    """Flask view decorator that redirects anonymous users to the login page.

    The wrapped view only runs when ``'loggedIn'`` is present in the
    session; otherwise the client is redirected to ``auth.index``.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if 'loggedIn' in session:
            return f(*args, **kwargs)
        return redirect(url_for('auth.index'))
    return wrapper
1635451 | <reponame>paulondc/chilopoda<filename>src/lib/kombi/Crawler/__init__.py<gh_stars>1-10
from .PathHolder import PathHolder
from .Crawler import Crawler, CrawlerError, CrawlerInvalidVarError, CrawlerInvalidTagError, CrawlerTestError, CrawlerTypeError
from . import Fs
from . import Generic
from .Matcher import Matcher
from .VarExtractor import VarExtractor, VarExtractorError, VarExtractorNotMatchingCharError, VarExtractorMissingSeparatorError, VarExtractorCannotFindExpectedCharError
| StarcoderdataPython |
9601963 | """
๋ณ์ญ๋ช
๋ฌธ๊ฐ๋๊ฒ์๊ธธ๋ ๊ถ๊ธํด์ ์ํ
ํด๋ณธ๊ฑฐ...
์ง์ญ์ฝ๋๊ฐ ์์ธ์ง 2~16๋ฐ์์์ 1์ด๋๊ฐ?
"""
import json
import requests

year = 2018
# JSONP endpoints of the Korean Military Manpower Administration open-data
# API: one lists all families per region code, the other returns the
# per-family member details by group number (grno).
base_url = 'https://open.mma.go.kr/caisGGGS/hall/mmg/listAllCall.json?yr={year}&jbc_cd={num}&callback'
dutydata_url = 'https://open.mma.go.kr/caisGGGS/hall/mmg/memberCall.json?yr={year}&grno={grno}&callback'
def remove_brackets_json(requests_object):
    """Strip the JSONP padding (one leading and one trailing character,
    i.e. the surrounding parentheses) and parse the remaining JSON body."""
    body = requests_object.text
    return json.loads(body[1:-1])
gamuns = []  # one record per family ("gamun")
# Region codes 2..15 -- presumably administrative district codes; TODO confirm.
for n in range(2, 16):
    url = base_url.format(year=year, num='%02d'%n)
    req = remove_brackets_json(requests.get(url))
    for x in req['mmgList']:
        # Fetch the per-family detail record by its group number.
        url = dutydata_url.format(year=year, grno=x['bymyeongmunga_grno'])
        x_req = remove_brackets_json(requests.get(url))
        x_req = x_req['jwgt_BYMYEONGMUNGAVO']
        # grno is "<location>-<sign>"; serv_t = total service months,
        # serv_n = number of family members who served (field names are
        # romanized Korean -- TODO confirm semantics).
        loc, sign = x_req['bymyeongmunga_grno'].split('-')
        gamuns.append({ 'name': x_req['daepyoja_fnm'],
                        'sign': sign,
                        'loc': loc,
                        'serv_t': int(x_req['bokmu_mcnt_sum']),
                        'serv_n': int(x_req['ihaengja_cnt'])})
    print(n, 'done.')
# Sort families by the number of members who served.
gamuns = sorted(gamuns, key=lambda k: k['serv_n'])
for x in gamuns:
    # NOTE(review): the format string below was split across two lines and
    # its Korean text mojibake-garbled during extraction (as found, it is
    # not valid Python); left byte-identical on purpose.
    print('{} - {} ์์ฌ {} ๊ฐ๋ฌธ ์ด {}๋ช
{}๊ฐ์ ๋ณต๋ฌด'.format(x['sign'], x['loc'], x['name'], x['serv_n'], x['serv_t']))
| StarcoderdataPython |
1882969 | <reponame>pyghassen/jasmin<filename>jasmin/routing/test/test_throwers.py<gh_stars>0
import mock
from twisted.internet import reactor, defer
from twisted.trial import unittest
from jasmin.queues.factory import AmqpFactory
from jasmin.queues.configs import AmqpConfig
from jasmin.routing.configs import deliverSmHttpThrowerConfig, DLRThrowerConfig
from jasmin.routing.throwers import deliverSmHttpThrower, DLRThrower
from jasmin.routing.content import RoutedDeliverSmContent
from jasmin.managers.content import DLRContent
from jasmin.routing.jasminApi import HttpConnector, SmppClientConnector
from jasmin.vendor.smpp.pdu.operations import DeliverSM
from twisted.web.resource import Resource
from jasmin.routing.test.http_server import LeafServer, TimeoutLeafServer, AckServer, NoAckServer, Error404Server
from twisted.web import server
class deliverSmHttpThrowerTestCase(unittest.TestCase):
    """Base fixture: an AMQP broker wired to a ``deliverSmHttpThrower``.

    Timeout/retry settings are lowered so the retry-related tests in the
    subclasses finish quickly.
    """

    @defer.inlineCallbacks
    def setUp(self):
        # Initiating config objects without any filename
        # will lead to setting defaults and that's what we
        # need to run the tests
        AMQPServiceConfigInstance = AmqpConfig()
        AMQPServiceConfigInstance.reconnectOnConnectionLoss = False
        self.amqpBroker = AmqpFactory(AMQPServiceConfigInstance)
        yield self.amqpBroker.connect()
        yield self.amqpBroker.getChannelReadyDeferred()

        # Initiating config objects without any filename
        # will lead to setting defaults and that's what we
        # need to run the tests
        deliverSmHttpThrowerConfigInstance = deliverSmHttpThrowerConfig()
        # Lower the timeout config to pass the timeout tests quickly
        deliverSmHttpThrowerConfigInstance.timeout = 2
        deliverSmHttpThrowerConfigInstance.retry_delay = 1
        deliverSmHttpThrowerConfigInstance.max_retries = 2

        # Launch the deliverSmHttpThrower
        self.deliverSmHttpThrower = deliverSmHttpThrower()
        self.deliverSmHttpThrower.setConfig(deliverSmHttpThrowerConfigInstance)

        # Add the broker to the deliverSmHttpThrower
        yield self.deliverSmHttpThrower.addAmqpBroker(self.amqpBroker)

        # Test vars:
        self.testDeliverSMPdu = DeliverSM(
            source_addr='1234',
            destination_addr='4567',
            short_message='hello !',
        )

    @defer.inlineCallbacks
    def publishRoutedDeliverSmContent(self, routing_key, DeliverSM, msgid, scid, routedConnector):
        # Publish a routed deliver_sm message on the 'messaging' exchange.
        content = RoutedDeliverSmContent(DeliverSM, msgid, scid, routedConnector)
        yield self.amqpBroker.publish(exchange='messaging', routing_key=routing_key, content=content)

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.amqpBroker.disconnect()
        yield self.deliverSmHttpThrower.stopService()
class HTTPThrowingTestCases(deliverSmHttpThrowerTestCase):
    """End-to-end tests of HTTP throwing: ack, no-ack retry, timeout retry
    and the 404 no-retry policy.

    NOTE(review): these tests use fixed real-time waits (3-12 s), which
    makes them slow and timing-sensitive.
    """

    routingKey = 'deliver_sm_thrower.http'

    @defer.inlineCallbacks
    def setUp(self):
        yield deliverSmHttpThrowerTestCase.setUp(self)

        # Start http servers (port 0 lets the OS pick a free port)
        self.Error404ServerResource = Error404Server()
        self.Error404Server = reactor.listenTCP(0, server.Site(self.Error404ServerResource))
        self.AckServerResource = AckServer()
        self.AckServer = reactor.listenTCP(0, server.Site(self.AckServerResource))
        self.NoAckServerResource = NoAckServer()
        self.NoAckServer = reactor.listenTCP(0, server.Site(self.NoAckServerResource))
        self.TimeoutLeafServerResource = TimeoutLeafServer()
        self.TimeoutLeafServerResource.hangTime = 3
        self.TimeoutLeafServer = reactor.listenTCP(0, server.Site(self.TimeoutLeafServerResource))

    @defer.inlineCallbacks
    def tearDown(self):
        yield deliverSmHttpThrowerTestCase.tearDown(self)
        yield self.Error404Server.stopListening()
        yield self.AckServer.stopListening()
        yield self.NoAckServer.stopListening()
        yield self.TimeoutLeafServer.stopListening()

    @defer.inlineCallbacks
    def test_throwing_http_connector_with_ack(self):
        self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)
        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.AckServer.getHost().port)
        content = 'test_throwing_http_connector test content'
        self.testDeliverSMPdu.params['short_message'] = content
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        # Wait 3 seconds
        exitDeferred = defer.Deferred()
        reactor.callLater(3, exitDeferred.callback, None)
        yield exitDeferred

        # No message retries must be made since ACK was received
        self.assertEqual(self.AckServerResource.render_GET.call_count, 1)
        callArgs = self.AckServerResource.render_GET.call_args_list[0][0][0].args
        self.assertEqual(callArgs['content'][0], self.testDeliverSMPdu.params['short_message'])
        self.assertEqual(callArgs['from'][0], self.testDeliverSMPdu.params['source_addr'])
        self.assertEqual(callArgs['to'][0], self.testDeliverSMPdu.params['destination_addr'])

    @defer.inlineCallbacks
    def test_throwing_http_connector_without_ack(self):
        self.NoAckServerResource.render_GET = mock.Mock(wraps=self.NoAckServerResource.render_GET)
        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.NoAckServer.getHost().port)
        content = 'test_throwing_http_connector test content'
        self.testDeliverSMPdu.params['short_message'] = content
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        # Wait 3 seconds
        exitDeferred = defer.Deferred()
        reactor.callLater(3, exitDeferred.callback, None)
        yield exitDeferred

        # Retries must be made when ACK is not received
        self.assertTrue(self.NoAckServerResource.render_GET.call_count > 1)
        callArgs = self.NoAckServerResource.render_GET.call_args_list[0][0][0].args
        self.assertEqual(callArgs['content'][0], self.testDeliverSMPdu.params['short_message'])
        self.assertEqual(callArgs['from'][0], self.testDeliverSMPdu.params['source_addr'])
        self.assertEqual(callArgs['to'][0], self.testDeliverSMPdu.params['destination_addr'])

    @defer.inlineCallbacks
    def test_throwing_http_connector_timeout_retry(self):
        self.TimeoutLeafServerResource.render_GET = mock.Mock(wraps=self.TimeoutLeafServerResource.render_GET)
        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.TimeoutLeafServer.getHost().port)
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        # Wait 12 seconds (timeout is set to 2 seconds in deliverSmHttpThrowerTestCase.setUp(self)
        exitDeferred = defer.Deferred()
        reactor.callLater(12, exitDeferred.callback, None)
        yield exitDeferred

        # 1 initial attempt + max_retries (2) = 3 calls in total
        self.assertEqual(self.TimeoutLeafServerResource.render_GET.call_count, 3)

    @defer.inlineCallbacks
    def test_throwing_http_connector_404_error_noretry(self):
        """When receiving a 404 error, no further retries shall be made
        """
        self.Error404ServerResource.render_GET = mock.Mock(wraps=self.Error404ServerResource.render_GET)
        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.Error404Server.getHost().port)
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        # Wait 3 seconds
        exitDeferred = defer.Deferred()
        reactor.callLater(3, exitDeferred.callback, None)
        yield exitDeferred

        self.assertEqual(self.Error404ServerResource.render_GET.call_count, 1)
#class SMPPThrowingTestCases(deliverSmHttpThrowerTestCase):
class SMPPThrowingTestCases():
    # NOTE(review): the real base class is commented out above, so this
    # placeholder is not collected as a TestCase; the SMPP-routing test
    # below is still unimplemented and marked as skipped via the
    # twisted.trial `.skip` attribute.
    routingKey = 'deliver_sm_thrower.smpp'

    def test_throwing_smpp_connector(self):
        pass
    test_throwing_smpp_connector.skip = 'TODO'
class DLRThrowerTestCase(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
# Initiating config objects without any filename
# will lead to setting defaults and that's what we
# need to run the tests
AMQPServiceConfigInstance = AmqpConfig()
AMQPServiceConfigInstance.reconnectOnConnectionLoss = False
self.amqpBroker = AmqpFactory(AMQPServiceConfigInstance)
yield self.amqpBroker.connect()
yield self.amqpBroker.getChannelReadyDeferred()
# Initiating config objects without any filename
# will lead to setting defaults and that's what we
# need to run the tests
DLRThrowerConfigInstance = DLRThrowerConfig()
# Lower the timeout config to pass the timeout tests quickly
DLRThrowerConfigInstance.timeout = 2
DLRThrowerConfigInstance.retry_delay = 1
DLRThrowerConfigInstance.max_retries = 2
# Launch the deliverSmHttpThrower
self.DLRThrower = DLRThrower()
self.DLRThrower.setConfig(DLRThrowerConfigInstance)
# Add the broker to the deliverSmHttpThrower
yield self.DLRThrower.addAmqpBroker(self.amqpBroker)
# Start http servers
self.Error404ServerResource = Error404Server()
self.Error404Server = reactor.listenTCP(0, server.Site(self.Error404ServerResource))
self.AckServerResource = AckServer()
self.AckServer = reactor.listenTCP(0, server.Site(self.AckServerResource))
self.NoAckServerResource = NoAckServer()
self.NoAckServer = reactor.listenTCP(0, server.Site(self.NoAckServerResource))
self.TimeoutLeafServerResource = TimeoutLeafServer()
self.TimeoutLeafServerResource.hangTime = 3
self.TimeoutLeafServer = reactor.listenTCP(0, server.Site(self.TimeoutLeafServerResource))
@defer.inlineCallbacks
def publishDLRContent(self, message_status, msgid, dlr_url, dlr_level, id_smsc = '', sub = '',
dlvrd = '', subdate = '', donedate = '', err = '', text = '', method = 'POST', trycount = 0):
content = DLRContent(message_status, msgid, dlr_url, dlr_level, id_smsc, sub, dlvrd, subdate,
donedate, err, text, method, trycount)
yield self.amqpBroker.publish(exchange='messaging', routing_key='dlr_thrower.http', content=content)
@defer.inlineCallbacks
def tearDown(self):
yield self.amqpBroker.disconnect()
yield self.DLRThrower.stopService()
yield self.Error404Server.stopListening()
yield self.AckServer.stopListening()
yield self.NoAckServer.stopListening()
yield self.TimeoutLeafServer.stopListening()
@defer.inlineCallbacks
def test_throwing_http_connector_with_ack(self):
self.AckServerResource.render_POST = mock.Mock(wraps=self.AckServerResource.render_POST)
dlr_url = 'http://127.0.0.1:%s/dlr' % self.AckServer.getHost().port
dlr_level = 1
msgid = 'anything'
message_status = 'DELIVRD'
self.publishDLRContent(message_status, msgid, dlr_url, dlr_level)
# Wait 3 seconds
exitDeferred = defer.Deferred()
reactor.callLater(3, exitDeferred.callback, None)
yield exitDeferred
# No message retries must be made since ACK was received
self.assertEqual(self.AckServerResource.render_POST.call_count, 1)
@defer.inlineCallbacks
def test_throwing_http_connector_without_ack(self):
self.NoAckServerResource.render_POST = mock.Mock(wraps=self.NoAckServerResource.render_POST)
dlr_url = 'http://127.0.0.1:%s/dlr' % self.NoAckServer.getHost().port
dlr_level = 1
msgid = 'anything'
message_status = 'DELIVRD'
self.publishDLRContent(message_status, msgid, dlr_url, dlr_level)
# Wait 3 seconds
exitDeferred = defer.Deferred()
reactor.callLater(3, exitDeferred.callback, None)
yield exitDeferred
# Retries must be made when ACK is not received
self.assertTrue(self.NoAckServerResource.render_POST.call_count > 1)
@defer.inlineCallbacks
def test_throwing_http_connector_timeout_retry(self):
self.TimeoutLeafServerResource.render_POST = mock.Mock(wraps=self.TimeoutLeafServerResource.render_POST)
dlr_url = 'http://127.0.0.1:%s/dlr' % self.TimeoutLeafServer.getHost().port
dlr_level = 1
msgid = 'anything'
message_status = 'DELIVRD'
self.publishDLRContent(message_status, msgid, dlr_url, dlr_level)
# Wait 9 seconds (timeout is set to 2 seconds in deliverSmHttpThrowerTestCase.setUp(self)
exitDeferred = defer.Deferred()
reactor.callLater(12, exitDeferred.callback, None)
yield exitDeferred
self.assertEqual(self.TimeoutLeafServerResource.render_POST.call_count, 3)
@defer.inlineCallbacks
def test_throwing_http_connector_404_error_noretry(self):
"""When receiving a 404 error, no further retries shall be made
"""
self.Error404ServerResource.render_POST = mock.Mock(wraps=self.Error404ServerResource.render_POST)
dlr_url = 'http://127.0.0.1:%s/dlr' % self.Error404Server.getHost().port
dlr_level = 1
msgid = 'anything'
message_status = 'DELIVRD'
self.publishDLRContent(message_status, msgid, dlr_url, dlr_level)
# Wait 3 seconds
exitDeferred = defer.Deferred()
reactor.callLater(3, exitDeferred.callback, None)
yield exitDeferred
self.assertEqual(self.Error404ServerResource.render_POST.call_count, 1)
@defer.inlineCallbacks
def test_throwing_http_connector_dlr_level1(self):
self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)
dlr_url = 'http://127.0.0.1:%s/dlr' % self.AckServer.getHost().port
dlr_level = 1
msgid = 'anything'
message_status = 'DELIVRD'
self.publishDLRContent(message_status, msgid, dlr_url, dlr_level, method = 'GET')
# Wait 3 seconds
exitDeferred = defer.Deferred()
reactor.callLater(3, exitDeferred.callback, None)
yield exitDeferred
# No message retries must be made since ACK was received
self.assertEqual(self.AckServerResource.render_GET.call_count, 1)
callArgs = self.AckServerResource.render_GET.call_args_list[0][0][0].args
self.assertEqual(callArgs['message_status'][0], message_status)
self.assertEqual(callArgs['id'][0], msgid)
self.assertEqual(callArgs['level'][0], str(dlr_level))
@defer.inlineCallbacks
def test_throwing_http_connector_dlr_level2(self):
self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)
dlr_url = 'http://127.0.0.1:%s/dlr' % self.AckServer.getHost().port
dlr_level = 2
msgid = 'anything'
message_status = 'DELIVRD'
self.publishDLRContent(message_status, msgid, dlr_url, dlr_level, id_smsc = 'abc', sub = '3',
dlvrd = '3', subdate = 'anydate', donedate = 'anydate', err = '', text = 'Any text', method = 'GET')
# Wait 3 seconds
exitDeferred = defer.Deferred()
reactor.callLater(3, exitDeferred.callback, None)
yield exitDeferred
# No message retries must be made since ACK was received
self.assertEqual(self.AckServerResource.render_GET.call_count, 1)
callArgs = self.AckServerResource.render_GET.call_args_list[0][0][0].args
self.assertEqual(callArgs['message_status'][0], message_status)
self.assertEqual(callArgs['id'][0], msgid)
self.assertEqual(callArgs['level'][0], str(dlr_level))
self.assertEqual(callArgs['id_smsc'][0], 'abc')
self.assertEqual(callArgs['sub'][0], '3')
self.assertEqual(callArgs['dlvrd'][0], '3')
self.assertEqual(callArgs['subdate'][0], 'anydate')
self.assertEqual(callArgs['donedate'][0], 'anydate')
self.assertEqual(callArgs['err'][0], '')
self.assertEqual(callArgs['text'][0], 'Any text')
| StarcoderdataPython |
8045934 | <gh_stars>100-1000
from flask import Blueprint, request, jsonify, make_response
from app.roles.models import Roles, RolesSchema
from flask_restful import Api
from app.baseviews import Resource
from app.basemodels import db
from sqlalchemy.exc import SQLAlchemyError
from marshmallow import ValidationError
# Blueprint holding all /roles endpoints; URL prefix is set where it is registered.
roles = Blueprint('roles', __name__)
# http://marshmallow.readthedocs.org/en/latest/quickstart.html#declaring-schemas
# https://github.com/marshmallow-code/marshmallow-jsonapi
# Shared serializer. strict=True (marshmallow 2.x) makes schema.validate()
# raise ValidationError instead of returning an error dict.
schema = RolesSchema(strict=True)
api = Api(roles)
# Roles
class CreateListRoles(Resource):
    """Collection endpoint for roles.

    GET returns the whole collection with 200 OK
    (http://jsonapi.org/format/#fetching).  POST creates a role from a
    JSON:API resource object and answers 201 Created
    (http://jsonapi.org/format/#crud); validation and database failures
    are reported as 403 with an ``error`` payload.
    """

    def get(self):
        """Serialize and return every role."""
        all_roles = Roles.query.all()
        return schema.dump(all_roles, many=True).data

    def post(self):
        """Create a role from the request's JSON:API document."""
        payload = request.get_json(force=True)
        try:
            # strict schema: raises ValidationError on a malformed document
            schema.validate(payload)
            attributes = payload['data']['attributes']
            new_role = Roles(attributes['name'],)
            new_role.add(new_role)
            # Re-read the row so the serialized output reflects persisted state
            stored = Roles.query.get(new_role.id)
            return schema.dump(stored).data, 201
        except ValidationError as err:
            resp = jsonify({"error": err.messages})
            resp.status_code = 403
            return resp
        except SQLAlchemyError as e:
            db.session.rollback()
            resp = jsonify({"error": str(e)})
            resp.status_code = 403
            return resp
class GetUpdateDeleteRole(Resource):
    """Single-role endpoint: GET, PATCH and DELETE on one role by id.

    Per JSON:API (http://jsonapi.org/format/#fetching, #crud-updating,
    #crud-deleting): 200 OK for a successful fetch, 404 Not Found for a
    missing resource, 204 No Content after a successful deletion.

    NOTE(review): failures here answer 401 while CreateListRoles uses 403
    for the same error classes; 422/400 would better fit JSON:API
    validation errors. Left unchanged to avoid breaking API clients.
    """

    def get(self, id):
        """Fetch one role, aborting with 404 if it does not exist."""
        role_query = Roles.query.get_or_404(id)
        result = schema.dump(role_query).data
        return result

    def patch(self, id):
        """Partially update a role from a JSON:API payload
        (http://jsonapi.org/format/#crud-updating)."""
        role = Roles.query.get_or_404(id)
        raw_dict = request.get_json(force=True)
        try:
            # strict schema: raises ValidationError on a malformed document
            schema.validate(raw_dict)
            request_dict = raw_dict['data']['attributes']
            for key, value in request_dict.items():
                setattr(role, key, value)
            role.update()
            # Return the freshly persisted representation
            return self.get(id)
        except ValidationError as err:
            resp = jsonify({"error": err.messages})
            resp.status_code = 401
            return resp
        except SQLAlchemyError as e:
            db.session.rollback()
            resp = jsonify({"error": str(e)})
            resp.status_code = 401
            return resp

    def delete(self, id):
        """Delete a role; 204 No Content on success
        (http://jsonapi.org/format/#crud-deleting)."""
        role = Roles.query.get_or_404(id)
        try:
            # Fix: the original assigned role.delete()'s return value to an
            # unused local; the call's side effect is all that matters.
            role.delete(role)
            response = make_response()
            response.status_code = 204
            return response
        except SQLAlchemyError as e:
            db.session.rollback()
            resp = jsonify({"error": str(e)})
            resp.status_code = 401
            return resp
# Route registration: the collection lives at '<blueprint prefix>.json' and a
# single role at '<blueprint prefix>/<id>.json' (prefix comes from wherever
# the `roles` blueprint is registered on the app).
api.add_resource(CreateListRoles, '.json')
api.add_resource(GetUpdateDeleteRole, '/<int:id>.json')
| StarcoderdataPython |
87135 | <reponame>rituraj-iter/ASSIGNMENTS<filename>Sem-5/PIP/Minor Assignment 8/A8Q3.py<gh_stars>1-10
def power(n, m):
    """Return n raised to the integer exponent m.

    Improvements over the original linear recursion:
    - iterative exponentiation by squaring: O(log |m|) multiplications and
      no RecursionError on large exponents;
    - negative exponents are supported (the original recursed forever on
      m < 0); they return a float reciprocal.

    Backward compatible: identical results for all m >= 0.
    """
    if m < 0:
        return 1 / power(n, -m)  # negative exponent -> reciprocal
    result = 1
    base = n
    exp = m
    while exp:
        if exp & 1:          # current low bit set: fold this power of base in
            result *= base
        base *= base         # square for the next bit
        exp >>= 1
    return result
print(power(2,3)) | StarcoderdataPython |
11206092 | <reponame>jfklima/prog_pratica
s = 'Fulano'
# Print every prefix of the name, longest first, down to a single character;
# the loop ends with s == '' just like the original.
while s:
    print(s)
    s = s[:-1]
| StarcoderdataPython |
3262595 | """
350. Intersection of Two Arrays II
What if the given array is already sorted? How would you optimize your algorithm?
- Use intersect_three
- Time: O(M+N)
- Space: O(1)
What if nums1's size is small compared to nums2's size? Which algorithm is better?
- Use intersect_two
- Time: O(M+N)
- Space: O(min(M, N))
What if elements of nums2 are stored on disk, and the memory is limited such that you cannot load all elements into the memory at once?
- Modify intersect_two if nums1 can be stored in memory as hashmap
- Yield nums2 in chunks and iterate
What if both of the arrays don't fit in the memory?
- Yield nums1 in chunks and create the lookup
- Yield nums2 in chunks and follow intersect_two
"""
from collections import defaultdict
from typing import List
class Solution:
    def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Multiset intersection of two arrays.

        Builds a frequency map of each array and emits every common value
        min(count1, count2) times, in first-occurrence order of nums1.
        O(M+N) time, O(M+N) space.
        """
        res = []
        counter_nums1 = defaultdict(int)
        for val in nums1:
            counter_nums1[val] += 1
        counter_nums2 = defaultdict(int)
        for val in nums2:
            counter_nums2[val] += 1
        for k, v in counter_nums1.items():
            # plain `in` (not indexing) so the defaultdict is not mutated
            if k in counter_nums2:
                res.extend(min(v, counter_nums2[k]) * [k])
        return res

    def intersect_two(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Space-optimized variant: only the smaller array is hashed.

        O(M+N) time, O(min(M, N)) space.

        Bug fix: the original called ``self.intersect_two(nums2, nums1)``
        without ``return``, so the swap was discarded and the larger array
        was always hashed, defeating the optimization.
        """
        if len(nums1) > len(nums2):
            return self.intersect_two(nums2, nums1)
        res = []
        counter_nums1 = defaultdict(int)
        for val in nums1:
            counter_nums1[val] += 1
        for val in nums2:
            if counter_nums1[val] > 0:
                res.append(val)
                counter_nums1[val] -= 1
        return res

    def intersect_three(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Two-pointer variant for (nearly) sorted input.

        Sorts both arrays in place, then advances a pointer in each,
        collecting matches. O(M log M + N log N) time, O(1) extra space.
        """
        nums1.sort()
        nums2.sort()
        res = []
        ptr1 = 0
        ptr2 = 0
        while ptr1 < len(nums1) and ptr2 < len(nums2):
            if nums1[ptr1] == nums2[ptr2]:
                res.append(nums1[ptr1])
                ptr1 += 1
                ptr2 += 1
            elif nums1[ptr1] < nums2[ptr2]:
                ptr1 += 1
            else:
                ptr2 += 1
        return res
8103250 | #
# Playlist generator for LightshowPi
# Author: <NAME> (<EMAIL>)
#
# How To:
# cd to the location of the playlist script (i.e. "lightshowpi/tools/generatePlaylist")
# run "python generatePlaylist.py"
# Enter the path to the folder of songs which you desire a playlist for then press <enter> (i.e.
# "/home/pi/lightshowpi/music/sample")
# Playlist file will be created in the folder
# Paths are absolute. Include the whole path to the songs folder. (i.e.
# "/home/pi/lightshowpi/music/christmas")
#
# Updated: <NAME>
# added support to pull title from metadata if it exists
# added support for multiply file types
#
import mutagen
import os
import sys
# Accumulates one "Title\t/abs/path" line per song for the .playlist file.
entries = list()
# Extensions handed to mutagen; anything else never gets a metadata lookup.
file_types = [".wav",
              ".mp1", ".mp2", ".mp3", ".mp4", ".m4a", ".m4b",
              ".ogg",
              ".flac",
              ".oga",
              ".wma", ".wmv",
              ".aif"]
# Fallback title: filename with underscores as spaces and the extension
# stripped. NOTE(review): `ext` is a free variable bound only inside the
# song loop below — the lambda relies on late binding and would raise
# NameError if called before the loop runs.
make_title = lambda s: s.replace("_", " ").replace(ext, "") + "\t"
location = raw_input("Enter the full path to the folder of songs:")
if not os.path.exists(location):
    print "Path does not exists"
    sys.exit(1)
print "Generating Playlist"
# Work from inside the folder so listdir/join see the songs directly.
os.chdir(location)
for song in os.listdir(os.getcwd()):
    entry = ""
    title = ""
    ext = os.path.splitext(song)[1]
    if ext in file_types:
        # easy=True gives mutagen's normalized tag keys (e.g. "title")
        metadata = mutagen.File(song, easy=True)
        if metadata is not None:
            if "title" in metadata:
                # Prefer the embedded tag title when present
                title = metadata["title"][0] + "\t"
            else:
                title = make_title(song)
        else:
            title = make_title(song)
        # NOTE(review): indentation reconstructed — entries appear to be added
        # only for recognized audio extensions; confirm against the original.
        entry = title + os.path.join(os.getcwd(), song)
        entries.append(entry)
        print entry
print "Writing Playlist to File"
with open(".playlist", "w") as playlist:
    playlist.write("\n".join(str(entry) for entry in entries))
print "DONE"
| StarcoderdataPython |
6562163 | <gh_stars>1-10
from . import ServiceMixin, ForecastMixin, EpisodeMixin
from indice_pollution.history.models import Zone
class Service(ServiceMixin):
    """Static descriptor for the ATMO Grand Est AASQA (regional air-quality agency)."""
    is_active = True
    website = 'http://www.atmo-grandest.eu/'
    nom_aasqa = 'ATMO Grand Est'
    # Data licence of the published indices
    licence = 'OdbL v1.0'
    # INSEE codes covered by this service.
    # NOTE(review): some entries are 4 digits ('8105', '10387') while others
    # are 5 — confirm whether consumers expect zero-padded 5-digit codes.
    insee_list = [
        '8105', '57463', '67180', '67482', '88160', '57672', '10387', '68224', '68297',
        '57227', '68066', '52448', '51454', '54395', '51108'
    ]
class Forecast(Service, ForecastMixin):
    """Forecast indices for ATMO Grand Est, fetched from the ArcGIS open-data export."""
    url = 'https://opendata.arcgis.com/api/v3/datasets/b0d57e8f0d5e4cb786cb554eb15c3bcb_0/downloads/data'
    outfields = ['*']
    params_fetch_all = {
        'format': 'geojson',
        'spatialRefId': 4326
    }

    @classmethod
    def get_zone(cls, properties):
        """Resolve the Zone record for one GeoJSON feature's properties."""
        # The feed delivers the code as an int; Zone stores 5-digit strings.
        zone_code = '%05d' % properties['code_zone']
        zone_type = properties['type_zone'].lower()
        return Zone.get(code=zone_code, type_=zone_type)
# Pollution-episode alerts for Grand Est, queried from an ArcGIS FeatureServer.
class Episode(Service, EpisodeMixin):
    url = 'https://services3.arcgis.com/Is0UwT37raQYl9Jj/arcgis/rest/services/alrt3j_grandest/FeatureServer/0/query' | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.