id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1719815 | <reponame>redhat-cip/dci-control-server<filename>dci/api/v1/tests.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from flask import json
from sqlalchemy import exc as sa_exc
from sqlalchemy import sql
from dci.api.v1 import api
from dci.api.v1 import base
from dci.api.v1 import remotecis
from dci.api.v1 import utils as v1_utils
from dci import decorators
from dci.common import exceptions as dci_exc
from dci.common.schemas import (
check_json_is_valid,
clean_json_with_schema,
create_test_schema,
update_test_schema,
check_and_get_args
)
from dci.common import utils
from dci.db import models
_TABLE = models.TESTS
# associate column names with the corresponding SA Column object
_T_COLUMNS = v1_utils.get_columns_name_with_objects(_TABLE)
@api.route('/tests', methods=['POST'])
@decorators.login_required
def create_tests(user):
    """Create a new test resource from the JSON request body.

    Returns a 201 response with the stored values; raises a creation
    conflict if a test with the same name already exists.
    """
    values = flask.request.json
    check_json_is_valid(create_test_schema, values)
    values.update(v1_utils.common_values_dict())
    # todo: remove team_id
    values.pop('team_id', None)

    insert_query = _TABLE.insert().values(**values)
    try:
        flask.g.db_conn.execute(insert_query)
    except sa_exc.IntegrityError:
        raise dci_exc.DCICreationConflict(_TABLE.name, 'name')

    return flask.Response(
        json.dumps({'test': values}),
        201,
        content_type='application/json',
    )
@api.route('/tests/<uuid:t_id>', methods=['PUT'])
@decorators.login_required
def update_tests(user, t_id):
    """Update an existing test, guarded by the caller's If-Match ETag."""
    v1_utils.verify_existence_and_get(t_id, _TABLE)
    if_match_etag = utils.check_and_get_etag(flask.request.headers)

    values = clean_json_with_schema(update_test_schema, flask.request.json)
    values['etag'] = utils.gen_etag()

    # Only update when both the id and the caller-supplied ETag match, so
    # concurrent writers cannot silently clobber each other's changes.
    update_query = _TABLE.update().returning(*_TABLE.columns).\
        where(sql.and_(_TABLE.c.etag == if_match_etag,
                       _TABLE.c.id == t_id)).values(**values)

    result = flask.g.db_conn.execute(update_query)
    if not result.rowcount:
        raise dci_exc.DCIConflict('Test', t_id)

    return flask.Response(
        json.dumps({'test': result.fetchone()}),
        200,
        headers={'ETag': values['etag']},
        content_type='application/json',
    )
def get_tests_to_issues(topic_id):
    """Map each test name to the issues attached to it for a topic.

    Returns a dict of {test_name: [{'id': ..., 'url': ...}, ...]}.
    """
    query = (sql.select([models.TESTS, models.ISSUES], use_labels=True)
             .select_from(models.TESTS.join(
                 models.JOIN_ISSUES_TESTS).join(models.ISSUES))
             .where(models.ISSUES.c.topic_id == topic_id))
    joined_rows = flask.g.db_conn.execute(query).fetchall()

    tests_to_issues = {}
    for row in joined_rows:
        issue = {'id': str(row['issues_id']),
                 'url': row['issues_url']}
        tests_to_issues.setdefault(row['tests_name'], []).append(issue)
    return tests_to_issues
def get_all_tests_by_team(user, team_id):
    """Return all non-archived tests plus a '_meta' row count.

    ``team_id`` is unused; it is kept for backward compatibility.
    """
    # todo: remove team_id
    args = check_and_get_args(flask.request.args.to_dict())
    query = v1_utils.QueryBuilder(_TABLE, args, _T_COLUMNS)
    query.add_extra_condition(_TABLE.c.state != 'archived')
    # get the number of rows for the '_meta' section
    nb_rows = query.get_number_of_rows()
    formatted_rows = v1_utils.format_result(
        query.execute(fetchall=True), _TABLE.name)
    return flask.jsonify({'tests': formatted_rows,
                          '_meta': {'count': nb_rows}})
@api.route('/tests', methods=['GET'])
@decorators.login_required
def get_all_tests(user):
    """GET /tests: list all tests, passing ``None`` for the (deprecated) team filter."""
    return get_all_tests_by_team(user, None)
@api.route('/tests/<uuid:t_id>', methods=['GET'])
@decorators.login_required
def get_test_by_id(user, t_id):
    """GET /tests/<id>: return one test; 404s if it does not exist."""
    test = v1_utils.verify_existence_and_get(t_id, _TABLE)
    return flask.jsonify({'test': test})
@api.route('/tests/<uuid:t_id>/remotecis', methods=['GET'])
@decorators.login_required
def get_remotecis_by_test(user, t_id):
    """GET /tests/<id>/remotecis: list the remotecis attached to a test.

    Bug fix: the parameter must be named ``t_id`` to match the
    ``<uuid:t_id>`` placeholder in the route — Flask passes URL variables
    by keyword, so the previous ``test_id`` name raised a TypeError
    ("unexpected keyword argument 't_id'") on every request.
    """
    test = v1_utils.verify_existence_and_get(t_id, _TABLE)
    return remotecis.get_all_remotecis(test['id'])
@api.route('/tests/<uuid:t_id>', methods=['DELETE'])
@decorators.login_required
def delete_test_by_id(user, t_id):
    """Soft-delete a test: mark it — and its files — as 'archived'."""
    v1_utils.verify_existence_and_get(t_id, _TABLE)

    with flask.g.db_conn.begin():
        archived = {'state': 'archived'}
        result = flask.g.db_conn.execute(
            _TABLE.update().where(_TABLE.c.id == t_id).values(**archived))
        if not result.rowcount:
            raise dci_exc.DCIDeleteConflict('Test', t_id)

        # Cascade the soft delete to dependent resources.
        for model in [models.FILES]:
            flask.g.db_conn.execute(
                model.update().where(model.c.test_id == t_id).values(**archived))

    return flask.Response(None, 204, content_type='application/json')
@api.route('/tests/purge', methods=['GET'])
@decorators.login_required
def get_to_purge_archived_tests(user):
    """GET /tests/purge: list archived tests eligible for purging."""
    return base.get_to_purge_archived_resources(user, _TABLE)
@api.route('/tests/purge', methods=['POST'])
@decorators.login_required
def purge_archived_tests(user):
    """POST /tests/purge: permanently delete archived tests."""
    return base.purge_archived_resources(user, _TABLE)
| StarcoderdataPython |
1758425 | <reponame>Kevinskwk/CV_and_DL_Python<filename>scripts/centroid.py<gh_stars>1-10
# you can tune with calibration.py
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
cv.namedWindow('Mask')
cv.namedWindow('Image')

# HSV colour thresholds for the tracked object (tune with calibration.py).
UpperH = 158  # 0-180
UpperS = 222  # 0-255
UpperV = 189  # 0-255
LowerH = 120  # 0-180
LowerS = 14   # 0-255
LowerV = 47   # 0-255
# NOTE(review): this is compared against sum(sum(mask)), i.e. a sum of
# 0/255 pixel values rather than a pixel count — confirm whether
# cv.countNonZero(mask) was intended for "number of white pixels".
MinSize = 15000  # Minimun number of white pixels of the mask
Filter = 9  # Must be odd number (median-blur kernel size)

last_five = np.zeros((5, 2), dtype=int)  # rolling buffer of recent centroids
DETECT = False

while(True):
    ret, frame = cap.read()
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    lower_colour = np.array([LowerH, LowerS, LowerV])
    upper_colour = np.array([UpperH, UpperS, UpperV])
    mask = cv.inRange(hsv, lower_colour, upper_colour)
    # Fix: use the tunable 'Filter' kernel size instead of a hard-coded 9,
    # so changing the setting above actually takes effect.
    mask = cv.medianBlur(mask, Filter)
    #contours, hierarchy = cv.findContours(mask,cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)

    if sum(sum(mask)) >= MinSize:
        DETECT = True
        M = cv.moments(mask)
        if M["m00"] != 0:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
    else:
        # No (or too small a) detection: park the marker at the origin.
        DETECT = False
        cX, cY = 0, 0

    #cv.drawContours(frame, mask, -1, (0,255,0), 3)
    cv.circle(frame, (cX, cY), 5, (255, 255, 255), -1)
    cv.putText(frame, "centroid", (cX - 25, cY - 25),
               cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
    cv.imshow('Mask', mask)
    cv.imshow("Image", frame)

    if DETECT:
        current_pos = np.array([[cX, cY]])
        last_five = np.concatenate((last_five[1:5, :], current_pos))
        # Report the position once the centroid has been stable
        # (std-dev <= 1 px on both axes) over the last five frames.
        if np.std(last_five[:, 0]) <= 1 and np.std(last_five[:, 1]) <= 1:
            print(np.average(last_five, axis=0))

    if cv.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
print(UpperH, UpperS, UpperV, LowerH, LowerS, LowerV)
cap.release()
cv.destroyAllWindows()

#img = cv.imread("pick.jpg")
#gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# convert the grayscale image to binary image
#cv.waitKey(0) | StarcoderdataPython |
1613709 | import json
import tempfile
from fastapi import Depends, FastAPI
import numpy as np
import requests
from requests.adapters import HTTPAdapter, Retry
from ray._private.test_utils import wait_for_condition
from ray.air.checkpoint import Checkpoint
from ray.air.predictor import DataBatchType, Predictor
from ray.serve.model_wrappers import ModelWrapperDeployment
from ray.serve.pipeline.api import build
from ray.serve.dag import InputNode
from ray.serve.deployment_graph import RayServeDAGHandle
from ray.serve.http_adapters import json_to_ndarray
import ray
from ray import serve
class AdderPredictor(Predictor):
    """Toy predictor that adds a fixed increment to every input value."""

    def __init__(self, increment: int) -> None:
        self.increment = increment

    @classmethod
    def from_checkpoint(cls, checkpoint: "AdderCheckpoint") -> "Predictor":
        """Restore from an in-memory dict or from an on-disk JSON file."""
        if checkpoint._data_dict:
            return cls(checkpoint._data_dict["increment"])
        elif checkpoint._local_path:  # uri case
            with open(checkpoint._local_path) as f:
                return cls(json.load(f))
        raise Exception("Unreachable")

    def predict(self, data: DataBatchType) -> DataBatchType:
        """Return one record per input value, each tagged with the batch size."""
        shifted = (np.array(data) + self.increment).tolist()
        batch_size = len(data)
        return [{"value": value, "batch_size": batch_size} for value in shifted]
class AdderCheckpoint(Checkpoint):
    """Checkpoint type consumed by AdderPredictor.from_checkpoint; no extra behavior."""
    pass
def adder_schema(query_param_arg: int) -> DataBatchType:
    """HTTP adapter: wrap a single query parameter into a 1-element batch."""
    return np.array([query_param_arg])
@ray.remote
def send_request(**requests_kargs):
    """Ray task: POST to the locally served Adder deployment, return the JSON body."""
    return requests.post("http://localhost:8000/Adder/", **requests_kargs).json()
def test_simple_adder(serve_instance):
    """A single request through the deployed AdderPredictor adds 2."""
    ModelWrapperDeployment.options(name="Adder").deploy(
        predictor_cls=AdderPredictor,
        checkpoint=AdderCheckpoint.from_dict({"increment": 2}),
    )
    response = ray.get(send_request.remote(json={"array": [40]}))
    assert response == {"value": [42], "batch_size": 1}
def test_batching(serve_instance):
    """Two concurrent requests are grouped and served as one batch of size 2."""
    ModelWrapperDeployment.options(name="Adder").deploy(
        predictor_cls=AdderPredictor,
        checkpoint=AdderCheckpoint.from_dict({"increment": 2}),
        # Huge wait timeout so both requests land in the same batch.
        batching_params=dict(max_batch_size=2, batch_wait_timeout_s=1000),
    )
    responses = ray.get(
        [send_request.remote(json={"array": [40]}) for _ in range(2)])
    for response in responses:
        assert response == {"value": [42], "batch_size": 2}
app = FastAPI()


@serve.deployment(route_prefix="/ingress")
@serve.ingress(app)
class Ingress:
    """FastAPI ingress deployment that forwards POSTs into a serve DAG."""

    def __init__(self, dag: RayServeDAGHandle) -> None:
        self.dag = dag

    @app.post("/")
    async def predict(self, data=Depends(json_to_ndarray)):
        # json_to_ndarray converts the JSON request body to a numpy array
        # before it reaches the DAG.
        return await self.dag.remote(data)
def test_model_wrappers_in_pipeline(serve_instance):
    """Deploy a ModelWrapper inside a serve pipeline and query it over HTTP."""
    _, path = tempfile.mkstemp()
    with open(path, "w") as f:
        json.dump(2, f)

    predictor_cls = "ray.serve.tests.test_model_wrappers.AdderPredictor"
    checkpoint_cls = "ray.serve.tests.test_model_wrappers.AdderCheckpoint"

    with InputNode() as dag_input:
        m1 = ModelWrapperDeployment.bind(
            predictor_cls=predictor_cls,  # TODO: can't be the raw class right now?
            checkpoint={  # TODO: can't be the raw object right now?
                "checkpoint_cls": checkpoint_cls,
                "uri": path,
            },
        )
        dag = m1.predict.bind(dag_input)
    deployments = build(Ingress.bind(dag))
    for d in deployments:
        d.deploy()

    # Fix: target the local serve proxy like the other tests do; the previous
    # hard-coded host "1192.168.3.11" is not even a valid IPv4 address.
    resp = requests.post("http://127.0.0.1:8000/ingress", json={"array": [40]})
    print(resp.text)
    resp.raise_for_status()
    # Fix: pytest ignores return values, so the final comparison must be an
    # assert — otherwise a wrong response could never fail this test.
    assert resp.json() == {"value": [42], "batch_size": 1}
# NOTE(simon): Make sure this is the last test because the REST API will start
# controller and http proxy in another namespace.
def test_yaml_compatibility(serve_instance):
    """Deploy a ModelWrapper through the Serve REST API and query it."""
    _, path = tempfile.mkstemp()
    with open(path, "w") as f:
        json.dump(2, f)

    # Retry the PUT a few times: the dashboard server may still be starting.
    session = requests.Session()
    retries = Retry(total=5, backoff_factor=0.1)
    session.mount("http://", HTTPAdapter(max_retries=retries))

    # TODO(simon): use ServeSubmissionClient when it's merged.
    predictor_cls = "ray.serve.tests.test_model_wrappers.AdderPredictor"
    checkpoint_cls = "ray.serve.tests.test_model_wrappers.AdderCheckpoint"
    schema_func = "ray.serve.tests.test_model_wrappers.adder_schema"

    # Everything is passed as dotted import strings, mirroring what a YAML
    # config file would contain.
    resp = session.put(
        "http://127.0.0.1:8265/api/serve/deployments/",
        json={
            "deployments": [
                {
                    "name": "Adder",
                    "import_path": "ray.serve.model_wrappers.ModelWrapperDeployment",
                    "init_kwargs": {
                        "predictor_cls": predictor_cls,
                        "checkpoint": {
                            "checkpoint_cls": checkpoint_cls,
                            "uri": path,
                        },
                        "http_adapter": schema_func,
                        "batching_params": {"max_batch_size": 1},
                    },
                }
            ]
        },
    )
    resp.raise_for_status()

    # Note(simon): The Serve HTTP deploy is non blocking,
    # so we retries to make sure the deployment is up
    def cond():
        resp = ray.get(send_request.remote(params={"query_param_arg": 40}))
        return resp == {"value": [42], "batch_size": 1}

    wait_for_condition(cond)
| StarcoderdataPython |
3210214 | import numpy as np
from model.error import Error
from model.decision_tree.decision_tree import DecisionTree
class RandomForest:
    """Bagging ensemble of CART decision trees.

    ``input_attr`` / ``output_attr`` take the values 0 or 1
    (NOTE(review): inferred from the 0/1 validation below and from the
    averaging branch in ``predict`` — 1 appears to mean "numerical";
    confirm against DecisionTree's convention).
    """

    def __init__(self, input_attr=0, output_attr=0, num=20, feature_sampling=np.log2):
        if input_attr != 0 and input_attr != 1:
            raise Error("Invalid input_attr!")
        if output_attr != 0 and output_attr != 1:
            raise Error("Invalid output_attr!")
        self.num = num
        self.input_attr = input_attr
        self.output_attr = output_attr
        self.feature_sampling = feature_sampling
        # One independent CART tree per ensemble member.
        self.trees = [DecisionTree(input_attr, output_attr, type="CART")
                      for _ in range(num)]

    def bootstrap(self, x, y):
        """Draw len(x) rows from (x, y) with replacement."""
        n = x.shape[0]
        indices = [np.random.randint(n) for _ in range(n)]
        sampling_x = np.array([x[i] for i in indices])
        sampling_y = np.array([y[i] for i in indices])
        return sampling_x, sampling_y

    def fit(self, x, y):
        """Fit each tree on its own bootstrap sample of the training data."""
        for tree in self.trees:
            sampling_x, sampling_y = self.bootstrap(x, y)
            tree.fit(sampling_x, sampling_y, self.feature_sampling)

    def predict(self, x):
        """Aggregate member predictions.

        Mean of the trees' outputs for numerical targets, majority vote
        otherwise.
        """
        res = np.array([tree.predict(x) for tree in self.trees])
        if self.output_attr == 1:
            return np.sum(res) / len(res)
        # Majority vote. np.argmax returns the first maximum, which matches
        # the original strict '<' loop (ties break to the first label).
        labels, cnts = np.unique(res, return_counts=True)
        return labels[np.argmax(cnts)]
88225 | # Feel free to modifiy this file.
# It will only be used to verify the settings are correct
import os
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms, models

from dataloader import CustomDataset
from submission import get_model

parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint-dir', type=str)
args = parser.parse_args()

train_transform = transforms.Compose([
    transforms.ToTensor(),
])

# NOTE(review): assumes a dataset mounted at /dataset with a "train" split —
# confirm the expected layout against CustomDataset.
trainset = CustomDataset(root='/dataset', split="train", transform=train_transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256, shuffle=True, num_workers=2)

net = get_model()
net = torch.nn.DataParallel(net)
net = net.cuda()  # requires a CUDA-capable GPU

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.1)

print('Start Training')
net.train()
for epoch in range(10):
    running_loss = 0.0
    for i, data in enumerate(trainloader):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        inputs, labels = inputs.cuda(), labels.cuda()

        outputs = net(inputs)
        loss = criterion(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 10 == 9:  # print every 10 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 10))
            running_loss = 0.0

print('Finished Training')
os.makedirs(args.checkpoint_dir, exist_ok=True)
torch.save(net.module.state_dict(), os.path.join(args.checkpoint_dir, "net_demo.pth"))
print(f"Saved checkpoint to {os.path.join(args.checkpoint_dir, 'net_demo.pth')}")
| StarcoderdataPython |
3367437 | """Misc zlib tests
Made for Jython.
"""
import unittest
import zlib
from array import array
from test import test_support
class ArrayTestCase(unittest.TestCase):
    """Check that zlib accepts array.array buffers (Python 2 / Jython era code)."""

    def test_array(self):
        # One-shot compress/decompress round trip.
        self._test_array(zlib.compress, zlib.decompress)

    def test_array_compressobj(self):
        # Same round trip through the incremental (de)compressor objects.
        def compress(value):
            co = zlib.compressobj()
            return co.compress(value) + co.flush()
        def decompress(value):
            dco = zlib.decompressobj()
            return dco.decompress(value) + dco.flush()
        self._test_array(compress, decompress)

    def _test_array(self, compress, decompress):
        # Python 2 'c' typecode arrays behave like byte strings here;
        # tostring() is the Python 2 spelling of tobytes().
        self.assertEqual(compress(array('c', 'jython')), compress('jython'))
        intarray = array('i', range(5))
        self.assertEqual(compress(intarray), compress(intarray.tostring()))
        compressed = array('c', compress('jython'))
        self.assertEqual('jython', decompress(compressed))
def test_main():
    """Entry point used by the regression-test driver."""
    test_support.run_unittest(ArrayTestCase)

if __name__ == '__main__':
    test_main()
| StarcoderdataPython |
4445 | <reponame>jean1042/plugin-azure-cloud-services
import logging
from spaceone.inventory.libs.connector import AzureConnector
from spaceone.inventory.error import *
from spaceone.inventory.error.custom import *
__all__ = ['SnapshotConnector']
_LOGGER = logging.getLogger(__name__)
class SnapshotConnector(AzureConnector):
    """Azure connector that lists managed-disk snapshots."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.set_connect(kwargs.get('secret_data'))

    def list_snapshots(self):
        """Return all snapshots visible to the connected subscription.

        NOTE(review): on ConnectionError the failure is only logged and the
        method implicitly returns None — confirm callers handle that.
        """
        try:
            return self.compute_client.snapshots.list()
        except ConnectionError:
            # Fix copy-paste bug: this connector handles Snapshots, but the
            # error was reported against 'Public IP Address'.
            _LOGGER.error(ERROR_CONNECTOR(field='Snapshot'))
| StarcoderdataPython |
3382764 | <reponame>Atzingen/curso-IoT-2017<filename>aula-10-mqttbroker/mqtt_inicio/publish_mqtt.py<gh_stars>1-10
import paho.mqtt.client as mqtt

# Connect to the MQTT broker at a hard-coded LAN address and publish a
# single status message ('ligado') on the 'teste' topic.
client = mqtt.Client()
client.connect('192.168.127.12')
client.publish('teste','ligado')
| StarcoderdataPython |
3257014 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.31
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _efitlib
import new
# SWIG runtime shims (auto-generated file — see the header warning above).
new_instancemethod = new.instancemethod
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # SWIG-generated helper: route attribute writes through the class's
    # __swig_setmethods__ table so the C-side setters run; with static=1,
    # refuse to create attributes the wrapped struct does not declare.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'PySwigObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant: allows adding new attributes to the instance.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Route attribute reads through the class's __swig_getmethods__ table
    # so the C-side getters run. (Python 2 'raise' syntax below.)
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError,name
def _swig_repr(self):
    # Best-effort repr that survives a missing underlying C object.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    # Pre-new-style-class Python: fall back to an empty classic base class.
    class _object : pass
    _newclass = 0
del types
class ellipsoid(_object):
    """Auto-generated SWIG proxy for the C 'ellipsoid' struct in _efitlib."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ellipsoid, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ellipsoid, name)
    __repr__ = _swig_repr
    # Each struct field gets a C getter/setter pair; on new-style-class
    # Pythons the pair is also exposed as a property.
    __swig_setmethods__["name"] = _efitlib.ellipsoid_name_set
    __swig_getmethods__["name"] = _efitlib.ellipsoid_name_get
    if _newclass:name = _swig_property(_efitlib.ellipsoid_name_get, _efitlib.ellipsoid_name_set)
    __swig_setmethods__["position"] = _efitlib.ellipsoid_position_set
    __swig_getmethods__["position"] = _efitlib.ellipsoid_position_get
    if _newclass:position = _swig_property(_efitlib.ellipsoid_position_get, _efitlib.ellipsoid_position_set)
    __swig_setmethods__["axis"] = _efitlib.ellipsoid_axis_set
    __swig_getmethods__["axis"] = _efitlib.ellipsoid_axis_get
    if _newclass:axis = _swig_property(_efitlib.ellipsoid_axis_get, _efitlib.ellipsoid_axis_set)
    __swig_setmethods__["orientation"] = _efitlib.ellipsoid_orientation_set
    __swig_getmethods__["orientation"] = _efitlib.ellipsoid_orientation_get
    if _newclass:orientation = _swig_property(_efitlib.ellipsoid_orientation_get, _efitlib.ellipsoid_orientation_set)
    __swig_setmethods__["inv_orientation"] = _efitlib.ellipsoid_inv_orientation_set
    __swig_getmethods__["inv_orientation"] = _efitlib.ellipsoid_inv_orientation_get
    if _newclass:inv_orientation = _swig_property(_efitlib.ellipsoid_inv_orientation_get, _efitlib.ellipsoid_inv_orientation_set)
    __swig_setmethods__["tensor"] = _efitlib.ellipsoid_tensor_set
    __swig_getmethods__["tensor"] = _efitlib.ellipsoid_tensor_get
    if _newclass:tensor = _swig_property(_efitlib.ellipsoid_tensor_get, _efitlib.ellipsoid_tensor_set)
    def getPosition(*args): return _efitlib.ellipsoid_getPosition(*args)
    def getAxis(*args): return _efitlib.ellipsoid_getAxis(*args)
    def getOrientation(*args): return _efitlib.ellipsoid_getOrientation(*args)
    def __init__(self, *args):
        this = _efitlib.new_ellipsoid(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _efitlib.delete_ellipsoid
    __del__ = lambda self : None;
ellipsoid_swigregister = _efitlib.ellipsoid_swigregister
ellipsoid_swigregister(ellipsoid)
class efit_info(_object):
    """Auto-generated SWIG proxy for the C 'efit_info' options struct in _efitlib."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, efit_info, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, efit_info, name)
    __repr__ = _swig_repr
    # Each struct field gets a C getter/setter pair; on new-style-class
    # Pythons the pair is also exposed as a property.
    __swig_setmethods__["weightflag"] = _efitlib.efit_info_weightflag_set
    __swig_getmethods__["weightflag"] = _efitlib.efit_info_weightflag_get
    if _newclass:weightflag = _swig_property(_efitlib.efit_info_weightflag_get, _efitlib.efit_info_weightflag_set)
    __swig_setmethods__["covarflag"] = _efitlib.efit_info_covarflag_set
    __swig_getmethods__["covarflag"] = _efitlib.efit_info_covarflag_get
    if _newclass:covarflag = _swig_property(_efitlib.efit_info_covarflag_get, _efitlib.efit_info_covarflag_set)
    __swig_setmethods__["volumeflag"] = _efitlib.efit_info_volumeflag_set
    __swig_getmethods__["volumeflag"] = _efitlib.efit_info_volumeflag_get
    if _newclass:volumeflag = _swig_property(_efitlib.efit_info_volumeflag_get, _efitlib.efit_info_volumeflag_set)
    __swig_setmethods__["matrixflag"] = _efitlib.efit_info_matrixflag_set
    __swig_getmethods__["matrixflag"] = _efitlib.efit_info_matrixflag_get
    if _newclass:matrixflag = _swig_property(_efitlib.efit_info_matrixflag_get, _efitlib.efit_info_matrixflag_set)
    __swig_setmethods__["nocenterflag"] = _efitlib.efit_info_nocenterflag_set
    __swig_getmethods__["nocenterflag"] = _efitlib.efit_info_nocenterflag_get
    if _newclass:nocenterflag = _swig_property(_efitlib.efit_info_nocenterflag_get, _efitlib.efit_info_nocenterflag_set)
    __swig_setmethods__["noscaleflag"] = _efitlib.efit_info_noscaleflag_set
    __swig_getmethods__["noscaleflag"] = _efitlib.efit_info_noscaleflag_get
    if _newclass:noscaleflag = _swig_property(_efitlib.efit_info_noscaleflag_get, _efitlib.efit_info_noscaleflag_set)
    __swig_setmethods__["nosortflag"] = _efitlib.efit_info_nosortflag_set
    __swig_getmethods__["nosortflag"] = _efitlib.efit_info_nosortflag_get
    if _newclass:nosortflag = _swig_property(_efitlib.efit_info_nosortflag_get, _efitlib.efit_info_nosortflag_set)
    __swig_setmethods__["count"] = _efitlib.efit_info_count_set
    __swig_getmethods__["count"] = _efitlib.efit_info_count_get
    if _newclass:count = _swig_property(_efitlib.efit_info_count_get, _efitlib.efit_info_count_set)
    __swig_setmethods__["cov_scale"] = _efitlib.efit_info_cov_scale_set
    __swig_getmethods__["cov_scale"] = _efitlib.efit_info_cov_scale_get
    if _newclass:cov_scale = _swig_property(_efitlib.efit_info_cov_scale_get, _efitlib.efit_info_cov_scale_set)
    __swig_setmethods__["ell_scale"] = _efitlib.efit_info_ell_scale_set
    __swig_getmethods__["ell_scale"] = _efitlib.efit_info_ell_scale_get
    if _newclass:ell_scale = _swig_property(_efitlib.efit_info_ell_scale_get, _efitlib.efit_info_ell_scale_set)
    def __init__(self, *args):
        this = _efitlib.new_efit_info(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _efitlib.delete_efit_info
    __del__ = lambda self : None;
efit_info_swigregister = _efitlib.efit_info_swigregister
efit_info_swigregister(efit_info)
# Flat function aliases re-exported from the compiled _efitlib module
# (auto-generated; do not edit by hand).
fitEllipse = _efitlib.fitEllipse
vec_normalize = _efitlib.vec_normalize
vec_centroid = _efitlib.vec_centroid
vec_dot = _efitlib.vec_dot
vec_magsq = _efitlib.vec_magsq
vec_mag = _efitlib.vec_mag
vec_distancesq = _efitlib.vec_distancesq
vec_distance = _efitlib.vec_distance
vec_max = _efitlib.vec_max
vec_length = _efitlib.vec_length
vec_ctos = _efitlib.vec_ctos
vec_stoc = _efitlib.vec_stoc
vec_sub = _efitlib.vec_sub
vec_copy = _efitlib.vec_copy
vec_add = _efitlib.vec_add
vec_scale = _efitlib.vec_scale
vec_zero = _efitlib.vec_zero
vec_cross = _efitlib.vec_cross
vec_mult = _efitlib.vec_mult
vec_offset = _efitlib.vec_offset
vec_rand = _efitlib.vec_rand
vec_average = _efitlib.vec_average
vec_transform = _efitlib.vec_transform
vec_ftransform = _efitlib.vec_ftransform
mat_jacobi = _efitlib.mat_jacobi
quat_to_mat = _efitlib.quat_to_mat
mat_to_quat = _efitlib.mat_to_quat
| StarcoderdataPython |
3241300 | #Escreva um programa que faça o computador "pensar" em um número inteiro
#entre 0 e 5 e peça para o usuário tentar descobrir qual foi o número
#escolhido pelo computador.
#O programa deverá escrever na tela
#se o usuário venceu ou perdeu.
from random import randint
from time import sleep

# Guessing game: the computer picks an integer in [0, 5] and the user
# tries to guess it. (User-facing strings are in Portuguese.)
print('_____<NAME>_____')
print('O computador escolherá um número!')
n = randint(0, 5)  # computer's secret pick, both bounds inclusive
print('Escolhendo...')
sleep(1.5)  # small pause for dramatic effect
print('Ja escolhi! Agora é a sua vez!')
u = int(input('Digite um número de 0 a 5: '))
if n == u:
    print('Usuário VENCEU! Parabens!')
else:
    print('Usuário PERDEU. Tente novamente!')
print(f'O computador escolheu {n} e o usuário escolheu {u}')
| StarcoderdataPython |
58779 | <filename>tests/riscv/vector/vector_indexed_load_store_force.py
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from VectorTestSequence import VectorLoadStoreTestSequence
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
# This test verifies that indexed load and store instructions can be generated and executed
# successfully.
class MainSequence(VectorLoadStoreTestSequence):
    """Test sequence exercising RISC-V vector indexed load/store instructions."""

    def __init__(self, aGenThread, aName=None):
        super().__init__(aGenThread, aName)

        # Ordered (VLOX/VSOX) and unordered (VLUX/VSUX) indexed loads and
        # stores, covering 8/16/32/64-bit index element widths.
        self._mInstrList = (
            "VLOXEI16.V##RISCV",
            "VLOXEI32.V##RISCV",
            "VLOXEI64.V##RISCV",
            "VLOXEI8.V##RISCV",
            "VLUXEI16.V##RISCV",
            "VLUXEI32.V##RISCV",
            "VLUXEI64.V##RISCV",
            "VLUXEI8.V##RISCV",
            "VSOXEI16.V##RISCV",
            "VSOXEI32.V##RISCV",
            "VSOXEI64.V##RISCV",
            "VSOXEI8.V##RISCV",
            "VSUXEI16.V##RISCV",
            "VSUXEI32.V##RISCV",
            "VSUXEI64.V##RISCV",
            "VSUXEI8.V##RISCV",
        )

    # Return a list of test instructions to randomly choose from.
    def _getInstructionList(self):
        return self._mInstrList
# Entry points picked up by the FORCE-RISCV test framework.
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| StarcoderdataPython |
1723990 | <reponame>mitodl/codejail
"""A proxy subprocess-making process for CodeJail."""
import ast
import logging
import os
import os.path
import subprocess
import sys
import time
import six
from six.moves import range
from .subproc import run_subprocess
log = logging.getLogger("codejail")
# We use .readline to get data from the pipes between the processes, so we need
# to ensure that a newline does not appear in the data. We also need a way to
# communicate a few values, and unpack them. Lastly, we need to be sure we can
# handle binary data. Serializing with repr() and deserializing the literals
# that result give us all the properties we need.
# Pick (de)serialization functions appropriate to the running Python.
if six.PY2:
    # Python 2: everything is bytes everywhere.
    serialize_in = serialize_out = repr
    deserialize_in = deserialize_out = ast.literal_eval
else:
    # Python 3: the outside of subprocess talks in bytes (the pipes from
    # subprocess.* are all about bytes). The inside of the Python code it runs
    # talks in text (reading from sys.stdin is text, writing to sys.stdout
    # expects text).
    def serialize_out(val):
        """Send data out of the proxy process. Needs to make unicode."""
        return repr(val)

    def serialize_in(val):
        """Send data into the proxy process. Needs to make bytes."""
        return serialize_out(val).encode('utf8')

    def deserialize_in(ustr):
        """Get data into the proxy process. Needs to take unicode."""
        return ast.literal_eval(ustr)

    def deserialize_out(bstr):
        """Get data from the proxy process. Needs to take bytes."""
        return deserialize_in(bstr.decode('utf8'))
##
# Client code, runs in the parent CodeJail process.
##
def run_subprocess_through_proxy(*args, **kwargs):
    """
    Works just like :ref:`run_subprocess`, but through the proxy process.

    This will retry a few times if need be.
    """
    last_exception = None
    for _tries in range(3):
        try:
            proxy = get_proxy()

            # Write the args and kwargs to the proxy process.
            proxy_stdin = serialize_in((args, kwargs))
            proxy.stdin.write(proxy_stdin+b"\n")
            proxy.stdin.flush()

            # Read the result from the proxy. This blocks until the process
            # is done.
            proxy_stdout = proxy.stdout.readline()
            if not proxy_stdout:
                # EOF: the proxy must have died.
                raise Exception("Proxy process died unexpectedly!")

            status, stdout, stderr, log_calls = deserialize_out(proxy_stdout.rstrip())

            # Write all the log messages to the log, and return.
            # NOTE(review): the loop variable 'args' shadows the function's
            # *args here; harmless because *args is not used afterwards.
            for level, msg, args in log_calls:
                log.log(level, msg, *args)

            return status, stdout, stderr
        except Exception:       # pylint: disable=broad-except
            log.exception("Proxy process failed")
            # Give the proxy process a chance to die completely if it is dying.
            time.sleep(.001)
            last_exception = sys.exc_info()
            continue

    # If we finished all the tries, then raise the last exception we got.
    if last_exception:
        six.reraise(*last_exception)
# There is one global proxy process.
PROXY_PROCESS = None


def get_proxy():
    """Return the singleton proxy process, starting (or restarting) it if needed."""
    global PROXY_PROCESS  # pylint: disable=global-statement

    # If we had a proxy process, but it died, clean up.
    if PROXY_PROCESS is not None:
        status = PROXY_PROCESS.poll()
        if status is not None:
            log.info(
                "CodeJail proxy process (pid %d) ended with status code %d",
                PROXY_PROCESS.pid,
                status
            )
            PROXY_PROCESS = None

    # If we need a proxy, make a proxy.
    if PROXY_PROCESS is None:
        # Start the proxy by invoking proxy_main.py in our root directory.
        root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        proxy_main_py = os.path.join(root, "proxy_main.py")

        # Run proxy_main.py with the same Python that is running us. "-u" makes
        # the stdin and stdout unbuffered. We pass the log level of the
        # "codejail" log so that the proxy can send back an appropriate level
        # of detail in the log messages.
        log_level = log.getEffectiveLevel()
        cmd = [sys.executable, '-u', proxy_main_py, str(log_level)]
        PROXY_PROCESS = subprocess.Popen(
            args=cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        log.info("Started CodeJail proxy process (pid %d)", PROXY_PROCESS.pid)

    return PROXY_PROCESS
##
# Proxy process code
##
class CapturingHandler(logging.Handler):
    """
    A logging Handler that records every log call, for later replay.

    NOTE: only part of each log record is captured: the level, the message
    string, and the arguments. The caller, current exception, timestamp,
    etc. are dropped.
    """

    # pylint wants us to override emit().
    # pylint: disable=abstract-method

    def __init__(self):
        super().__init__()
        self.log_calls = []

    def createLock(self):
        self.lock = None

    def handle(self, record):
        self.log_calls.append((record.levelno, record.msg, record.args))

    def get_log_calls(self):
        """Return the calls captured so far and reset the buffer."""
        captured, self.log_calls = self.log_calls, []
        return captured
def proxy_main(argv):
    """
    The main program for the proxy process.

    It does this:

    * Reads a line from stdin with the repr of a tuple: (args, kwargs)

    * Calls :ref:`run_subprocess` with *args, **kwargs

    * Writes one line to stdout: the repr of the return value from
      `run_subprocess` and the log calls made:
      (status, stdout, stderr, log_calls) .

    The process ends when its stdin is closed.

    `argv` is the argument list of the process, from sys.argv.  The only
    argument is the logging level for the "codejail" log in the parent
    process.  Since we tunnel our logging back to the parent, we don't want to
    send everything, just the records that the parent will actually log.
    """
    # We don't want to see any noise on stderr.
    sys.stderr = open(os.devnull, "w")

    # Capture all logging messages so they can be tunneled to the parent.
    capture_log = CapturingHandler()
    log.addHandler(capture_log)
    log.setLevel(int(argv[1]) or logging.DEBUG)
    log.debug("Starting proxy process")

    try:
        while True:
            stdin = sys.stdin.readline()
            log.debug("proxy stdin: %r", stdin)
            if not stdin:
                # EOF: the parent closed our stdin; time to exit.
                break
            args, kwargs = deserialize_in(stdin.rstrip())
            status, stdout, stderr = run_subprocess(*args, **kwargs)
            log.debug(
                "run_subprocess result: status=%r\nstdout=%r\nstderr=%r",
                status, stdout, stderr,
            )
            # Ship the result plus any buffered log records back on one line.
            log_calls = capture_log.get_log_calls()
            stdout = serialize_out((status, stdout, stderr, log_calls))
            sys.stdout.write(stdout+"\n")
            sys.stdout.flush()
    except Exception:           # pylint: disable=broad-except
        # Note that this log message will not get back to the parent, because
        # we are dying and not communicating back to the parent.  This will be
        # useful only if you add another handler at the top of this function.
        log.exception("Proxy dying due to exception")

    log.debug("Exiting proxy process")
| StarcoderdataPython |
3234667 | <filename>svm.py
import os
from csv import reader
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.svm import LinearSVC
def norm(arr):
    """Min-max normalise *arr* in place to the range [0, 1] and return it.

    Args:
        arr: mutable sequence of numbers; modified in place.

    Returns:
        The same sequence, rescaled so min maps to 0.0 and max to 1.0.
        If all values are equal (zero range) every element becomes 0.0
        instead of raising ZeroDivisionError.
    """
    x_max = max(arr)
    x_min = min(arr)
    span = x_max - x_min
    for i in range(len(arr)):
        arr[i] = (arr[i] - x_min) / span if span else 0.0
    return arr
def main():
    """Train a LinearSVC on AAL volume statistics, predict the test set,
    and report a hold-out accuracy on a split of the training data.

    Expects data/AAL_statistics_volumn_{train,test}.csv to exist; the first
    column is an identifier and the last train column is a yes/no label.
    """
    # Load training data
    with open("data/AAL_statistics_volumn_train.csv") as f:
        csv_data = reader(f, delimiter=',')
        raw_data = np.array(list(csv_data))
    # Preprocess training data
    x_train = []
    y_train = []
    data_count = len(raw_data)
    tuple_len = len(raw_data[0])
    for i in raw_data:
        # NOTE(review): training slices 1:tuple_len-2 but the test loop below
        # slices 1:tuple_len-1 -- presumably because the train file carries a
        # trailing label column; confirm the feature counts actually match.
        temp = norm([int(j) for j in i[1:tuple_len - 2]])
        x_train.append(temp)
        # Last column is the yes/no class label, encoded as 1/0.
        if i[tuple_len - 1] == "yes":
            y_train.append(1)
        else:
            y_train.append(0)
    # Load test data
    with open("data/AAL_statistics_volumn_test.csv") as f:
        csv_data = reader(f, delimiter=',')
        raw_data = np.array(list(csv_data))
    # Preprocess test data
    x_test = []
    y_test = []
    data_count = len(raw_data)
    tuple_len = len(raw_data[0])
    for i in raw_data:
        temp = norm([int(j) for j in i[1:tuple_len - 1]])
        x_test.append(temp)
    # Predict labels for the unlabelled test set.
    clf = LinearSVC(loss="hinge", random_state=42).fit(x_train, y_train)
    y_test = clf.predict(x_test)
    print(y_test)
    # Split dataset to estimate accuracy on held-out labelled data.
    xt_train, xt_test, yt_train, yt_test = train_test_split(x_train, y_train, test_size=0.33, random_state=73)
    # Accuracy on the hold-out split.
    clf = LinearSVC(loss="hinge", random_state=42).fit(xt_train, yt_train)
    print(clf.score(xt_test, yt_test))
if __name__ == "__main__":
    main()
import sys
import typing
from metal.serial import Engine
from metal.serial.hooks import MacroHook
from metal.serial.preprocessor import MacroExpansion
class Argv(MacroHook):
    """Macro hook that transfers the host-side argv to the target device
    when the METAL_SERIAL_INIT_ARGV macro fires."""
    identifier = 'METAL_SERIAL_INIT_ARGV'
    def invoke(self, engine: Engine, macro_expansion: MacroExpansion):
        # Wire protocol: first argc as an int, then all arguments packed as
        # consecutive NUL-terminated strings in a single memory write.
        engine.write_int(len(self.argv))
        data = b'\0'.join(arg.encode() for arg in self.argv) + b'\0'
        res = engine.write_memory(data)
        if res != len(data):
            # write_memory returns how many bytes actually fit; warn when
            # the target buffer truncated argv.
            print("***metal.serial***: Couldn't write all of argv, buffer size was {}".format(res), file=sys.stderr)
    def __init__(self, argv: typing.List[str]):
        # argv: arguments to forward to the target program.
        self.argv = argv
        super().__init__()
def build_argv_hook(argv: typing.List[str]):
    """Return a zero-argument factory that builds an :class:`Argv` hook
    bound to *argv*."""
    def factory():
        return Argv(argv)
    return factory
22997 | <reponame>project-pantheon/pantheon_glob_planner
# (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import os.path
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
import cartopy.io.shapereader as shp
@pytest.mark.natural_earth
class TestLakes(object):
    """Exercises shapereader against a bundled 110m lakes shapefile,
    using Lago de Nicaragua as a small, stable fixture."""
    def setup_class(self):
        LAKES_PATH = os.path.join(os.path.dirname(__file__),
                                  'lakes_shapefile', 'ne_110m_lakes.shp')
        self.reader = shp.Reader(LAKES_PATH)
        names = [record.attributes['name'] for record in self.reader.records()]
        # Choose a nice small lake.  NOTE(review): the expected name embeds a
        # literal carriage return ('Lago de\rNicaragua') as stored in the
        # shapefile attribute table.
        print([name for name in names if 'Nicaragua' in name])
        self.lake_name = 'Lago de\rNicaragua'
        self.lake_index = names.index(self.lake_name)
        self.test_lake_geometry = \
            list(self.reader.geometries())[self.lake_index]
        self.test_lake_record = list(self.reader.records())[self.lake_index]
    def test_geometry(self):
        # The lake should be a single-polygon MultiPolygon with a known ring.
        lake_geometry = self.test_lake_geometry
        assert lake_geometry.type == 'MultiPolygon'
        assert len(lake_geometry) == 1
        polygon = lake_geometry[0]
        expected = np.array([(-84.85548682324658, 11.147898667846633),
                             (-85.29013729525353, 11.176165676310276),
                             (-85.79132117383625, 11.509737046754324),
                             (-85.8851655748783, 11.900100816287136),
                             (-85.5653401354239, 11.940330918826362),
                             (-85.03684526237491, 11.5216484643976),
                             (-84.85548682324658, 11.147898667846633),
                             (-84.85548682324658, 11.147898667846633)])
        assert_array_almost_equal(expected, polygon.exterior.coords)
        assert len(polygon.interiors) == 0
    def test_record(self):
        # Attribute table columns and geometry round-trip for the fixture lake.
        lake_record = self.test_lake_record
        assert lake_record.attributes.get('name') == self.lake_name
        expected = sorted(['admin', 'featurecla', 'min_label', 'min_zoom',
                           'name', 'name_alt', 'scalerank'])
        actual = sorted(lake_record.attributes.keys())
        assert actual == expected
        assert lake_record.geometry == self.test_lake_geometry
    def test_bounds(self):
        # tests that a file which has a record with a bbox can
        # use the bbox without first creating the geometry
        record = next(self.reader.records())
        assert not record._geometry, \
            'The geometry was loaded before it was needed.'
        assert len(record._bounds) == 4
        assert record._bounds == record.bounds
        assert not record._geometry, \
            'The geometry was loaded in order to create the bounds.'
@pytest.mark.natural_earth
class TestRivers(object):
    """Exercises shapereader against the downloaded Natural Earth 110m
    rivers dataset, using the Peace river as a small fixture."""
    def setup_class(self):
        RIVERS_PATH = shp.natural_earth(resolution='110m',
                                        category='physical',
                                        name='rivers_lake_centerlines')
        self.reader = shp.Reader(RIVERS_PATH)
        names = [record.attributes['name'] for record in self.reader.records()]
        # Choose a nice small river
        self.river_name = 'Peace'
        self.river_index = names.index(self.river_name)
        self.test_river_geometry = \
            list(self.reader.geometries())[self.river_index]
        self.test_river_record = list(self.reader.records())[self.river_index]
    def test_geometry(self):
        # The river should be a single-linestring MultiLineString whose first
        # two vertices match known coordinates.
        geometry = self.test_river_geometry
        assert geometry.type == 'MultiLineString'
        assert len(geometry) == 1
        linestring = geometry[0]
        coords = linestring.coords
        assert round(abs(coords[0][0] - -124.83563045947423), 7) == 0
        assert round(abs(coords[0][1] - 56.75692352968272), 7) == 0
        assert round(abs(coords[1][0] - -124.20045039940291), 7) == 0
        assert round(abs(coords[1][1] - 56.243492336646824), 7) == 0
    def test_record(self):
        records = list(self.reader.records())
        assert len(records) == len(self.reader)
        # Only a subset of attributes is pinned down; other columns may vary
        # between Natural Earth releases.
        river_record = records[self.river_index]
        expected_attributes = {'featurecla': 'River',
                               'min_label': 3.1,
                               'min_zoom': 2.1,
                               'name': self.river_name,
                               'name_en': self.river_name,
                               'scalerank': 2}
        for key, value in river_record.attributes.items():
            if key in expected_attributes:
                assert value == expected_attributes[key]
        assert river_record.geometry == self.test_river_geometry
| StarcoderdataPython |
178274 | <filename>api/urls.py
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from api.views import (BreedViewSet, CommentViewSet, GroupViewSet, PetViewSet,
PostViewSet, SpeciesViewSet, UserCodeViewSet,
UserTokenViewSet, UserViewSet)
# DRF router wiring: every viewset is registered under an explicit basename.
router = DefaultRouter()
router.register('species', SpeciesViewSet, basename='species')
router.register('breed', BreedViewSet, basename='breed')
router.register(r'groups', GroupViewSet, basename='groups')
router.register(r'pets', PetViewSet, basename='pets')
router.register(r'users', UserViewSet, basename='users')
# Nested route: comments live under their parent post's numeric id.
# Must be registered before the bare r'posts' prefix so the longer
# pattern is matched first.
router.register(
    r'posts/(?P<post_id>[\d]+)/comments',
    CommentViewSet, basename='comments'
)
router.register(r'posts', PostViewSet, basename='posts')
# Auth endpoints: request a login code by email, then exchange it for a token.
auth_urlpatterns = [
    path('email/', UserCodeViewSet.as_view()),
    path('token/', UserTokenViewSet.as_view()),
]
# All API routes are versioned under /v1/.
urlpatterns = [
    path('v1/auth/', include(auth_urlpatterns)),
    path('v1/', include(router.urls)),
]
| StarcoderdataPython |
1647320 | <gh_stars>1-10
#!/usr/bin/env python
# pylint: disable=import-error,line-too-long
"""
Generate sample data for manual testing.
"""
import os
from crowdsorter.settings import get_config
from crowdsorter.factory import create_app
from crowdsorter.models import Collection, Item, Redirect
# Bootstrap the Flask app (and its DB connection) for the environment given
# by FLASK_ENV, defaulting to 'dev'.
create_app(get_config(os.getenv('FLASK_ENV') or 'dev'))
# An empty collection with no items.
collection = Collection(name="Empty Collection", key='_empty')
collection.save()
# A small collection including names with special characters.
collection = Collection(name="New Collection", key='_new')
collection.add("Foo")
collection.add("Bar")
collection.add("A really long name with lots of words.")
collection.add("special_character:#")
collection.add("special_character:&")
collection.add("special_character:'")
collection.save()
# A private collection that also has a vanity code.
collection = Collection(name="Colors", key='_sample', code='sample',
                        private=True)
collection.add("Red")
collection.add("Green")
collection.add("Blue")
collection.add("Yellow")
collection.save()
# Twenty numbered items, assigned directly rather than via add().
collection = Collection(name="Numbers", key='_numbers', code='numbers')
collection.items = [Item(name=str(n)).save() for n in range(1, 21)]
collection.save()
collection = Collection(name="Private Collection",
                        key='_private', private=True)
collection.items = [
    Item(name="Secret One").save(),
    Item(name="Secret Two").save(),
]
collection.save()
collection = Collection(name="Locked Collection",
                        key='_locked', locked=True)
collection.items = [
    Item(name="Item One").save(),
    Item(name="Item Two").save(),
]
collection.save()
# Items carrying optional metadata: description, image and reference URLs.
collection = Collection(name="Metadata Collection",
                        key='_metadata', code='dogs')
collection.add(
    "Collie",
    description="Medium-sized, fairly lightly built dog, with a pointed snout.",
    image_url="https://upload.wikimedia.org/wikipedia/commons/e/e4/Border_Collie_600.jpg",
    ref_url="https://en.wikipedia.org/wiki/Collie",
)
collection.add(
    "Golden Retriever",
    description="Instinctive love of water, and are easy to train to basic or advanced obedience standards.",
    image_url="https://upload.wikimedia.org/wikipedia/commons/9/93/Golden_Retriever_Carlos_%2810581910556%29.jpg",
    ref_url="https://en.wikipedia.org/wiki/Golden_Retriever",
)
collection.add(
    "Pug",
    description="The Pug is a breed of dog with a wrinkly, short-muzzled face, and curled tail.",
    image_url="https://upload.wikimedia.org/wikipedia/commons/thumb/7/7f/Pug_portrait.jpg/400px-Pug_portrait.jpg",
    ref_url="https://en.wikipedia.org/wiki/Pug",
)
collection.add(
    "Dalmatian",
    description="The Dalmatian is a breed of large dog, noted for its unique black or liver spotted coat and mainly used as a carriage dog in its early days.",
)
collection.add(
    "Shiba Inu",
    image_url="https://upload.wikimedia.org/wikipedia/commons/thumb/5/58/Shiba_inu_taiki.jpg/1920px-Shiba_inu_taiki.jpg",
)
collection.save()
# A collection with only one item (no explicit key).
collection = Collection(name="Single Item Collection")
collection.add("Item One")
collection.save()
# A collection reachable through a slug redirect ('old' -> 'from-old').
collection = Collection(name="Redirected Collection", code="from-old")
collection.items = [
    Item(name="A").save(),
    Item(name="B").save(),
    Item(name="C").save(),
    Item(name="D").save(),
]
collection.save()
Redirect(start_slug="old", end_slug="from-old").save()
| StarcoderdataPython |
from __future__ import unicode_literals
from rest_framework import generics
from rest_framework import permissions
from rest_framework.exceptions import NotFound
from api.actions.serializers import PreprintRequestActionSerializer
from api.base.views import JSONAPIBaseView
from api.base import permissions as base_permissions
from api.base.filters import ListFilterMixin
from api.base.utils import get_object_or_error
from api.requests.permissions import NodeRequestPermission, PreprintRequestPermission
from api.requests.serializers import NodeRequestSerializer, PreprintRequestSerializer
from api.providers.permissions import MustBeModerator
from framework.auth.oauth_scopes import CoreScopes
from osf.models import Node, NodeRequest, PreprintRequest, PreprintService
class RequestMixin(object):
    """Shared plumbing for request views.

    Subclasses configure the class attributes below; the mixin then resolves
    the request object and its target object from the view's URL kwargs.
    """
    serializer_class = None
    request_class = None
    request_display_name = None
    target_class = None
    target_display_name = None
    target_lookup_url_kwarg = None
    request_lookup_url_kwarg = None
    def __get_object(self, object_class, lookup_arg, display_name, check_object_permissions=True):
        # Name-mangled helper shared by get_request/get_target: load the
        # object by the URL kwarg or raise a not-found style error.
        obj = get_object_or_error(
            object_class,
            self.kwargs[lookup_arg],
            self.request,
            display_name=display_name,
        )
        # May raise a permission denied
        if check_object_permissions:
            self.check_object_permissions(self.request, obj)
        return obj
    def get_request(self, check_object_permissions=True):
        # Resolve the request object (e.g. NodeRequest) from the URL.
        return self.__get_object(self.request_class, self.request_lookup_url_kwarg, self.request_display_name, check_object_permissions=check_object_permissions)
    def get_target(self, check_object_permissions=True):
        # Resolve the object the request is about (e.g. the Node/Preprint).
        return self.__get_object(self.target_class, self.target_lookup_url_kwarg, self.target_display_name, check_object_permissions=check_object_permissions)
class NodeRequestMixin(RequestMixin):
    """RequestMixin configuration for node access requests."""
    serializer_class = NodeRequestSerializer
    request_class = NodeRequest
    request_display_name = 'node request'
    target_class = Node
    target_display_name = 'node'
    target_lookup_url_kwarg = 'node_id'
    request_lookup_url_kwarg = 'request_id'
class PreprintRequestMixin(RequestMixin):
    """RequestMixin configuration for preprint requests."""
    serializer_class = PreprintRequestSerializer
    request_class = PreprintRequest
    request_display_name = 'preprint request'
    target_class = PreprintService
    target_display_name = 'preprint'
    target_lookup_url_kwarg = 'preprint_id'
    request_lookup_url_kwarg = 'request_id'
class RequestDetail(JSONAPIBaseView, generics.RetrieveAPIView):
    """Dispatching detail view: routes to the node- or preprint-specific
    detail view depending on which model owns the request id."""
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]  # Actual scope checks are done on subview.as_view
    required_write_scopes = [CoreScopes.NULL]
    view_category = 'requests'
    view_name = 'request-detail'
    def get(self, request, *args, **kwargs):
        # Probe both request models; delegate to whichever one matches.
        request_id = self.kwargs['request_id']
        if NodeRequest.objects.filter(_id=request_id).exists():
            return NodeRequestDetail.as_view()(request, *args, **kwargs)
        elif PreprintRequest.objects.filter(_id=request_id).exists():
            return PreprintRequestDetail.as_view()(request, *args, **kwargs)
        else:
            raise NotFound
class NodeRequestDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeRequestMixin):
    """Read-only detail endpoint for a single node request."""
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        NodeRequestPermission,
    )
    required_read_scopes = [CoreScopes.NODE_REQUESTS_READ]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = NodeRequestSerializer
    view_category = 'requests'
    view_name = 'node-request-detail'
    def get_object(self):
        # Mixin resolves the NodeRequest from the request_id URL kwarg.
        return self.get_request()
class PreprintRequestDetail(JSONAPIBaseView, generics.RetrieveAPIView, PreprintRequestMixin):
    """Read-only detail endpoint for a single preprint request."""
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        PreprintRequestPermission,
    )
    required_read_scopes = [CoreScopes.PREPRINT_REQUESTS_READ]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = PreprintRequestSerializer
    view_category = 'requests'
    view_name = 'preprint-request-detail'
    def get_object(self):
        # Mixin resolves the PreprintRequest from the request_id URL kwarg.
        return self.get_request()
class RequestActionList(JSONAPIBaseView, generics.ListAPIView):
    """Dispatching action-list view.

    NOTE(review): unlike RequestDetail above, this only dispatches for
    PreprintRequest ids; NodeRequest ids 404 here -- presumably node
    requests have no action list endpoint, confirm before extending.
    """
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.ACTIONS_READ]
    required_write_scopes = [CoreScopes.NULL]
    view_category = 'requests'
    view_name = 'request-action-list'
    def get(self, request, *args, **kwargs):
        request_id = self.kwargs['request_id']
        if PreprintRequest.objects.filter(_id=request_id).exists():
            return PreprintRequestActionList.as_view()(request, *args, **kwargs)
        else:
            raise NotFound
class PreprintRequestActionList(JSONAPIBaseView, generics.ListAPIView, PreprintRequestMixin, ListFilterMixin):
    """Moderator-only list of the actions recorded on a preprint request."""
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        MustBeModerator,
    )
    required_read_scopes = [CoreScopes.ACTIONS_READ]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = PreprintRequestActionSerializer
    view_category = 'requests'
    view_name = 'preprint-request-action-list'
    # supports MustBeModerator
    def get_provider(self):
        # The moderator check needs the provider of the preprint the request
        # targets.
        request_id = self.kwargs['request_id']
        preprint_request = PreprintRequest.load(request_id)
        if preprint_request:
            return preprint_request.target.provider
        raise NotFound
    # overrides ListFilterMixin
    def get_default_queryset(self):
        return self.get_request().actions.all()
    # overrides ListAPIView
    def get_queryset(self):
        return self.get_queryset_from_request()
| StarcoderdataPython |
3276997 | <reponame>effie-ms/eeflows
from django.contrib import admin
from stations.models import BioPeriod, Station
# Expose the station models in the Django admin with the default ModelAdmin.
admin.site.register(Station)
admin.site.register(BioPeriod)
| StarcoderdataPython |
from http import HTTPStatus
from pytest import mark
from freddie.viewsets.dependencies import Paginator
from .app import Item, test_item, test_items_seq
from .utils import WithClient
# Run every test coroutine in this module under pytest-asyncio.
pytestmark = mark.asyncio
# Parametrize spec (expanded with @mark.parametrize(**api_prefixes)) so each
# test runs against all three API flavours mounted by the test app.
api_prefixes = {
    'argnames': 'prefix',
    'argvalues': ['/unvalidated', '/validated', '/sync'],
    'ids': ['unvalidated', 'validated', 'synchronous'],
}
# Primary key used by the detail-route tests.
pk = 42
class TestBasicViewSet(WithClient):
    """CRUD round-trip tests for the basic viewsets mounted in the test app,
    parametrized over the /unvalidated, /validated and /sync variants."""
    async def test_api_schema_is_accessable(self):
        response = await self.client.get('/openapi.json')
        assert response.status_code == HTTPStatus.OK
    @mark.parametrize(**api_prefixes)
    async def test_list(self, prefix):
        response = await self.client.get(prefix + '/')
        assert response.status_code == HTTPStatus.OK
        assert response.json() == await Item.serialize(test_items_seq)
    @mark.parametrize(**api_prefixes)
    async def test_retrieve(self, prefix):
        response = await self.client.get(f'{prefix}/{pk}')
        assert response.status_code == HTTPStatus.OK
        assert response.json() == await test_item.get_serialized()
    @mark.parametrize(**api_prefixes)
    async def test_invalid_pk_retrieve(self, prefix):
        # Non-integer pk must be rejected by path validation.
        response = await self.client.get(prefix + '/foobar')
        assert response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY
    @mark.parametrize(**api_prefixes)
    async def test_create(self, prefix):
        data = {'id': 43, 'title': 'Yay'}
        created_item = Item(**data)
        response = await self.client.post(prefix + '/', json=data)
        assert response.status_code == HTTPStatus.CREATED
        assert response.json() == await created_item.get_serialized()
    @mark.parametrize(**api_prefixes)
    async def test_update(self, prefix):
        updated_item = Item(title='Yello')
        response = await self.client.put(f'{prefix}/{pk}', json=updated_item.dict())
        assert response.status_code == HTTPStatus.OK
        assert response.json() == await updated_item.get_serialized()
    @mark.parametrize(**api_prefixes)
    async def test_update_partial(self, prefix):
        data = {'title': 'OK'}
        updated_item = Item(**data)
        response = await self.client.patch(f'{prefix}/{pk}', json=data)
        assert response.status_code == HTTPStatus.OK
        assert response.json() == await updated_item.get_serialized()
    @mark.parametrize(**api_prefixes)
    async def test_destroy(self, prefix):
        response = await self.client.delete(f'{prefix}/{pk}')
        assert response.status_code == HTTPStatus.NO_CONTENT
        assert response.text == ''
    # Query params the custom routes echo back verbatim.
    ROUTE_QUERY_PARAMS = {'foo': 'one', 'bar': 42}
    @mark.parametrize(
        'path',
        [
            '/unvalidated/listroute',
            f'/unvalidated/{pk}/detail',
            '/unvalidated/listcustom',
            f'/unvalidated/{pk}/detailcustom',
        ],
        ids=['list', 'detail', 'list_named', 'detail_named'],
    )
    async def test_custom_route(self, path):
        # Without the required query params the route must fail validation;
        # with them it echoes the params back.
        invalid_response = await self.client.get(path)
        assert invalid_response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY
        response = await self.client.get(path, query_string=self.ROUTE_QUERY_PARAMS)
        assert response.status_code == HTTPStatus.OK
        assert response.json() == self.ROUTE_QUERY_PARAMS
class TestPaginatedViewSet(WithClient):
    """Limit/offset pagination behaviour of the /paginated endpoint."""
    @mark.parametrize(
        'limit,offset', [(Paginator.default_limit, Paginator.default_offset), (10, 3)]
    )
    async def test_default_pagination(self, limit, offset):
        response = await self.client.get(
            '/paginated/', query_string={'limit': limit, 'offset': offset}
        )
        assert response.status_code == HTTPStatus.OK
        response_data = response.json()
        assert len(response_data) == limit
        # Items presumably have sequential 1-based ids, so the last item of
        # the page is limit + offset.
        assert response_data[limit - 1]['id'] == limit + offset
    @mark.parametrize(
        'limit,offset',
        [
            ('string', 10),
            (20, 'foobar'),
            (Paginator.max_limit + 1, 0),
            (Paginator.max_limit + 100, 200),
        ],
    )
    async def test_invalid_values(self, limit, offset):
        # Non-numeric values and limits above Paginator.max_limit are rejected.
        response = await self.client.get(
            '/paginated/', query_string={'limit': limit, 'offset': offset}
        )
        assert response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY
class TestFieldedViewSet(WithClient):
    """Field-selection (?fields=...) behaviour of the /fielded endpoint."""
    request_fields = {'title'}
    @property
    def request_fields_qs(self):
        # e.g. {'fields': 'title'} -- comma-joined field names.
        return {'fields': ','.join(self.request_fields)}
    async def test_list(self):
        response = await self.client.get('/fielded/', query_string=self.request_fields_qs)
        assert response.status_code == HTTPStatus.OK
        # Every listed item must contain each requested field.
        for item in response.json():
            for field in self.request_fields:
                assert field in item
    async def test_retrieve(self):
        response = await self.client.get(f'/fielded/{pk}', query_string=self.request_fields_qs)
        assert response.status_code == HTTPStatus.OK
        response_data = response.json()
        for field in self.request_fields:
            assert field in response_data
async def test_http_exceptions(client):
    """The /notfound route must echo the requested detail message and custom
    header back through the raised HTTP 404."""
    detail = 'NOTFOUND'
    header = 'custom-header'
    response = await client.get('/notfound', query_string={'detail': detail, 'header': header})
    assert response.status_code == HTTPStatus.NOT_FOUND
    assert response.json().get('detail') == detail
    # The app prefixes custom headers with 'x-'.
    assert response.headers['x-custom-header'] == header
| StarcoderdataPython |
3267329 | <gh_stars>0
# Generated by Django 3.2.8 on 2021-10-14 16:16
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: attach a regex validator to user.name.

    NOTE(review): the regex "/^[a-z ,.'-]+$/i" is JavaScript-style; Python's
    re treats the slashes and the trailing /i flag as literal characters, so
    the validator likely never matches as intended -- verify against the
    model definition before relying on it.
    """
    dependencies = [
        ('core', '0004_auto_20211014_1611'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='name',
            field=models.CharField(max_length=255, validators=[django.core.validators.RegexValidator(message='Name format is invalid', regex="/^[a-z ,.'-]+$/i")]),
        ),
    ]
| StarcoderdataPython |
from flask_login import UserMixin, current_user
from iot_lab_inventory import db, login_manager
# from .cart import Cart, CartItem
class Part(db.Model):
    """Catalogue entry for a physical part.

    Stock quantity lives on the one-to-one InventoryItem; images and
    documents hang off this row via backrefs.
    """
    __tablename__ = 'parts'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    description = db.Column(db.String)
    category = db.Column(db.String(64))
    # External SparkFun catalogue identifier, if any.
    sparkfun_id = db.Column(db.String(64))
    images = db.relationship('Image', backref='part')
    documents = db.relationship('Document', backref='part')
    # One-to-one: uselist=False pairs each part with a single inventory row.
    inventory_item = db.relationship('InventoryItem', back_populates='part', uselist=False)
class Image(db.Model):
    """Image file attached to a part (accessible as part.images)."""
    __tablename__ = 'images'
    id = db.Column(db.Integer, primary_key=True)
    filename = db.Column(db.String, default=None, nullable=True)
    part_id = db.Column(db.Integer, db.ForeignKey('parts.id'))
class Document(db.Model):
    """Document (e.g. datasheet) attached to a part (part.documents)."""
    __tablename__ = 'documents'
    id = db.Column(db.Integer, primary_key=True)
    filename = db.Column(db.String, default=None, nullable=True)
    part_id = db.Column(db.Integer, db.ForeignKey('parts.id'))
class InventoryItem(db.Model):
    """Stock record: how many of a given part are on hand."""
    __tablename__ = 'inventory_items'
    id = db.Column(db.Integer, primary_key=True)
    quantity = db.Column(db.Integer)
    part_id = db.Column(db.Integer, db.ForeignKey('parts.id'))
    part = db.relationship("Part", back_populates='inventory_item', uselist=False)
class User(db.Model, UserMixin):
    """Application user; UserMixin supplies the Flask-Login interface."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(20), unique=True)
    email = db.Column(db.String)
    name = db.Column(db.String)
    # URL of the user's profile picture (presumably from the OAuth provider
    # -- confirm against the login flow).
    picture = db.Column(db.String)
    is_admin = db.Column(db.Boolean)
    orders = db.relationship('Order', backref='user')
    def __init__(self, username, email, name, picture):
        # New accounts are never created as admins.
        self.username = username
        self.email = email
        self.name = name
        self.picture = picture
        self.is_admin = False
    @staticmethod
    @login_manager.user_loader
    def load_user(id):
        # Flask-Login callback: resolve the session's user id to a User row.
        return User.query.filter_by(id=id).first()
class Order(db.Model):
    """A user's checkout: a set of order items with a workflow status."""
    __tablename__ = 'orders'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    # Free-form workflow status; new orders start as "Pending".
    status = db.Column(db.String, default="Pending")
    created_at = db.Column(db.Date)
    order_items = db.relationship('OrderItem', backref='order')
class OrderItem(db.Model):
    """Line item of an order: one part and the requested quantity."""
    __tablename__ = 'order_items';
    id = db.Column(db.Integer, primary_key=True)
    part_id = db.Column(db.Integer, db.ForeignKey('parts.id'))
    part = db.relationship('Part', uselist=False)
    order_id = db.Column(db.Integer, db.ForeignKey('orders.id'))
    quantity = db.Column(db.Integer)
| StarcoderdataPython |
174195 | <reponame>Mohammed785/University-System<filename>university_system/quizzes/urls.py
from django.urls import path
from .views import (
create_question_choice,create_quiz,create_quiz_question,CourseQuizzesView,update_question_choice,
update_question,update_quiz,delete_choice,delete_question,
delete_quiz,take_quiz_view,quiz_answer_view,review_quiz_view,
)
# Quiz CRUD routes.  Creation/update/delete routes exist for quizzes, their
# questions and each question's choices, all addressed by slug; taking,
# answering and reviewing a quiz are separate views on the quiz slug.
urlpatterns = [
    path("course/<course_code>/quiz/create/", create_quiz, name="create-quiz"),
    path("quiz/<slug:slug>/question/create/", create_quiz_question, name="create-quiz-question"),
    path("question/<slug:slug>/choice/create/", create_question_choice, name="create-q-choice"),
    path("quiz/<slug:slug>/update/", update_quiz, name="quiz-update"),
    path("quiz/<slug>/", take_quiz_view, name="quiz-view"),
    path("quiz/<slug:slug>/answers/", quiz_answer_view, name="quiz-answers-view"),
    path("quiz/<slug:slug>/review/", review_quiz_view, name="quiz-review"),
    path("course/<course_code>/quizzes/",CourseQuizzesView.as_view(),name="course-quizzes"),
    path("question/<slug:slug>/update/",update_question,name="quiz-q-update"),
    path("choice/<slug:slug>/update/",update_question_choice,name="question-c-update"),
    path("question/<slug:slug>/delete/",delete_question,name="quiz-q-delete"),
    path("quiz/<slug:slug>/delete/", delete_quiz, name="quiz-delete"),
    path("choice/<slug:slug>/delete/",delete_choice,name="question-c-delete")
]
| StarcoderdataPython |
import pandas as pd
import numpy as np
from annotations.CONSTANTS import *
import pickle
def save_as_csv(X, Y, feature_name, output_dir, output_filename='features_and_labels.csv'):
    """Save feature matrix X and labels Y as a single CSV file.

    Args:
        X: 2-D sequence of feature rows (one row per sample).
        Y: per-sample labels, stored in a trailing 'label' column.
        feature_name: column names, one per feature in X.
        output_dir: destination directory; must end with a path separator
            because the filename is appended by plain string concatenation.
        output_filename: name of the CSV file to create.
    """
    pd_data = pd.DataFrame(data=np.array(X), columns=feature_name)
    pd_data['label'] = list(Y)
    out_path = output_dir + output_filename
    pd_data.to_csv(out_path, encoding='utf-8', index=False)
    print('---feature saved in ', out_path)
def append_to_file(filename, txt):
    """Append *txt* plus a newline to *filename*, creating the file if
    needed.  Uses a context manager so the handle is closed even if the
    write raises."""
    with open(filename, 'a') as fh:
        fh.write(txt + '\n')
def save_model_outputs(T, Y, Y_init, Y_smooth, filename):
    """Write the time axis and model output series to a CSV under
    result_dir/model_outputs/ (result_dir comes from the CONSTANTS
    star import)."""
    columns = {'T': T, 'Y': Y, 'Y_init': Y_init, 'Y_smooth': Y_smooth}
    out_path = result_dir + 'model_outputs/' + filename
    pd.DataFrame(columns).to_csv(out_path, index=False)
def save_model_file(model, filename='brushingAB.model'):
    """Pickle *model* to *filename*.

    Uses a context manager so the file handle is closed deterministically
    (the original passed an unclosed handle to pickle.dump).
    """
    with open(filename, 'wb') as fh:
        pickle.dump(model, fh)
def load_model_from_file(filename='brushingAB.model'):
    """Unpickle and return the model stored in *filename*.

    Uses a context manager so the file handle is closed deterministically
    (the original left the handle open).
    """
    with open(filename, 'rb') as fh:
        return pickle.load(fh)
| StarcoderdataPython |
3374702 | <gh_stars>1-10
import asyncio
import json
import subprocess
import time
import traceback
from io import StringIO
from asgiref.sync import sync_to_async
from django.core.exceptions import ValidationError
from django.core.management import call_command
from django.db import models
from django.utils.translation import gettext_lazy as _
import fullctl.django.tasks
from fullctl.django.models.abstract.base import HandleRefModel
from fullctl.django.tasks.util import worker_id
# Public API of this module.  NOTE(review): TaskClaim and CallCommand are
# listed here but defined further down the file (outside this excerpt).
__all__ = [
    "LimitAction",
    "TaskClaimed",
    "WorkerUnqualified",
    "TaskLimitError",
    "TaskAlreadyStarted",
    "ParentTaskNotFinished",
    "Task",
    "TaskClaim",
    "CallCommand",
]
class LimitAction:
    """What to do when a task's concurrency limit is hit (see
    Task.limit_action): raise TaskLimitError or drop silently."""
    error = 0
    silent = 1
class TaskClaimed(IOError):
    """Raised when another worker already holds the claim on a task."""

    def __init__(self, task):
        message = f"Task already claimed by another worker: {task}"
        super().__init__(message)
class WorkerUnqualified(IOError):
    """Raised when the current worker fails one of a task's qualifiers."""

    def __init__(self, task, qualifier):
        message = (
            f"Worker does not qualify to process this task: {task}, {qualifier}"
        )
        super().__init__(message)
class TaskLimitError(IOError):
    """
    Raised when there are currently too many pending
    instances of a task (see Task.validate_limits)
    """

    pass
class TaskAlreadyStarted(IOError):
    """
    Raised when trying to work a task that is already
    started (its status is no longer "pending")
    """

    pass
class ParentTaskNotFinished(IOError):
    """
    Raised when trying to work a child task
    before the parent task has finished
    """

    pass
class Task(HandleRefModel):

    """
    Describes an asynchronous task.

    Task status values:

    - pending: task waiting for execution
    - running: task is currently executing
    - completed: task completed successfully
    - failed: task ended due to error
    - cancelled: task cancelled
    """

    # operation tag; concrete task types set this via HandleRef.tag
    op = models.CharField(max_length=255)
    # groups task instances for concurrency limiting (see validate_limits)
    limit_id = models.CharField(max_length=255, blank=True, default="")
    status = models.CharField(
        max_length=255,
        choices=(
            ("pending", _("Pending")),
            ("running", _("Running")),
            ("completed", _("Completed")),
            ("failed", _("Failed")),
            ("cancelled", _("Cancelled")),
        ),
        default="pending",
    )
    param_json = models.TextField(
        blank=True, null=True, help_text="json containing args and kwargs"
    )
    error = models.TextField(
        blank=True,
        null=True,
        help_text="if the task failed will contain traceback or error info",
    )
    output = models.TextField(
        blank=True,
        null=True,
        help_text="task output - can also be used to store results",
    )
    timeout = models.IntegerField(
        null=True, blank=True, default=None, help_text="task timeout in seconds"
    )
    # elapsed execution time in seconds (NOTE: help_text typo "sepnt" kept,
    # changing it would generate a migration)
    time = models.FloatField(default=0.0, help_text="time sepnt in seconds")
    source = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        default=worker_id,
        help_text="host id where task was triggered",
    )
    queue_id = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        help_text="task queue id (celery task id or orm worker id)",
    )
    # optional parent task; children may only run after the parent completes
    parent = models.ForeignKey(
        "django_fullctl.Task",
        on_delete=models.CASCADE,
        null=True,
        blank=True,
        related_name="children",
    )
    class Meta:
        db_table = "fullctl_task"
        verbose_name = _("Task")
        verbose_name_plural = _("Tasks")
    class HandleRef:
        tag = "task"
@classmethod
def create_task(cls, *args, **kwargs):
parent = kwargs.pop("parent", None)
timeout = kwargs.pop("timeout", None)
op = cls.HandleRef.tag
if parent:
parent = Task(id=parent.id)
param = {"args": args or [], "kwargs": kwargs or {}}
task = cls(op=op, param=param, status="pending", parent=parent, timeout=timeout)
task.limit_id = task.generate_limit_id
try:
task.clean()
except TaskLimitError:
if task.limit_action == LimitAction.silent:
pass
raise
task.save()
return task
    @property
    def generate_limit_id(self):
        # Default limit grouping key; subclasses override to scope limits
        # (e.g. per target object).
        return ""
    @property
    def limit(self):
        # Max number of concurrent pending/running instances, from TaskMeta;
        # None means unlimited.
        return self.task_meta_property("limit")
    @property
    def limit_action(self):
        # What to do when the limit is hit; defaults to raising an error.
        return self.task_meta_property("limit_action", LimitAction.error)
    @property
    def param(self):
        """
        Will return a dict from the param_json value, falling back to
        empty args/kwargs when no parameters were stored.
        """
        if self.param_json:
            return json.loads(self.param_json)
        return {"args": [], "kwargs": {}}
    @param.setter
    def param(self, param):
        """
        Will take a dict and convert to json and
        store in the param_json field
        """
        self.param_json = json.dumps(param)
    @property
    def result(self):
        """
        Returns the task result if task completed
        otherwise returns None

        You can type cast a result type by specifying
        the `result_type` property in `TaskMeta`
        """
        if self.status == "completed":
            # output is stored as text; cast to the declared result type
            typ = self.task_meta_property("result_type", str)
            return typ(self.output)
        return None
    @property
    def task_meta(self):
        """
        Returns the concrete task's TaskMeta class if it exists, None otherwise
        """
        return getattr(self, "TaskMeta", None)
    @property
    def qualifies(self):
        """
        Checks if the environment qualifies to process
        this task.

        Will raise a WorkerUnqualified exception if not,
        returns True otherwise.
        """
        task_meta = self.task_meta
        # no TaskMeta -> no qualifiers -> any worker may process the task
        if not task_meta:
            return True
        qualifiers = getattr(task_meta, "qualifiers", [])
        for qualifier in qualifiers:
            if not qualifier.check(self):
                raise WorkerUnqualified(self, qualifier)
        return True
    def __str__(self):
        # e.g. "CallCommand(12): ['collectstatic']"
        return f"{self.__class__.__name__}({self.id}): {self.param['args']}"
def wait(self, timeout=None):
"""
Waits for the task to be completed. This is a blocking action
Keyword Arguments:
- timeout(`int`): if specified timeout after n seconds
"""
t = time.time()
while True:
if self.status == "completed":
return
time.sleep(0.1)
self.refresh_from_db()
if timeout and (time.time() - t).total_seconds > timeout:
raise OSError("Task wait() timeout")
async def async_wait(self, timeout=None):
"""
Waits for a task to be completed with asyncio.
This is a blocking action
Keyword Arguments:
- timeout(`int`): if specified timeout after n seconds
"""
t = time.time()
while True:
if self.status == "completed":
return
await asyncio.sleep(0.1)
sync_to_async(self.refresh_from_db)()
if timeout and (time.time() - t).total_seconds > timeout:
raise OSError("Task wait() timeout")
def task_meta_property(self, name, default=None):
    """Look up *name* on the task's `TaskMeta` inner class.

    Arguments:
    - name(`str`): property name

    Keyword Arguments:
    - default: value returned when `TaskMeta` is missing or does not
      define *name*
    """
    meta = self.task_meta
    return getattr(meta, name, default) if meta else default
def validate_limits(self):
    """
    Checks that creating a new instance of this task
    doesn't violate any limit we have specified.

    Raises:
        TaskLimitError: when the number of pending/running instances
        sharing this task's op and limit_id already meets the limit.
    """
    # no limit specified
    if self.limit is None:
        return
    op = self.HandleRef.tag
    # NOTE(review): accessed without parentheses — presumably a property
    # on the model; confirm it is not an unbound method reference.
    limit_id = self.generate_limit_id
    count = self._meta.model.objects.filter(
        op=op, limit_id=limit_id, status__in=["pending", "running"]
    ).count()
    # if the count of currently pending / running instances of this
    # task is higher than the limit we specified we raise a
    # TaskLimitError
    if self.limit <= count:
        raise TaskLimitError()
def clean(self):
    """Model validation: ensure `param_json` holds valid JSON, then check
    task limits (which must run last)."""
    super().clean()
    try:
        if self.param_json:
            json.loads(self.param_json)
    # Narrowed from a blanket `except Exception`: json.loads raises
    # ValueError (JSONDecodeError) for malformed JSON and TypeError for
    # non-string input; anything else should propagate.
    except (TypeError, ValueError) as exc:
        # chain the original error for easier debugging
        raise ValidationError(f"Parameters could not be JSON encoded: {exc}") from exc
    # this needs to be the last validation
    self.validate_limits()
def cancel(self, reason):
    """Cancel the task, storing *reason* on its output field."""
    self._cancel(reason)
def _cancel(self, reason):
    # store the cancellation reason and persist the status change
    self.output = reason
    self.status = "cancelled"
    self.save()
def _complete(self, output):
    # record the task result and mark the task finished
    self.output = output
    self.status = "completed"
    self.save()
def _fail(self, error):
    # record the failure details (e.g. a formatted traceback) and persist
    self.error = error
    self.status = "failed"
    self.save()
def _run(self):
    """Execute the task: transition pending -> running -> completed/failed.

    Raises:
        TaskAlreadyStarted: if the task is not in the "pending" state.
        ParentTaskNotFinished: if a parent task exists and has not
            completed yet.
    """
    if self.status != "pending":
        raise TaskAlreadyStarted()
    if self.parent and self.parent.status != "completed":
        raise ParentTaskNotFinished()
    self.status = "running"
    self.save()
    t_start = time.time()
    try:
        param = self.param
        output = self.run(*param["args"], **param["kwargs"])
        t_end = time.time()
        # NOTE: execution time is only recorded on success
        self.time = t_end - t_start
        self._complete(output)
    except Exception:
        # deliberately broad: any error is captured as a traceback on the
        # task record instead of crashing the worker
        self._fail(traceback.format_exc())
def run(self, *args, **kwargs):
    """Task body; extend in a proxy model (see e.g. `CallCommand` below)."""
    raise NotImplementedError()
def run_command(self, command):
    """Spawn *command* as a subprocess.

    NOTE(review): the process handle is discarded — it is neither waited
    on nor is its stdout/stderr read, so output is lost and the child may
    outlive the task (see TODO).
    """
    # TODO this needs to capture output
    subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
class TaskClaim(HandleRefModel):
    """
    Used by a worker to claim a task

    Whenever a worker claims a task the worker first
    needs to create a taskclaim object for it.

    This object has a unique constraint on the task,
    preventing race conditions when several workers
    are polling and claiming tasks asynchronously.
    """

    # one-to-one relation: the unique constraint that makes claiming atomic
    task = models.OneToOneField(Task, on_delete=models.CASCADE)
    # identifier of the worker holding the claim
    worker_id = models.CharField(max_length=255)

    class Meta:
        db_table = "fullctl_task_claim"
        verbose_name = _("Claimed Task")
        verbose_name_plural = _("Claimed Tasks")

    class HandleRef:
        tag = "task_claim"
@fullctl.django.tasks.register
class CallCommand(Task):

    """
    Django management tasks
    """

    class Meta:
        proxy = True

    class HandleRef:
        tag = "task_callcommand"

    def run(self, *args, **kwargs):
        """
        Executes a django management command and returns its captured
        stdout as the task output.
        """
        out = StringIO()
        call_command(commit=True, *args, **kwargs, stdout=out)
        # getvalue() already returns a str; the old f-string wrapper
        # (f"{out.getvalue()}") was a no-op
        return out.getvalue()
| StarcoderdataPython |
158362 | <reponame>JasonFruit/hymnal-tools<filename>HtmlEmitter.py<gh_stars>1-10
class HtmlEmitter(object):
    """Writes a hymnal out as a simple HTML document.

    Usage: initialize() opens the output file and writes the document
    head; emit_category/emit_header/emit_stanza/emit_footer write hymn
    content; finalize() closes the document and the file.
    """

    def emit(self, s):
        """Write *s* verbatim to the output file."""
        self.file.write(s)

    def emit_line(self, s=""):
        """Write *s* followed by a newline."""
        self.emit(s)
        self.emit("\n")

    def initialize(self, filename, title, author, date):
        """Open *filename* for writing and emit the document head and titles."""
        # BUG FIX: open explicitly as UTF-8 — the emitted HTML declares
        # charset=utf-8, so relying on the platform default encoding could
        # raise UnicodeEncodeError or mis-encode non-ASCII hymn text.
        self.file = open(filename, "w", encoding="utf-8")
        self.emit_line("""<html>
<head>
<link rel="stylesheet" type="text/css" href="hymnal.css" />
<meta charset="utf-8" />
<title>%s, %s</title>
</head>
<body>""" % (title, author))
        self.emit_line("""<h1 class="hymnal_title">%s</h1>""" % title)
        self.emit_line("""<h2 class="hymnal_subtitle">%s (%s)</h2>""" % (author, date))

    def emit_category(self, category):
        """Write a category heading."""
        self.emit_line("""<h3 class="category">%s</h3>""" % category)

    def emit_header(self, num, meter, author):
        """Open a hymn div and write its number, meter and (optional) author."""
        self.emit_line("""<div class="hymn">""")
        if author == "":
            self.emit_line(
                """<span class="hymn_num">%s</span>. (<span class="meter">%s</span>)<br />""" %
                (num, meter))
        else:
            self.emit_line(
                """<span class="hymn_num">%s</span>. (<span class="meter">%s</span>) <span class="author">%s</span><br />""" %
                (num, meter, author))

    def emit_footer(self):
        """Close the hymn div opened by emit_header()."""
        self.emit_line("</div>")

    def emit_stanza(self, stanza):
        """Write one stanza: its number followed by its lines.

        *stanza* must expose a ``num`` attribute and iterate over its
        text lines.
        """
        self.emit_line("""<div class="stanza">""")
        self.emit("""<span class="stanza_num">%s</span>""" % stanza.num)
        for line in stanza:
            self.emit_line("%s<br />" % line)
        self.emit_line("</div>")

    def finalize(self):
        """Write the closing tags and close the output file."""
        self.emit_line("""</body>
</html>""")
        self.file.close()
| StarcoderdataPython |
1799714 | #-----------------------------------------------------------------------------
# Name: Formal Documentation i.e. docstrings (formalDocumentation_ex4.py)
# Purpose: Provides an example of how to create docstrings in Python using
# formal documentation standards.
#
# Author: <NAME>
# Created: 22-Aug-2018
# Updated: 04-Nov-2018 (fixed spacing)
#-----------------------------------------------------------------------------
def printEmployee(name, occupation, salary):
    '''
    Prints a short summary of an employee.

    Each field is printed on its own line in the fixed order
    name, occupation, salary.

    Parameters
    ----------
    name : str
        The name of the person to be printed
    occupation : str
        The occupation of the person to be printed
    salary : float
        The salary of the person to be printed.

    Returns
    -------
    None
    '''
    fields = (('Name: ', name), ('Occupation: ', occupation), ('Salary: ', salary))
    for label, value in fields:
        print(label + str(value))
# Program runs starting here. Above this line, the functions are just defined.
# Both calls use keyword arguments, so the argument order does not matter.
printEmployee(salary=31000, name="January", occupation="Month") # note order
printEmployee(name="February", occupation="Month", salary=28000) # note order
| StarcoderdataPython |
191913 | <gh_stars>0
from json import load, loads, dump
from subprocess import run, PIPE, Popen
def loadConfig(container):
    """Parse and return the docker config.v2.json for *container*.

    BUG FIX: the file handle was previously opened inline and never
    closed; a context manager guarantees it is released.
    """
    path = f"/var/lib/docker/containers/{container}/config.v2.json"
    with open(path, "r") as config_file:
        return load(config_file)
def writeConfig(container, obj):
    """Serialize *obj* back into the container's config.v2.json.

    BUG FIX: the file handle was previously leaked; the context manager
    closes it, which also guarantees the data is flushed to disk.
    """
    print("Write config")
    path = f"/var/lib/docker/containers/{container}/config.v2.json"
    with open(path, "w") as config_file:
        return dump(obj, config_file)
def runCmd(*cmd):
    """Run *cmd* (an argument list) and return its stdout decoded as UTF-8."""
    completed = run(cmd, stdout=PIPE)
    return completed.stdout.decode("utf-8")
def runFollow(command):
    """Run *command* through the shell and yield its stdout line by line.

    Iteration stops when the process exits or when a line containing
    "Upgrade Complete!" is seen (that line itself is not yielded).

    NOTE(review): `shell=True` means *command* is interpreted by the
    shell — only pass trusted strings. The process is not explicitly
    waited on after the generator finishes.
    """
    process = Popen(command, stdout=PIPE, shell=True)
    while True:
        line = process.stdout.readline().rstrip().decode("utf-8")
        # an empty line together with a set return code means EOF
        if line == '' and process.poll() is not None:
            break
        if "Upgrade Complete!" in line:
            break
        if line:
            yield line.strip()
def restartDocker():
    """Restart the docker service and print the command output."""
    print("Restart docker:")
    out = runCmd("service", "docker", "restart")
    print(out)
def startOrStopContainer(container, start):
    """Start (start=True) or stop (start=False) *container* via the docker CLI,
    printing the command output."""
    action = "start" if start else "stop"
    print(runCmd("docker", action, container))
# Hard-coded ID of the NodeBB container to upgrade.
container = "dd7eba130a8c7186f501a3d900662954cf1941fd49ff6d62594bdd8fe91f1b30"
configobj = loadConfig(container)

# Temporarily rewrite the container entrypoint args to run the upgrade.
print("Modify args for upgrading")
startOrStopContainer(container, False)
configobj["Args"][0] = "-c"
configobj["Args"][1] = "./nodebb upgrade"
writeConfig(container, configobj)
restartDocker()
out = runCmd("docker", "inspect", container)
print("Run with ", loads(out)[0]["Args"])

# Run the upgrade and stream its log output until it reports completion.
startOrStopContainer(container, True)
for line in runFollow(f"docker logs --tail 100 -f {container}"):
    print(line)

# Restore the normal start command and bring the container back up.
startOrStopContainer(container, False)
configobj = loadConfig(container)
configobj["Args"][1] = "./nodebb start"
writeConfig(container, configobj)
restartDocker()
startOrStopContainer(container, True)
| StarcoderdataPython |
3300829 | from typing import Any, Dict, Optional, Type
import pytest
from importlinter.domain.fields import (
DirectImportField,
Field,
ListField,
ModuleField,
SetField,
StringField,
ValidationError,
)
from importlinter.domain.imports import DirectImport, Module
class BaseFieldTest:
    """Shared driver for the parametrized field tests below.

    Subclasses set `field_class` (and optionally `field_kwargs`); each
    parametrized case supplies raw input plus either the expected parsed
    value or a ValidationError whose message must match.
    """
    field_class: Optional[Type[Field]] = None
    field_kwargs: Dict[str, Any] = {}

    def test_field(self, raw_data, expected_value):
        field = self.field_class(**self.field_kwargs)
        if isinstance(expected_value, ValidationError):
            # BUG FIX: the old code ran field.parse() in a bare try block
            # and silently PASSED when no exception was raised at all;
            # pytest.raises makes a missing error an explicit failure.
            with pytest.raises(ValidationError) as excinfo:
                field.parse(raw_data)
            assert excinfo.value.message == expected_value.message
        else:
            assert field.parse(raw_data) == expected_value
@pytest.mark.parametrize(
    "raw_data, expected_value",
    (
        ("Hello, world!", "Hello, world!"),
        (
            ["one", "two", "three"],
            ValidationError("Expected a single value, got multiple values."),
        ),
    ),
)
class TestStringField(BaseFieldTest):
    """StringField: passes single strings through; rejects multiple values."""
    field_class = StringField
@pytest.mark.parametrize(
    "raw_data, expected_value",
    (
        ("mypackage.foo.bar", Module("mypackage.foo.bar")),
        (
            ["one", "two", "three"],
            ValidationError("Expected a single value, got multiple values."),
        ),
        # TODO - test that it belongs in the root package.
    ),
)
class TestModuleField(BaseFieldTest):
    """ModuleField: wraps a dotted path in a Module; rejects multiple values."""
    field_class = ModuleField
@pytest.mark.parametrize(
    "raw_data, expected_value",
    (
        (
            "mypackage.foo -> mypackage.bar",
            DirectImport(importer=Module("mypackage.foo"), imported=Module("mypackage.bar")),
        ),
        (
            ["one", "two", "three"],
            ValidationError("Expected a single value, got multiple values."),
        ),
        (
            "mypackage.foo - mypackage.bar",
            ValidationError('Must be in the form "package.importer -> package.imported".'),
        ),
        (
            "my-package.foo -> my-package.bar",
            DirectImport(importer=Module("my-package.foo"), imported=Module("my-package.bar")),
        ),
    ),
)
class TestDirectImportField(BaseFieldTest):
    """DirectImportField: parses "importer -> imported" strings (dashes in
    package names are allowed); rejects lists and a malformed arrow."""
    field_class = DirectImportField
@pytest.mark.parametrize(
    "raw_data, expected_value",
    (
        (["mypackage.foo", "mypackage.bar"], [Module("mypackage.foo"), Module("mypackage.bar")]),
        (["mypackage.foo", "mypackage.foo"], [Module("mypackage.foo"), Module("mypackage.foo")]),
        ("singlevalue", [Module("singlevalue")]),
    ),
)
class TestListField(BaseFieldTest):
    """ListField: parses each element with the subfield, keeping order and
    duplicates; a scalar becomes a one-element list."""
    field_class = ListField
    field_kwargs = dict(subfield=ModuleField())
@pytest.mark.parametrize(
    "raw_data, expected_value",
    (
        (["mypackage.foo", "mypackage.bar"], {Module("mypackage.foo"), Module("mypackage.bar")}),
        (["mypackage.foo", "mypackage.foo"], {Module("mypackage.foo")}),
        ("singlevalue", {Module("singlevalue")}),
    ),
)
class TestSetField(BaseFieldTest):
    """SetField: like ListField but duplicates collapse into a set."""
    field_class = SetField
    field_kwargs = dict(subfield=ModuleField())
| StarcoderdataPython |
3375646 | import nextcord
from nextcord.ext import commands
class say(commands.Cog):
    """Cog providing the `say` command, which echoes text back in an embed."""

    def __init__(self, client):
        self.client = client

    @commands.command()
    async def say(self, ctx, *, message=None):
        """Repeat *message* inside an embed, or prompt when none is given."""
        # idiom fix: compare against None with `is`, not `==`
        if message is None:
            await ctx.reply('Give me a word to say!')
        else:
            e = nextcord.Embed(description=message)
            e.set_footer(text=f"Requested by {ctx.author.name}")
            await ctx.send(embed=e)
def setup(client):
    """Entry point used by nextcord to register this cog on the bot."""
    client.add_cog(say(client))
| StarcoderdataPython |
3352986 | import argparse
import csv
import json
import logging
import os
from datetime import datetime
import torch
from data_utils.log_wrapper import create_logger
from data_utils.metrics import compute_acc, compute_cross_entropy
from data_utils.utils import set_environment
from mt_dnn.gobbli_batcher import GobbliBatchGen
from mt_dnn.gobbli_model import GobbliMTDNNModel
logging.basicConfig(level=logging.INFO)
def model_config(parser):
    """Register model/architecture CLI flags on *parser* and return it."""
    parser.add_argument("--update_bert_opt", default=0, type=int)
    parser.add_argument("--multi_gpu_on", action="store_true")
    parser.add_argument(
        "--mem_cum_type", type=str, default="simple", help="bilinear/simple/defualt"
    )
    parser.add_argument("--answer_num_turn", type=int, default=5)
    parser.add_argument("--answer_mem_drop_p", type=float, default=0.1)
    parser.add_argument("--answer_att_hidden_size", type=int, default=128)
    parser.add_argument(
        "--answer_att_type",
        type=str,
        default="bilinear",
        help="bilinear/simple/defualt",
    )
    parser.add_argument(
        "--answer_rnn_type", type=str, default="gru", help="rnn/gru/lstm"
    )
    parser.add_argument(
        "--answer_sum_att_type",
        type=str,
        default="bilinear",
        help="bilinear/simple/defualt",
    )
    parser.add_argument("--answer_merge_opt", type=int, default=1)
    parser.add_argument("--answer_mem_type", type=int, default=1)
    parser.add_argument("--answer_dropout_p", type=float, default=0.1)
    parser.add_argument("--answer_weight_norm_on", action="store_true")
    parser.add_argument("--dump_state_on", action="store_true")
    parser.add_argument("--answer_opt", type=int, default=0, help="0,1")
    parser.add_argument("--mtl_opt", type=int, default=0)
    parser.add_argument("--ratio", type=float, default=0)
    parser.add_argument("--mix_opt", type=int, default=0)
    parser.add_argument("--max_seq_len", type=int, default=512)
    parser.add_argument("--init_ratio", type=float, default=1)
    return parser
def data_config(parser):
    """Register data/file-location CLI flags on *parser* and return it."""
    parser.add_argument(
        "--log_file", default="mt-dnn-train.log", help="path for log file."
    )
    parser.add_argument(
        "--init_checkpoint", default="mt_dnn/bert_model_base.pt", type=str
    )
    parser.add_argument("--data_dir", default="data/mt_dnn")
    parser.add_argument("--data_sort_on", action="store_true")
    parser.add_argument("--name", default="farmer")
    parser.add_argument("--train_file")
    parser.add_argument("--valid_file")
    parser.add_argument("--test_file")
    parser.add_argument("--label_file", required=True)
    return parser
def train_config(parser):
    """Register training/optimization CLI flags on *parser* and return it."""
    parser.add_argument(
        "--cuda",
        type=bool,
        default=torch.cuda.is_available(),
        help="whether to use GPU acceleration.",
    )
    parser.add_argument("--log_per_updates", type=int, default=500)
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--batch_size_eval", type=int, default=8)
    parser.add_argument(
        "--optimizer",
        default="adamax",
        help="supported optimizer: adamax, sgd, adadelta, adam",
    )
    parser.add_argument("--grad_clipping", type=float, default=0)
    parser.add_argument("--global_grad_clipping", type=float, default=1.0)
    parser.add_argument("--weight_decay", type=float, default=0)
    parser.add_argument("--learning_rate", type=float, default=5e-5)
    parser.add_argument("--momentum", type=float, default=0)
    parser.add_argument("--warmup", type=float, default=0.1)
    parser.add_argument("--warmup_schedule", type=str, default="warmup_linear")
    parser.add_argument("--vb_dropout", action="store_false")
    parser.add_argument("--dropout_p", type=float, default=0.1)
    parser.add_argument("--dropout_w", type=float, default=0.000)
    parser.add_argument("--bert_dropout_p", type=float, default=0.1)
    # EMA
    parser.add_argument("--ema_opt", type=int, default=0)
    parser.add_argument("--ema_gamma", type=float, default=0.995)
    # scheduler
    parser.add_argument(
        "--have_lr_scheduler", dest="have_lr_scheduler", action="store_false"
    )
    parser.add_argument("--multi_step_lr", type=str, default="10,20,30")
    parser.add_argument("--freeze_layers", type=int, default=-1)
    parser.add_argument("--embedding_opt", type=int, default=0)
    parser.add_argument("--lr_gamma", type=float, default=0.5)
    parser.add_argument("--bert_l2norm", type=float, default=0.0)
    parser.add_argument("--scheduler_type", type=str, default="ms", help="ms/rop/exp")
    parser.add_argument("--output_dir", default="checkpoint")
    parser.add_argument(
        "--seed",
        type=int,
        default=2018,
        help="random seed for data shuffling, embedding init, etc.",
    )
    parser.add_argument(
        "--task_config_path", type=str, default="configs/tasks_config.json"
    )
    return parser
def dump(path, data):
    """Serialize *data* to *path* as JSON."""
    with open(path, "w") as out_file:
        out_file.write(json.dumps(data))
def dump_predictions(scores, labels, output_file):
    """Write per-class prediction scores to *output_file* as CSV.

    Arguments:
        scores: 2-D tensor/array of class scores (rows = examples);
            must support ``.tolist()``.
        labels: dict mapping label name -> class index.
        output_file: destination CSV path.
    """
    # Map label indices (used internally for predictions) to label names
    # (which the user will be familiar with)
    labels_inverse = {ndx: label for label, ndx in labels.items()}
    label_header = [labels_inverse[i] for i in range(len(labels))]
    # BUG FIX: csv files must be opened with newline="" so the csv module
    # controls line endings (avoids spurious blank rows on Windows).
    with open(output_file, "w", newline="") as f:
        writer = csv.writer(f)
        # Write the header
        writer.writerow(label_header)
        # Write the predictions
        for row in scores.tolist():
            writer.writerow(row)
def read_labels(label_file):
    """Read one label per line from *label_file*; map label -> line index."""
    with open(label_file, "r") as handle:
        return {line.strip(): index for index, line in enumerate(handle)}
def eval_model(model, dataset, use_cuda=True, with_label=True):
    """Run *model* over every batch in *dataset* and collect predictions.

    Returns:
        (metrics, predictions, scores) when with_label is True, where
        metrics holds "accuracy" and "loss";
        (predictions, scores) when with_label is False.
    """
    dataset.reset()
    if use_cuda:
        model.cuda()
    predictions = []
    scores = []
    golds = []
    metrics = {}
    for batch in dataset:
        if with_label:
            input_ids, token_type_ids, attention_mask, labels = batch
            golds.extend(labels)
        else:
            input_ids, token_type_ids, attention_mask = batch
        score, pred = model.predict(input_ids, token_type_ids, attention_mask)
        predictions.extend(pred)
        scores.append(score)
    # concatenate the per-batch score tensors into a single tensor
    scores = torch.cat(scores, 0)
    if not with_label:
        return predictions, scores
    metrics["accuracy"] = compute_acc(predictions, golds)
    metrics["loss"] = compute_cross_entropy(scores, torch.LongTensor(golds)).item()
    return metrics, predictions, scores
# Build the CLI parser from the three config sections, then parse.
parser = argparse.ArgumentParser()
parser = data_config(parser)
parser = model_config(parser)
parser = train_config(parser)
args = parser.parse_args()

given_train = args.train_file is not None
given_valid = args.valid_file is not None
given_test = args.test_file is not None
# Training requires both a train set and a validation set.
if given_train and not given_valid or given_valid and not given_train:
    raise ValueError("Must have both a train and valid dataset for training.")

output_dir = args.output_dir
data_dir = args.data_dir
os.makedirs(output_dir, exist_ok=True)
output_dir = os.path.abspath(output_dir)

# Seed RNGs / configure CUDA, then set up file logging.
set_environment(args.seed, args.cuda)
log_path = args.log_file
logger = create_logger(__name__, to_disk=True, log_file=log_path)
def main():
    """Train and/or evaluate an MT-DNN model based on the parsed CLI args.

    Builds batch generators for whichever of train/valid/test files were
    given, loads the initial checkpoint, runs the training loop (saving
    per-epoch scores and model snapshots), and finally writes test-set
    predictions when a test file was supplied.

    NOTE(review): the original indentation was lost in this copy; the
    nesting below (e.g. the per-epoch model save) was reconstructed and
    should be confirmed against the upstream file.
    """
    logger.info("Launching the MT-DNN training")
    opt = vars(args)
    # update data dir
    opt["data_dir"] = data_dir
    labels = read_labels(args.label_file)
    # The original code expects this to be a comma-separated list of ints in string format
    # to account for multiple tasks having different label sizes
    # It works out if we just use our single label size as a string
    opt["label_size"] = str(len(labels))
    # This option was also assigned per-task in the original code, but we'll just take the default
    opt["tasks_dropout_p"] = [args.dropout_p]
    # This was also per-task, and the default command line arg is a scalar instead of a list,
    # so set it correctly to be a list
    opt["answer_opt"] = [0]
    batch_size = args.batch_size
    batch_gen_kwargs = {
        "maxlen": args.max_seq_len,
        "batch_size": batch_size,
        "dropout_w": args.dropout_w,
        "gpu": args.cuda,
    }
    if given_train:
        train_data = GobbliBatchGen(
            args.train_file,
            has_labels=True,
            labels=labels,
            is_train=True,
            **batch_gen_kwargs,
        )
        num_all_batches = args.epochs * len(train_data)
    else:
        num_all_batches = 0
    if given_valid:
        valid_data = GobbliBatchGen(
            args.valid_file,
            has_labels=True,
            labels=labels,
            is_train=False,
            **batch_gen_kwargs,
        )
    if given_test:
        test_data = GobbliBatchGen(
            args.test_file, has_labels=False, is_train=False, **batch_gen_kwargs
        )
    model_path = args.init_checkpoint
    if os.path.exists(model_path):
        state_dict = torch.load(model_path)
        config = state_dict["config"]
        config["attention_probs_dropout_prob"] = args.bert_dropout_p
        config["hidden_dropout_prob"] = args.bert_dropout_p
        opt.update(config)
    else:
        raise ValueError("Could not find the init model!")
    model = GobbliMTDNNModel(opt, state_dict=state_dict, num_train_step=num_all_batches)
    ####model meta str
    headline = "############# Model Arch of MT-DNN #############"
    ###print network
    logger.info("\n{}\n{}\n".format(headline, model.network))
    # dump config
    config_file = os.path.join(output_dir, "config.json")
    with open(config_file, "w", encoding="utf-8") as writer:
        writer.write("{}\n".format(json.dumps(opt)))
        writer.write("\n{}\n{}\n".format(headline, model.network))
    logger.info("Total number of params: {}".format(model.total_param))
    if args.freeze_layers > 0:
        model.network.freeze_layers(args.freeze_layers)
    if args.cuda:
        model.cuda()
    if given_train and given_valid:
        for epoch in range(0, args.epochs):
            logger.warning("At epoch {}".format(epoch))
            start = datetime.now()
            # Training
            train_data.reset()
            for i, (input_ids, token_type_ids, attention_mask, labels) in enumerate(
                train_data
            ):
                model.update(input_ids, token_type_ids, attention_mask, labels)
                if (model.updates) % args.log_per_updates == 0 or model.updates == 1:
                    logger.info(
                        "updates[{0:6}] train loss[{1:.5f}] remaining[{2}]".format(
                            model.updates,
                            model.train_loss.avg,
                            str(
                                (datetime.now() - start)
                                / (i + 1)
                                * (len(train_data) - i - 1)
                            ).split(".")[0],
                        )
                    )
            # Training and validation metrics
            for dataset, name in ((train_data, "train"), (valid_data, "valid")):
                metrics, _, _ = eval_model(
                    model, dataset, use_cuda=args.cuda, with_label=True
                )
                for key, val in metrics.items():
                    logger.warning(
                        "Epoch {0} -- [{1}] {2}: {3:.3f}".format(epoch, name, key, val)
                    )
                score_file = os.path.join(
                    output_dir, "{}_scores_{}.json".format(name, epoch)
                )
                results = {"metrics": metrics}
                dump(score_file, results)
            # snapshot the model once per epoch
            model_file = os.path.join(output_dir, "model_{}.pt".format(epoch))
            model.save(model_file)
    if given_test:
        _, scores = eval_model(model, test_data, use_cuda=args.cuda, with_label=False)
        predict_file = os.path.join(output_dir, "predict.csv")
        dump_predictions(scores, labels, predict_file)
        logger.info("[new predictions saved.]")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
192706 | # coding=utf-8
from __future__ import absolute_import
from .user import *
from .media import *
from .book import *
from .category import *
from .configuration import *
from .notify import *
| StarcoderdataPython |
3276092 | # Generated by Django 2.1.2 on 2018-10-11 04:55
from django.db import migrations, models
import users.models
class Migration(migrations.Migration):
    """Auto-generated Django migration: redefines the `username` field on
    the custom user model (help text, student-number validator,
    uniqueness). Do not edit by hand."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(help_text='小文字の英数字および数字のみ使用できます', max_length=64, unique=True, validators=[users.models.StudentNumberValidator()], verbose_name='学生ID'),
        ),
    ]
| StarcoderdataPython |
110443 | <reponame>rupakc/Kaggle-Compendium
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import ElasticNet
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn import metrics
def get_gaussian_process_regressor():
    """Return a one-element regressor list and the matching name list."""
    return [GaussianProcessRegressor()], ['Gaussian Process']
def get_mlp_regressor(num_hidden_units=51):
    """Return an MLP regressor (hidden layer size *num_hidden_units*) and its name."""
    return [MLPRegressor(hidden_layer_sizes=num_hidden_units)], ['Multi-Layer Perceptron']
def get_ensemble_models():
    """Return five tree-ensemble regressors plus their display names."""
    named_models = [
        ('Random Forests', RandomForestRegressor(n_estimators=51, min_samples_leaf=5, min_samples_split=3, random_state=42)),
        ('Bagging', BaggingRegressor(n_estimators=51, random_state=42)),
        ('Extra Trees', ExtraTreesRegressor(n_estimators=71, random_state=42)),
        ('AdaBoost', AdaBoostRegressor(random_state=42)),
        ('Gradient Boost', GradientBoostingRegressor(n_estimators=101, random_state=42)),
    ]
    classifier_list = [model for _, model in named_models]
    classifier_name_list = [name for name, _ in named_models]
    return classifier_list, classifier_name_list
def get_linear_model():
    """Return an ElasticNet regressor and its display name."""
    return [ElasticNet()], ['Elastic Net']
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
    """Print MAE, median AE, MSE and R2 for *trained_model* on the test set.

    NOTE: Python 2 print statements — this module is not Python 3
    compatible as written.
    """
    print '--------- For Model : ', trained_model_name ,' ---------\n'
    predicted_values = trained_model.predict(X_test)
    print "Mean Absolute Error : ", metrics.mean_absolute_error(y_test,predicted_values)
    print "Median Absolute Error : ", metrics.median_absolute_error(y_test,predicted_values)
    print "Mean Squared Error : ", metrics.mean_squared_error(y_test,predicted_values)
    print "R2 Score : ", metrics.r2_score(y_test,predicted_values)
    print "---------------------------------------\n"
def label_encode_frame(dataframe):
    """Label-encode every string-typed column of *dataframe* in place.

    NOTE(review): the column type is inferred from element 0 via
    `type(...) is str` — assumes the frame has a 0 index label and that
    string columns are uniformly typed; `isinstance` plus a dtype check
    would be safer. TODO confirm against callers.
    """
    columns = dataframe.columns
    encoder = LabelEncoder()
    for column in columns:
        if type(dataframe[column][0]) is str:
            dataframe[column] = encoder.fit_transform(dataframe[column].values)
    return dataframe
def spilt_date(list_of_date_string, separator='-', format='yyyy-mm-dd'):
    """Split year/month/day date strings into parallel lists.

    Returns (month_list, day_list, year_list).

    NOTE(review): the `format` argument is accepted for API compatibility
    but is never used; the order is assumed to be year, month, day around
    `separator`. (The function name's "spilt" typo is kept because
    callers reference it.)
    """
    month_list, day_list, year_list = [], [], []
    for date_string in list_of_date_string:
        parts = date_string.strip().split(separator)
        year_list.append(parts[0])
        month_list.append(parts[1])
        day_list.append(parts[2])
    return month_list, day_list, year_list
# Load the Rossmann sales and store CSVs and join them on the Store id.
filename_train = 'train.csv'
filename_store = 'store.csv'
imputer = Imputer()
sale_frame = pd.read_csv(filename_train)
store_frame = pd.read_csv(filename_store)
combined_frame = pd.merge(sale_frame,store_frame,left_on='Store',right_on='Store')
del combined_frame['PromoInterval']
# Sales is the regression target; drop it from the feature frame.
sales_figures = combined_frame['Sales']
del combined_frame['Sales']
# Expand the Date column into Month/Day/Year features.
month_list,day_list,year_list = spilt_date(list(combined_frame['Date'].values))
combined_frame['Month'] = month_list
combined_frame['Day'] = day_list
combined_frame['Year'] = year_list
del combined_frame['Date']
combined_frame = label_encode_frame(combined_frame)
combined_frame['StateHoliday'] = map(lambda x: 1 if type(x) is str else x,combined_frame['StateHoliday'].values)
combined_frame = imputer.fit_transform(combined_frame.values)
X_train,X_test,y_train,y_test = train_test_split(combined_frame,sales_figures,test_size=0.2,random_state=42)
# Alternate model families, kept for quick switching:
#classifier_list,classifier_name_list = get_ensemble_models()
#classifier_list,classifier_name_list = get_mlp_regressor()
classifier_list,classifier_name_list = get_linear_model()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
    classifier.fit(X_train,y_train)
    print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
| StarcoderdataPython |
1676865 | from tetris.game import GameObject
def test_renderable():
    """Placeholder — no assertions yet. TODO: exercise the renderable API."""
    pass
def test_game_object():
    """GameObject should be constructible with no arguments."""
    obj = GameObject()
    # The bare assignment previously made this test vacuous (and left an
    # unused local); at minimum assert that construction succeeded.
    assert obj is not None
def test_collision():
    """Placeholder — no assertions yet. TODO: cover collision detection."""
    pass
| StarcoderdataPython |
4837068 | from django.test import TestCase
from mixer.backend.django import mixer
from projects.models import Project
from bugs.models import Bug
class ProjectModelTests(TestCase):
    """Test the project model"""

    def setUp(self):
        # one baseline project with the default (active) status
        self.project = Project.objects.create(title='Test')

    def test_project_status_enums(self):
        """Test the status enums are set up correctly"""
        for active_status in Project.ACTIVE_STATUS:
            self.assertIn(active_status, Project.POSSIBLE_STATUS)
        for status in Project.POSSIBLE_STATUS:
            self.assertIn(status, Project.STATUS_CLASSES.keys())

    def test_project_set_status(self):
        """Test the set_status project method"""
        self.project.set_status('ON-GOING')
        self.assertEqual(self.project.status, 'ON-GOING')
        with self.assertRaises(ValueError):
            self.project.set_status('NON_EXISTING_ENUM')

    def test_project_status_tuples(self):
        """Test the status_tuples property"""
        for st in self.project.status_tuples:
            self.assertEqual(Project.STATUS_CLASSES[st[0]], st[1])

    def test_project_get_active(self):
        """Test the get_active class method"""
        Project.objects.create(title='Test2', _status='PAUSED')
        Project.objects.create(title='Test3', _status='CLOSED')
        Project.objects.create(title='Test4', _status='FINISHED')
        actives = Project.get_active()
        # NOTE(review): a *set* of instances is compared against a set of
        # their str() forms — relies on assertQuerysetEqual's default
        # transform; confirm this matches the Django version in use.
        self.assertQuerysetEqual(set(actives), {str(self.project)})

    def test_project_active_bugs(self):
        """Test the active_bugs project method"""
        project = mixer.blend(Project)
        b1 = mixer.blend(Bug, project=project, _status='WAITING')
        b2 = mixer.blend(Bug, project=project, _status='BEING WORKED')
        b3 = mixer.blend(Bug, project=project, _status='FIXED')
        b4 = mixer.blend(Bug, project=project, _status='CLOSED')
        active_bugs = project.active_bugs
        self.assertIn(b1, active_bugs)
        self.assertIn(b2, active_bugs)
        self.assertNotIn(b3, active_bugs)
        self.assertNotIn(b4, active_bugs)
        # a fresh project has no active bugs at all
        p2 = Project.objects.create(title="Proj")
        self.assertFalse(p2.active_bugs.exists())
| StarcoderdataPython |
1655041 | from conans import python_requires
# resolve the shared llvm-common python_requires base package
common = python_requires('llvm-common/0.0.0@Manu343726/testing')


class ClangHeaders(common.LLVMModulePackage):
    """Conan package exposing clang's headers as a header-only component."""
    version = common.LLVMModulePackage.version
    name = 'clang_headers'
    llvm_component = 'clang'
    header_only = True
    include_dirs = ['']
| StarcoderdataPython |
1635258 | import unittest
from generativepy.nparray import make_nparray, make_nparray_frame
from generativepy.movie import save_frame
from image_test_helper import run_image_test
import numpy as np
"""
Test each function of the nparray module, with 1, 3 and 4 channel output
"""
def draw4(array, pixel_width, pixel_height, frame_no, frame_count):
    """
    Draw a transparent blue rectangle on a brown background (RGBA,
    4 channels). All parameters except *array* are part of the
    generativepy draw-callback signature and are unused here.
    """
    background = [128, 64, 0, 255]
    rectangle = [0, 128, 196, 64]
    array[:, :] = background
    array[50:350, 100:500] = rectangle
def draw3(array, pixel_width, pixel_height, frame_no, frame_count):
    """
    Draw a blue rectangle on a brown background (RGB, 3 channels).
    All parameters except *array* are part of the generativepy
    draw-callback signature and are unused here.
    """
    background = [128, 64, 0]
    rectangle = [0, 128, 196]
    array[:, :] = background
    array[50:350, 100:500] = rectangle
def draw1(array, pixel_width, pixel_height, frame_no, frame_count):
    """
    Draw a dark grey rectangle on a light grey background (single
    channel). All parameters except *array* are part of the generativepy
    draw-callback signature and are unused here.
    """
    background = [196]
    rectangle = [64]
    array[:, :] = background
    array[50:350, 100:500] = rectangle
def draw3_nofill(array, pixel_width, pixel_height, frame_no, frame_count):
    """
    Draw only the blue rectangle, leaving the rest of *array* untouched
    (no background fill). All parameters except *array* are part of the
    generativepy draw-callback signature and are unused here.
    """
    array[50:350, 100:500] = [0, 128, 196]
class TestNparrayModule(unittest.TestCase):
    """Render via each nparray entry point and diff against stored
    reference images (run_image_test)."""

    def test_make_nparray_rgba(self):
        def creator(file):
            make_nparray(file, draw4, 600, 400, channels=4)

        self.assertTrue(run_image_test('test_make_nparray_rgba.png', creator))

    def test_make_nparray_rgb(self):
        def creator(file):
            make_nparray(file, draw3, 600, 400, channels=3)

        self.assertTrue(run_image_test('test_make_nparray_rgb.png', creator))

    def test_make_nparray_gray(self):
        def creator(file):
            make_nparray(file, draw1, 600, 400, channels=1)

        self.assertTrue(run_image_test('test_make_nparray_gray.png', creator))

    def test_make_bitmap_frame_rgba(self):
        # NOTE(review): the name says "bitmap" but, like its siblings, this
        # exercises make_nparray_frame.
        def creator(file):
            frame = make_nparray_frame(draw4, 600, 400, channels=4)
            save_frame(file, frame)

        self.assertTrue(run_image_test('test_make_nparray_frame_rgba.png', creator))

    def test_make_nparray_frame_rgb(self):
        def creator(file):
            frame = make_nparray_frame(draw3, 600, 400, channels=3)
            save_frame(file, frame)

        self.assertTrue(run_image_test('test_make_nparray_frame_rgb.png', creator))

    def test_make_nparray_frame_gray(self):
        def creator(file):
            frame = make_nparray_frame(draw1, 600, 400, channels=1)
            save_frame(file, frame)

        self.assertTrue(run_image_test('test_make_nparray_frame_gray.png', creator))

    def test_make_nparray_frame_with_output_rgb(self):
        def creator(file):
            # pre-filled output buffer: grey with a black band near the top
            out = np.full((400, 600, 3), 128, dtype=np.uint)
            out[25:100, 50:550] = [0, 0, 0]
            frame = make_nparray_frame(draw3_nofill, 600, 400, out=out)
            save_frame(file, frame)

        self.assertTrue(run_image_test('test_make_nparray_frame_with_output_rgb.png', creator))
| StarcoderdataPython |
1677771 | import demistomock as demisto
import json
import pytest
from CommonServerPython import entryTypes
# register the 'warning' entry type used by these tests
entryTypes['warning'] = 11

# ---- canned identifiers shared by all fixtures below ----
bot_id: str = '9bi5353b-md6a-4458-8321-e924af433amb'
tenant_id: str = 'pbae9ao6-01ql-249o-5me3-4738p3e1m941'
team_id: str = '19:<EMAIL>'
team_aad_id: str = '7d8efdf8-0c5a-42e3-a489-5ef5c3fc7a2b'
team_name: str = 'The-A-Team'
service_url: str = 'https://smba.trafficmanager.net/emea'

# two investigation channels that are already mirrored to Teams
mirrored_channels: list = [
    {
        'channel_id': '19:2cb<EMAIL>78c624400ef<EMAIL>750539998<EMAIL>.skype',
        'investigation_id': '1',
        'mirror_type': 'all',
        'mirror_direction': 'both',
        'auto_close': 'true',
        'mirrored': True,
        'channel_name': 'incident-1'
    },
    {
        'channel_id': '19:2<EMAIL>3<EMAIL>',
        'investigation_id': '10',
        'mirror_type': 'all',
        'mirror_direction': 'both',
        'auto_close': 'true',
        'mirrored': True,
        'channel_name': 'incident-10'
    }
]

# two team members referenced by the mention tests below
team_members: list = [
    {
        'id': '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg',
        'objectId': '359d2c3c-162b-414c-b2eq-386461e5l050',
        'name': '<NAME>',
        'givenName': 'Bruce',
        'surname': 'Willis',
        'userPrincipalName': '<EMAIL>',
        'tenantId': tenant_id
    },
    {
        'id': '29:1pBMMC85IyjM3tr_MCZi7KW4pw4EULxLN4C7R_xoi3Wva_lOn3VTf7xJlCLK-r-pMumrmoz9agZxsSrCf7__u9R',
        'objectId': '2826c1p7-bdb6-4529-b57d-2598me968631',
        'name': '<NAME>',
        'givenName': 'Denzel',
        'surname': 'Washington',
        'email': '<EMAIL>',
        'userPrincipalName': '<EMAIL>',
        'tenantId': tenant_id
    }
]

# the integration context returned by the patched getIntegrationContext
integration_context: dict = {
    'bot_name': 'DemistoBot',
    'service_url': service_url,
    'tenant_id': tenant_id,
    'teams': json.dumps([{
        'mirrored_channels': mirrored_channels,
        'team_id': team_id,
        'team_aad_id': team_aad_id,
        'team_members': team_members,
        'team_name': team_name
    }])
}
@pytest.fixture(autouse=True)
def get_integration_context(mocker):
    """Auto-applied fixture: serve the canned integration context to every test."""
    mocker.patch.object(demisto, 'getIntegrationContext', return_value=integration_context)
@pytest.fixture(autouse=True)
def get_graph_access_token(requests_mock):
    """Auto-applied fixture: mock the Microsoft Graph OAuth2 token endpoint."""
    requests_mock.post(
        f'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token',
        json={
            'access_token': 'token'
        },
        status_code=200
    )
@pytest.fixture(autouse=True)
def get_bot_access_token(requests_mock):
    """Auto-applied fixture: mock the Bot Framework OAuth2 token endpoint."""
    requests_mock.post(
        'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token',
        json={
            'access_token': 'token'
        }
    )
def test_mentioned_users_to_entities():
    """Known team members resolve to Teams 'mention' entities; unknown names raise."""
    from MicrosoftTeams import mentioned_users_to_entities
    expected_entities = [
        {
            'type': 'mention',
            'mentioned': {
                'id': '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg',
                'name': '<NAME>'
            },
            'text': '<at>@Bruce Willis</at>'
        },
        {
            'type': 'mention',
            'mentioned': {
                'id': '29:1pBMMC85IyjM3tr_MCZi7KW4pw4EULxLN4C7R_xoi3Wva_lOn3VTf7xJlCLK-r-pMumrmoz9agZxsSrCf7__u9R',
                'name': '<NAME>'
            },
            'text': '<at>@Denzel Washington</at>'
        }
    ]
    # Both mentioned users exist in the canned team member list.
    assert mentioned_users_to_entities(['<NAME>', '<NAME>'], integration_context) == expected_entities
    # A user missing from the team raises a descriptive error.
    with pytest.raises(ValueError, match='Team member demisto was not found'):
        mentioned_users_to_entities(['<NAME>', 'demisto'], integration_context)
def test_process_mentioned_users_in_message():
    """'@name;' mentions are extracted and wrapped in <at> tags; emails and bare
    '@' tokens without a terminating ';' are left untouched."""
    from MicrosoftTeams import process_mentioned_users_in_message
    raw = '@demisto dev; @demisto; <EMAIL>; <EMAIL> hi; @hi @wow;'
    expected = '<at>@demisto dev</at> <at>@demisto</at> <EMAIL>; <EMAIL> hi; @hi <at>@wow</at>'
    extracted_users, formatted = process_mentioned_users_in_message(raw)
    assert extracted_users == ['demisto dev', 'demisto', 'wow']
    assert formatted == expected
def test_message_handler(mocker):
    """A message posted in a mirrored channel is added as a war-room entry
    attributed to the Teams user who sent it."""
    from MicrosoftTeams import message_handler
    mocker.patch.object(demisto, 'addEntry')
    # Incoming activity body: the sender matches the first canned team member.
    request_body: dict = {
        'from': {
            'id': '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg',
            'aadObjectId': '359d2c3c-162b-414c-b2eq-386461e5l050',
            'name': '<NAME>'
        }
    }
    # Channel matches the mirror of investigation '1' in the integration context.
    channel_data: dict = {
        'channel': {
            'id': '19:2cbad0d78c624<EMAIL>ef<EMAIL>053<EMAIL>.skype'
        },
        'team': {
            'id': team_id
        }
    }
    message_handler(integration_context, request_body, channel_data, 'waz up')
    assert demisto.addEntry.call_count == 1
    add_entry_args = demisto.addEntry.call_args[1]
    # The entry targets investigation '1' and carries the Teams-origin footer.
    assert add_entry_args == {
        'id': '1',
        'entry': 'waz up',
        'username': 'Bruce Willis',
        'email': '<EMAIL>',
        'footer': '\n**From Microsoft Teams**'
    }
def test_member_added_handler(mocker, requests_mock):
    """When the bot itself is added to a team, the team metadata and member list
    are fetched and persisted into the integration context."""
    from MicrosoftTeams import member_added_handler
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={})
    mocker.patch.object(demisto, 'setIntegrationContext')
    mocker.patch.object(demisto, 'params', return_value={'bot_id': bot_id})
    # Bot-framework members endpoint returns the canned team member list.
    requests_mock.get(
        f'{service_url}/v3/conversations/{team_id}/members',
        json=team_members
    )
    # The member being added is the bot itself (id prefixed with '28:').
    request_body: dict = {
        'recipient': {
            'id': f'28:{bot_id}',
            'name': 'DemistoBot'
        },
        'membersAdded': [{
            'id': f'28:{bot_id}'
        }]
    }
    channel_data: dict = {
        'team': {
            'id': team_id,
            'name': team_name,
            'aadGroupId': team_aad_id
        },
        'eventType': 'teamMemberAdded',
        'tenant': {
            'id': tenant_id
        }
    }
    member_added_handler(integration_context, request_body, channel_data)
    # The stored context should now describe the team the bot was added to.
    expected_integration_context: dict = {
        'bot_name': 'DemistoBot',
        'teams': json.dumps([{
            'mirrored_channels': mirrored_channels,
            'team_id': team_id,
            'team_aad_id': team_aad_id,
            'team_members': team_members,
            'team_name': team_name
        }]),
        'tenant_id': tenant_id,
        'service_url': service_url
    }
    assert demisto.setIntegrationContext.call_count == 2
    set_integration_context = demisto.setIntegrationContext.call_args[0]
    assert len(set_integration_context) == 1
    assert set_integration_context[0] == expected_integration_context
def test_mirror_investigation(mocker, requests_mock):
    """End-to-end mirror_investigation flow: playground rejection, creating a new
    mirror channel (plus notification message), updating an existing mirror, and
    mirroring into a custom-named channel.

    NOTE(review): assertions index into requests_mock.request_history, so the
    order of mock registrations and calls must not change.
    """
    from MicrosoftTeams import mirror_investigation
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'setIntegrationContext')
    mocker.patch.object(
        demisto,
        'params',
        return_value={
            'team': 'The-A-Team'
        }
    )
    # verify command cannot be executed in the war room (investigation type 9 == playground)
    mocker.patch.object(
        demisto,
        'investigation',
        return_value={
            'type': 9
        }
    )
    with pytest.raises(ValueError) as e:
        mirror_investigation()
    assert str(e.value) == 'Can not perform this action in playground.'
    # verify channel is mirrored successfully and a message is sent to it
    mocker.patch.object(
        demisto,
        'investigation',
        return_value={
            'id': '2'
        }
    )
    channel_id: str = 'channel-id'
    # create channel mock request
    requests_mock.post(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'id': channel_id
        }
    )
    # send message mock request
    requests_mock.post(
        f'{service_url}/v3/conversations/{channel_id}/activities',
        json={}
    )
    mirror_investigation()
    # Copy so the module-level fixture list is not mutated by the append below.
    updated_mirrored_channels: list = mirrored_channels[:]
    updated_mirrored_channels.append({
        'channel_id': 'channel-id',
        'investigation_id': '2',
        'mirror_type': 'all',
        'mirror_direction': 'both',
        'auto_close': 'true',
        'mirrored': False,
        'channel_name': 'incident-2'
    })
    expected_integration_context: dict = {
        'bot_name': 'DemistoBot',
        'tenant_id': tenant_id,
        'service_url': service_url,
        'teams': json.dumps([{
            'mirrored_channels': updated_mirrored_channels,
            'team_id': team_id,
            'team_aad_id': team_aad_id,
            'team_members': team_members,
            'team_name': 'The-A-Team'
        }])
    }
    # request_history[1] is the Graph create-channel call, [3] the notification message.
    assert requests_mock.request_history[1].json() == {
        'displayName': 'incident-2',
        'description': 'Channel to mirror incident 2'
    }
    assert requests_mock.request_history[3].json() == {
        'text': 'This channel was created to mirror [incident 2](https://test-address:8443#/WarRoom/2) between '
                'Teams and Demisto. In order for your Teams messages to be mirrored in Demisto, you need to'
                ' mention the Demisto Bot in the message.',
        'type': 'message'
    }
    assert demisto.setIntegrationContext.call_count == 3
    set_integration_context = demisto.setIntegrationContext.call_args[0]
    assert len(set_integration_context) == 1
    # Strip the volatile token fields before comparing the stored context.
    set_integration_context[0].pop('graph_access_token')
    set_integration_context[0].pop('graph_valid_until')
    set_integration_context[0].pop('bot_access_token')
    set_integration_context[0].pop('bot_valid_until')
    assert set_integration_context[0] == expected_integration_context
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Investigation mirrored successfully in channel incident-2.'
    # verify channel mirror is updated successfully
    mocker.patch.object(demisto, 'setIntegrationContext')
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'mirror_type': 'chat',
            'direction': 'FromDemisto',
            'autoclose': 'false'
        }
    )
    mocker.patch.object(
        demisto,
        'investigation',
        return_value={
            'id': '1'
        }
    )
    mirror_investigation()
    assert demisto.setIntegrationContext.call_count == 1
    set_integration_context = demisto.setIntegrationContext.call_args[0]
    assert len(set_integration_context) == 1
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Investigation mirror was updated successfully.'
    # verify channel with custom channel name is mirrored successfully
    mocker.patch.object(
        demisto,
        'investigation',
        return_value={
            'id': '14'
        }
    )
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'channel_name': 'booya'
        }
    )
    mirror_investigation()
    assert requests_mock.request_history[5].json() == {
        'displayName': 'booya',
        'description': 'Channel to mirror incident 14'
    }
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Investigation mirrored successfully in channel booya.'
def test_send_message(mocker, requests_mock):
    """Exercises send_message across its branches: mirrored-entry skip, severity
    threshold, argument validation, sending to a user, to a channel, an
    entitlement (ask-user adaptive card), and a raw adaptive card.

    NOTE(review): assertions index into requests_mock.request_history, so the
    order of mock registrations and calls must not change.
    """
    from MicrosoftTeams import send_message
    mocker.patch.object(demisto, 'results')
    # verify that a mirrored message is skipped (nothing is sent back to Teams)
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'messageType': 'mirrorEntry',
            'originalMessage': 'a mirrored message\n**From Microsoft Teams**'
        }
    )
    assert send_message() is None
    # verify notification from server with severity below threshold is not sent
    mocker.patch.object(
        demisto,
        'params',
        return_value={
            'min_incident_severity': 'Medium',
            'team': 'The-A-Team'
        }
    )
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'messageType': 'incidentOpened',
            'severity': 1
        }
    )
    assert send_message() is None
    # verify error is raised if no user/channel were provided
    mocker.patch.object(
        demisto,
        'args',
        return_value={}
    )
    with pytest.raises(ValueError) as e:
        send_message()
    assert str(e.value) == 'No channel or team member to send message were provided.'
    # verify error is raised if both user and channel were provided
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'channel': 'somechannel',
            'team_member': 'someuser'
        }
    )
    with pytest.raises(ValueError) as e:
        send_message()
    assert str(e.value) == 'Provide either channel or team member to send message to, not both.'
    # verify message is sent properly given user to send to
    mocker.patch.object(
        demisto,
        'params',
        return_value={
            'bot_id': bot_id
        }
    )
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'team_member': '<NAME>',
            'message': 'MESSAGE'
        }
    )
    # Personal conversation creation and message-post endpoints.
    requests_mock.post(
        f'{service_url}/v3/conversations',
        json={
            'id': 'conversation-id'
        }
    )
    requests_mock.post(
        f'{service_url}/v3/conversations/conversation-id/activities',
        json={}
    )
    expected_create_personal_conversation_data: dict = {
        'bot': {
            'id': f'28:{bot_id}',
            'name': 'DemistoBot'
        },
        'members': [{
            'id': '29:1pBMMC85IyjM3tr_MCZi7KW4pw4EULxLN4C7R_xoi3Wva_lOn3VTf7xJlCLK-r-pMumrmoz9agZxsSrCf7__u9R'
        }],
        'channelData': {
            'tenant': {
                'id': tenant_id
            }
        }
    }
    send_message()
    assert requests_mock.request_history[0].json() == expected_create_personal_conversation_data
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'
    # verify message is sent properly given channel
    mocker.patch.object(
        demisto,
        'params',
        return_value={
            'team': 'The-A-Team'
        }
    )
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'channel': 'incident-1',
            'message': 'MESSAGE'
        }
    )
    requests_mock.post(
        f"{service_url}/v3/conversations/{mirrored_channels[0]['channel_id']}/activities",
        json={}
    )
    send_message()
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'
    # verify message is sent properly given entitlement
    message: dict = {
        'message_text': 'is this really working?',
        'options': ['yes', 'no', 'maybe'],
        'entitlement': '4404dae8-2d45-46bd-85fa-64779c12abe8',
        'investigation_id': '72',
        'task_id': '23'
    }
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'team_member': '<EMAIL>',
            'message': json.dumps(message)
        }
    )
    # The entitlement message becomes an adaptive card with one Submit action per option.
    expected_ask_user_message: dict = {
        'attachments': [{
            'content': {
                '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
                'actions': [
                    {
                        'data': {
                            'entitlement': '4404dae8-2d45-46bd-85fa-64779c12abe8',
                            'investigation_id': '72',
                            'response': 'yes',
                            'task_id': '23'
                        },
                        'title': 'yes',
                        'type': 'Action.Submit'
                    },
                    {
                        'data': {
                            'entitlement': '4404dae8-2d45-46bd-85fa-64779c12abe8',
                            'investigation_id': '72',
                            'response': 'no',
                            'task_id': '23'
                        },
                        'title': 'no',
                        'type': 'Action.Submit'
                    },
                    {
                        'data': {
                            'entitlement': '4404dae8-2d45-46bd-85fa-64779c12abe8',
                            'investigation_id': '72',
                            'response': 'maybe',
                            'task_id': '23'
                        },
                        'title': 'maybe',
                        'type': 'Action.Submit'
                    }
                ],
                'body': [{
                    'text': 'is this really working?',
                    'type': 'TextBlock'
                }],
                'type': 'AdaptiveCard',
                'msteams': {
                    'width': 'Full'
                },
                'version': '1.0'
            },
            'contentType': 'application/vnd.microsoft.card.adaptive'
        }],
        'type': 'message'
    }
    send_message()
    assert requests_mock.request_history[4].json() == expected_ask_user_message
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'
    # verify proper error is raised if invalid JSON provided as adaptive card
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'channel': 'channel',
            'adaptive_card': 'THISisSTRINGnotJSON'
        }
    )
    with pytest.raises(ValueError) as e:
        send_message()
    assert str(e.value) == 'Given adaptive card is not in valid JSON format.'
    # verify proper error is raised if both message and adaptive card were provided
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'channel': 'channel',
            'message': 'message',
            'adaptive_card': '{"a":"b"}'
        }
    )
    with pytest.raises(ValueError) as e:
        send_message()
    assert str(e.value) == 'Provide either message or adaptive to send, not both.'
    # verify proper error is raised if neither message or adaptive card were provided
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'channel': 'channel'
        }
    )
    with pytest.raises(ValueError) as e:
        send_message()
    assert str(e.value) == 'No message or adaptive card to send were provided.'
    # verify adaptive card sent successfully
    adaptive_card: dict = {
        "contentType": "application/vnd.microsoft.card.adaptive",
        "content": {
            "$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
            "type": "AdaptiveCard",
            "version": "1.0",
            "body": [
                {
                    "type": "Container",
                    "items": [{
                        "type": "TextBlock",
                        "text": "What a pretty adaptive card"
                    }]
                }
            ]
        }
    }
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'team_member': '<EMAIL>',
            'adaptive_card': json.dumps(adaptive_card)
        }
    )
    expected_conversation: dict = {
        'type': 'message',
        'attachments': [adaptive_card]
    }
    send_message()
    assert requests_mock.request_history[6].json() == expected_conversation
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'
def test_send_message_server_notifications_incident_opened(mocker, requests_mock):
    """
    Given:
     - Notification from server of an incident opened.
    When:
     - Sending notification message of the incident opened.
    Then:
     - Ensure message is sent successfully.
     - Verify the message is sent to the dedicated notifications channel.
    """
    from MicrosoftTeams import send_message
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(
        demisto,
        'params',
        return_value={
            'team': 'The-A-Team',
            'min_incident_severity': 'Low',
            'incident_notifications_channel': 'General'
        }
    )
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'channel': 'incidentNotificationChannel',
            'message': 'user has reported an incident tadam.\nView it on https://server#/WarRoom/3247',
            'messageType': 'incidentOpened',
            'severity': 1,
            'to': ''
        }
    )
    # Graph channel listing resolves 'General' to its channel id.
    requests_mock.get(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'value': [
                {
                    'description': 'general channel',
                    'displayName': 'General',
                    'id': '19:67pd3966e74g45f28d<EMAIL>5f<EMAIL>.skype'
                }
            ]
        }
    )
    # The notification is posted to the resolved channel's activities endpoint.
    requests_mock.post(
        f'{service_url}/v3/conversations/19:67pd3966e74g45f28d0c65f1689132bb@thread.skype/activities',
        json={}
    )
    send_message()
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'
def test_send_message_server_notifications_incident_changed(mocker, requests_mock):
    """
    Given:
     - Notification from server of an updated incident.
    When:
     - Sending notification message of the updated incident.
    Then:
     - Ensure message is sent successfully.
     - Verify the message is sent to the dedicated notifications channel.
    """
    from MicrosoftTeams import send_message
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(
        demisto,
        'params',
        return_value={
            'team': 'The-A-Team',
            'min_incident_severity': 'Low',
            'incident_notifications_channel': 'General'
        }
    )
    mocker.patch.object(
        demisto,
        'args',
        return_value={
            'channel': 'incidentNotificationChannel',
            'message': 'DBot has updated an incident tadam.\nView it on https://server#/WarRoom/3247',
            'messageType': 'incidentChanged',
            'severity': 1,
            'to': ''
        }
    )
    # Graph channel listing resolves 'General' to its channel id.
    requests_mock.get(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'value': [
                {
                    'description': 'general channel',
                    'displayName': 'General',
                    'id': '19:67pd3966e74g<EMAIL>'
                }
            ]
        }
    )
    # The notification is posted to the resolved channel's activities endpoint.
    requests_mock.post(
        f'{service_url}/v3/conversations/19:67pd3966e74g45f28d0c65f1689132bb@thread.skype/activities',
        json={}
    )
    send_message()
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'
def test_get_channel_id(requests_mock):
    """Channel ids resolve from the cached integration context first, then from
    the Graph channels endpoint; unknown channel names raise ValueError."""
    from MicrosoftTeams import get_channel_id
    # get channel which is in the integration context
    assert get_channel_id('incident-1', team_aad_id) == '19:2cbad0d78c624400ef83a5750539998g@thread.skype'
    # get channel which is not in the integration context
    requests_mock.get(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'value': [
                {
                    'description': 'channel for incident 1',
                    'displayName': 'incident-1',
                    'id': '19:6<EMAIL>'
                },
                {
                    'description': 'channel for incident 2',
                    'displayName': 'incident-3',
                    'id': '19:67pd3967e74g45f28d0c65f1689132bo@thread.skype'
                }
            ]
        }
    )
    assert get_channel_id('incident-3', team_aad_id) == '19:67pd3967e74g45f28d0c65f1689132<EMAIL>.skype'
    # Try a channel which does not exist
    with pytest.raises(ValueError) as e:
        get_channel_id('incident-4', team_aad_id)
    assert str(e.value) == 'Could not find channel: incident-4'
def test_close_channel(mocker, requests_mock):
    """close_channel deletes the mirrored channel of the current investigation
    (updating the context), or a named channel resolved via Graph (context
    untouched); a non-mirrored investigation without a channel name raises."""
    from MicrosoftTeams import close_channel
    # DELETE endpoints for the two mirrored channels in the canned context.
    requests_mock.delete(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels/19:2cbad0d78c624400ef83a5750539998g@thread.skype',
        status_code=204
    )
    requests_mock.delete(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels/19:2cbad0d78c624400ef83a575<EMAIL>',
        status_code=204
    )
    mocker.patch.object(demisto, 'results')
    # close channel without given channel name
    mocker.patch.object(demisto, 'investigation', return_value={'id': '1'})
    mocker.patch.object(demisto, 'getIntegrationContext', return_value=integration_context)
    mocker.patch.object(demisto, 'setIntegrationContext')
    close_channel()
    assert requests_mock.request_history[0].method == 'DELETE'
    # Context is updated because the mirror entry must be removed.
    assert demisto.setIntegrationContext.call_count == 1
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Channel was successfully closed.'
    # try to close channel without given channel name, which does not exist in the integration context
    mocker.patch.object(demisto, 'investigation', return_value={'id': '5'})
    with pytest.raises(ValueError) as e:
        close_channel()
    assert str(e.value) == 'Could not find Microsoft Teams channel to close.'
    # close channel given channel name
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'setIntegrationContext')
    requests_mock.get(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'value': [
                {
                    'description': 'channel for incident 1',
                    'displayName': 'incident-1',
                    'id': '19:67pd3967e74g45f28d0c65f1689132bb@thread.skype'
                },
                {
                    'description': 'channel for incident 6',
                    'displayName': 'incident-6',
                    'id': '19:67pd3967e74g45f28d0c65f1689132bo@thread.skype'
                }
            ]
        }
    )
    requests_mock.delete(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels/19:67pd39<EMAIL>',
        status_code=204
    )
    mocker.patch.object(demisto, 'params', return_value={'team': 'The-A-Team'})
    mocker.patch.object(demisto, 'args', return_value={'channel': 'incident-1'})
    close_channel()
    assert requests_mock.request_history[0].method == 'DELETE'
    # Named-channel close does not touch the mirror bookkeeping.
    assert demisto.setIntegrationContext.call_count == 0
    assert demisto.results.call_count == 1
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Channel was successfully closed.'
def test_entitlement_handler(mocker, requests_mock):
    """An adaptive-card Submit response routes the user's answer to
    demisto.handleEntitlementForUser with the entitlement metadata, and the
    original card activity is updated in place via PUT."""
    from MicrosoftTeams import entitlement_handler
    mocker.patch.object(demisto, 'handleEntitlementForUser')
    conversation_id: str = 'f:3005393407786078157'
    activity_id: str = '1:1vW2mx4iDZf05lk18yskL64Wkfwraa76YTGNgDiIi-_5'
    # Mock the activity-update (PUT) endpoint used to replace the card.
    requests_mock.put(
        f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}',
        json={'id': 'updateid'}
    )
    request_body: dict = {
        'from': {
            'id': '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg',
            'aadObjectId': '359d2c3c-162b-414c-b2eq-386461e5l050',
            'name': '<NAME>'
        },
        'replyToId': activity_id
    }
    # Submit-action payload carried by the card response.
    value: dict = {
        'response': 'Approve!',
        'entitlement': '4404dae8-2d45-46bd-85fa-64779c12abe8',
        'investigation_id': '100',
        'task_id': '4'
    }
    entitlement_handler(integration_context, request_body, value, conversation_id)
    assert demisto.handleEntitlementForUser.call_count == 1
    handle_entitlement_args = demisto.handleEntitlementForUser.call_args[1]
    assert handle_entitlement_args == {
        'incidentID': '100',
        'guid': '4404dae8-2d45-46bd-85fa-64779c12abe8',
        'taskID': '4',
        'email': '<EMAIL>',
        'content': 'Approve!'
    }
def test_translate_severity():
    """Known severity labels map to their numeric value; unknown labels map to 0."""
    from MicrosoftTeams import translate_severity
    for label, numeric in (('Low', 1), ('NotRealSeverity', 0)):
        assert translate_severity(label) == numeric
def test_is_investigation_mirrored():
    """Returns the index of a mirrored investigation's channel, or -1 if absent."""
    from MicrosoftTeams import is_investigation_mirrored
    # Investigation '1' is the first entry of the canned mirrored channels.
    assert is_investigation_mirrored('1', mirrored_channels) == 0
    # Investigation '2' is not mirrored anywhere.
    assert is_investigation_mirrored('2', mirrored_channels) == -1
def test_urlify_hyperlinks():
    """Bare http/https URLs in free text are rewritten as markdown links."""
    from MicrosoftTeams import urlify_hyperlinks
    plain = 'Visit https://www.demisto.com and http://www.demisto.com'
    expected = (
        'Visit [https://www.demisto.com](https://www.demisto.com) '
        'and [http://www.demisto.com](http://www.demisto.com)'
    )
    assert urlify_hyperlinks(plain) == expected
def test_get_team_aad_id(mocker, requests_mock):
    """Team AAD ids resolve from the cached integration context first, then from
    the Graph groups listing; an unknown team name raises ValueError."""
    from MicrosoftTeams import get_team_aad_id
    # verify team ID for team which is in integration context
    mocker.patch.object(
        demisto,
        'params',
        return_value={
            'team': 'The-A-Team'
        }
    )
    assert get_team_aad_id('The-A-Team') == '7d8efdf8-0c5a-42e3-a489-5ef5c3fc7a2b'
    # verify non existing team raises value error
    requests_mock.get(
        "https://graph.microsoft.com/beta/groups?$filter=resourceProvisioningOptions/Any(x:x eq 'Team')",
        json={
            '@odata.context': 'https://graph.microsoft.com/beta/$metadata#groups',
            'value': [
                {
                    'id': '02bd9fd6-8f93-4758-87c3-1fb73740a315',
                    'displayName': 'MyGreatTeam',
                    'groupTypes': [
                        'Unified'
                    ],
                    'mailEnabled': True,
                    'resourceBehaviorOptions': [],
                    'resourceProvisioningOptions': [
                        'Team'
                    ],
                    'securityEnabled': False,
                    'visibility': 'Private'
                },
                {
                    'id': '8090c93e-ba7c-433e-9f39-08c7ba07c0b3',
                    'displayName': 'WooahTeam',
                    'groupTypes': [
                        'Unified'
                    ],
                    'mailEnabled': True,
                    'mailNickname': 'X1050LaunchTeam',
                    'resourceBehaviorOptions': [],
                    'resourceProvisioningOptions': [
                        'Team'
                    ],
                    'securityEnabled': False,
                    'visibility': 'Private'
                }
            ]
        }
    )
    with pytest.raises(ValueError) as e:
        get_team_aad_id('The-B-Team')
    assert str(e.value) == 'Could not find requested team.'
    # verify team ID for team which is not in integration context
    assert get_team_aad_id('MyGreatTeam') == '02bd9fd6-8f93-4758-87c3-1fb73740a315'
def test_get_team_member():
    """Looks up a team member's name/email by Teams user id; unknown ids raise."""
    from MicrosoftTeams import get_team_member
    known_user_id = '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg'
    assert get_team_member(integration_context, known_user_id) == {
        'username': '<NAME>',
        'user_email': '<EMAIL>'
    }
    with pytest.raises(ValueError, match='Team member was not found'):
        get_team_member(integration_context, 'NotRealUser')
def test_get_team_member_id():
    """A member can be resolved to a Teams user id by display name or by email."""
    from MicrosoftTeams import get_team_member_id
    expected_user_id: str = '29:1pBMMC85IyjM3tr_MCZi7KW4pw4EULxLN4C7R_xoi3Wva_lOn3VTf7xJlCLK-r-pMumrmoz9agZxsSrCf7__u9R'
    # Both identifiers refer to the same canned team member.
    for identifier in ('<NAME>', '<EMAIL>'):
        assert get_team_member_id(identifier, integration_context) == expected_user_id
    with pytest.raises(ValueError, match='Team member TheRock was not found'):
        get_team_member_id('TheRock', integration_context)
def test_create_adaptive_card():
    """create_adaptive_card wraps a card body (and optional actions) in the
    standard MS Teams adaptive-card envelope."""
    from MicrosoftTeams import create_adaptive_card
    card_body: list = [{
        'type': 'TextBlock',
        'size': 'Medium',
        'weight': 'Bolder',
        'text': 'What a beautiful text'
    }]
    envelope: dict = {
        'contentType': 'application/vnd.microsoft.card.adaptive',
        'content': {
            '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
            'version': '1.0',
            'type': 'AdaptiveCard',
            'msteams': {
                'width': 'Full'
            },
            'body': card_body
        }
    }
    # Without actions, the card contains only the body.
    assert create_adaptive_card(card_body) == envelope
    # With actions, the same envelope gains an 'actions' section.
    card_actions: list = [{
        'type': 'Action.OpenUrl',
        'title': 'DEMISTO',
        'url': 'https://www.demisto.com'
    }]
    envelope['content']['actions'] = card_actions
    assert create_adaptive_card(card_body, card_actions) == envelope
def test_process_tasks_list():
    """A pipe-separated tasks table (as returned by the server) is converted to
    an adaptive card with one FactSet per task row."""
    from MicrosoftTeams import process_tasks_list
    # Header row, separator row, then one task row (wrapped across two strings).
    data_by_line: list = [
        'Task                                     | Incident                       | Due                 | Link ',
        '=========================================|================================|=====================|=====',
        'Manually review the incident             | 21 - nnn                       | 0001-01-01 00:00:00 | '
        'https://demisto.com/#/WorkPlan/21'
    ]
    expected_adaptive_card: dict = {
        'contentType': 'application/vnd.microsoft.card.adaptive',
        'content': {
            '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
            'version': '1.0',
            'type': 'AdaptiveCard',
            'msteams': {
                'width': 'Full'
            },
            'body': [{
                'type': 'FactSet',
                'facts': [
                    {
                        'title': 'Task:',
                        'value': 'Manually review the incident'
                    },
                    {
                        'title': 'Incident:',
                        'value': '21 - nnn'
                    },
                    {
                        'title': 'Due:',
                        'value': '0001-01-01 00:00:00'
                    },
                    {
                        'title': 'Link:',
                        'value': '[https://demisto.com/#/WorkPlan/21](https://demisto.com/#/WorkPlan/21)'
                    }
                ]
            }]
        }
    }
    assert process_tasks_list(data_by_line) == expected_adaptive_card
def test_process_incidents_list():
    """A pipe-separated incidents table (as returned by the server) is converted
    to an adaptive card with one FactSet per incident row."""
    from MicrosoftTeams import process_incidents_list
    # Header row, separator row, then two incident rows (each wrapped across two strings).
    data_by_line: list = [
        'ID         | Name                 | Status      | Type        | Owner       | Created             | Link ',
        '===========|======================|=============|=============|=============|=====================|=====',
        '257        | w                    | Active      | Unclassifie | god         | 2019-07-28 16:42:40 | '
        'https://demisto.com/#/WarRoom/257',
        '250        | gosa                 | Active      | Unclassifie | mozes       | 2019-07-28 16:16:49 | '
        'https://demisto.com/#/WarRoom/250 '
    ]
    expected_adaptive_card: dict = {
        'contentType': 'application/vnd.microsoft.card.adaptive',
        'content': {
            '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
            'version': '1.0',
            'type': 'AdaptiveCard',
            'msteams': {
                'width': 'Full'
            },
            'body': [
                {
                    'type': 'FactSet',
                    'facts': [
                        {
                            'title': 'ID:',
                            'value': '257'
                        },
                        {
                            'title': 'Name:',
                            'value': 'w'
                        },
                        {
                            'title': 'Status:',
                            'value': 'Active'
                        },
                        {
                            'title': 'Type:',
                            'value': 'Unclassifie'
                        },
                        {
                            'title': 'Owner:',
                            'value': 'god'
                        },
                        {
                            'title': 'Created:',
                            'value': '2019-07-28 16:42:40'
                        },
                        {
                            'title': 'Link:',
                            'value': '[https://demisto.com/#/WarRoom/257](https://demisto.com/#/WarRoom/257)'
                        }
                    ]
                },
                {
                    'type': 'FactSet',
                    'facts': [
                        {
                            'title': 'ID:',
                            'value': '250'
                        },
                        {
                            'title': 'Name:',
                            'value': 'gosa'
                        },
                        {
                            'title': 'Status:',
                            'value': 'Active'
                        },
                        {
                            'title': 'Type:',
                            'value': 'Unclassifie'
                        },
                        {
                            'title': 'Owner:',
                            'value': 'mozes'
                        },
                        {
                            'title': 'Created:',
                            'value': '2019-07-28 16:16:49'
                        },
                        {
                            'title': 'Link:',
                            'value': '[https://demisto.com/#/WarRoom/250](https://demisto.com/#/WarRoom/250)'
                        }
                    ]
                }
            ]
        }
    }
    assert process_incidents_list(data_by_line) == expected_adaptive_card
def test_process_mirror_or_unknown_message():
    """A plain-text bot reply is wrapped in an adaptive card; single newlines are
    doubled so they render as line breaks in Teams."""
    from MicrosoftTeams import process_mirror_or_unknown_message
    message: str = 'I can understand the following commands:\nlist incidents [page x]\nlist my incidents [page x]\n' \
                   'list my tasks\nlist closed incidents\nnew incident [details]\nmirror incident-id'
    expected_adaptive_card: dict = {
        'contentType': 'application/vnd.microsoft.card.adaptive',
        'content': {
            '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
            'version': '1.0',
            'type': 'AdaptiveCard',
            'msteams': {
                'width': 'Full'
            },
            'body': [{
                'type': 'TextBlock',
                'text': 'I can understand the following commands:\n\nlist incidents [page x]\n\nlist my incidents [page'
                        ' x]\n\nlist my tasks\n\nlist closed incidents\n\nnew incident [details]\n\nmirror incident-id',
                'wrap': True
            }]
        }
    }
    assert process_mirror_or_unknown_message(message) == expected_adaptive_card
def test_get_participant_info():
    """Extracts the organizer's user id and display name from a meeting's
    participants object."""
    from MicrosoftTeams import get_participant_info
    organizer_identity = {
        'phone': None,
        'guest': None,
        'encrypted': None,
        'onPremises': None,
        'applicationInstance': None,
        'application': None,
        'device': None,
        'user': {
            'id': 'id_identifier',
            'displayName': 'best_user',
            'tenantId': 'tenantId_identifier',
            'identityProvider': 'AAD'
        }
    }
    participants = {
        'organizer': {
            'upn': 'mail.com',
            'role': 'presenter',
            'identity': organizer_identity
        },
        'attendees': []
    }
    # The helper returns (user id, display name) of the organizer.
    assert get_participant_info(participants) == ('id_identifier', 'best_user')
def test_create_channel(requests_mock):
    """create_channel POSTs to the Graph channels endpoint and returns the id of
    the newly created channel."""
    from MicrosoftTeams import create_channel
    # Graph "create channel" call returns a fixed channel id.
    requests_mock.post(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'id': '19:67pd3967e74g45f28d0c65f1689132bb@thread.skype'
        }
    )
    created_channel_id = create_channel(team_aad_id, 'CrazyChannel')
    assert created_channel_id == '19:<EMAIL>.skype'
def test_create_meeting_command(requests_mock, mocker):
    """create_meeting_command resolves the member to a Graph user id, creates an
    online meeting on their behalf, and reports success with the meeting data."""
    from MicrosoftTeams import create_meeting_command
    mocker.patch.object(demisto, 'args', return_value={"subject": "Best_Meeting", "member": "username"})
    mocker.patch.object(demisto, 'results')
    # User lookup returns a single matching Graph user.
    requests_mock.get(
        'https://graph.microsoft.com/v1.0/users',
        json={"value": [{"id": "userid1"}]}
    )
    # The onlineMeetings endpoint returns the created meeting object.
    requests_mock.post(
        'https://graph.microsoft.com/v1.0/users/userid1/onlineMeetings',
        json={
            "chatInfo": {
                "threadId": "19:@thread.skype",
                "messageId": "0",
                "replyChainMessageId": "0"
            },
            "creationDateTime": "2019-07-11T02:17:17.6491364Z",
            "startDateTime": "2019-07-11T02:17:17.6491364Z",
            "endDateTime": "2019-07-11T02:47:17.651138Z",
            "id": "id_12345",
            "joinWebUrl": "https://teams.microsoft.com/l/meetup-join/12345",
            "participants": {
                "organizer": {
                    "identity": {
                        "user": {
                            "id": "user_id_12345",
                            "displayName": "Demisto"
                        }
                    },
                    "upn": "upn-value"
                }
            },
            "subject": "User Token Meeting"
        }
    )
    expected_results = 'The meeting "Best_Meeting" was created successfully'
    create_meeting_command()
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0]['HumanReadable'] == expected_results
    assert results[0]['Contents'].get('id') == 'id_12345'
def test_get_team_members(requests_mock):
    """get_team_members fetches the member list from the bot-framework
    conversations API and returns it unchanged."""
    from MicrosoftTeams import get_team_members
    requests_mock.get(
        f'{service_url}/v3/conversations/{team_aad_id}/members',
        json=team_members
    )
    fetched_members = get_team_members(service_url, team_aad_id)
    assert fetched_members == team_members
def test_update_message(requests_mock):
    """update_message PUTs an adaptive card containing the new text onto the
    original activity in the conversation."""
    from MicrosoftTeams import update_message
    activity_id: str = '1:1vW2mx4iDZf05lk18yskL64Wkfwraa76YTGNgDiIi-_5'
    conversation_id: str = 'f:3005393407786078157'
    requests_mock.put(
        f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}',
        json={'id': 'updateid'}
    )
    # The replacement payload is a minimal one-TextBlock adaptive card.
    expected_conversation: dict = {
        'type': 'message',
        'attachments': [{
            'contentType': 'application/vnd.microsoft.card.adaptive',
            'content': {
                '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
                'version': '1.0', 'type': 'AdaptiveCard',
                'msteams': {
                    'width': 'Full'
                },
                'body': [{
                    'type': 'TextBlock', 'text': 'OMG!'
                }]
            }
        }]
    }
    update_message(service_url, conversation_id, activity_id, 'OMG!')
    assert requests_mock.request_history[0].method == 'PUT'
    assert json.loads(requests_mock.request_history[0].body) == expected_conversation
# def test_create_team(mocker, requests_mock):
# from MicrosoftTeams import create_team
# mocker.patch.object(
# demisto,
# 'args',
# return_value={
# 'display_name': 'OhMyTeam',
# 'mail_nickname': 'No<PASSWORD>names<PASSWORD>',
# 'owner': '<EMAIL>',
# 'mail_enabled': 'true',
# 'security_enabled': 'false'
# }
# )
# requests_mock.get(
# f'https://graph.microsoft.com/v1.0/users',
# json={
# 'value': team_members
# }
# )
# with pytest.raises(ValueError) as e:
# create_team()
# assert str(e.value) == 'Could not find given users to be Team owners.'
# mocker.patch.object(
# demisto,
# 'args',
# return_value={
# 'display_name': 'OhMyTeam',
# 'mail_nickname': 'NoNicknames<PASSWORD>',
# 'owner': '<EMAIL>'
# }
# )
def test_direct_message_handler(mocker, requests_mock):
    """
    Exercise direct_message_handler over four direct-message scenarios:
    1. incident creation rejected (user not found / not allowed),
    2. incident creation succeeding,
    3. 'list my incidents' rendered as an adaptive card,
    4. a server-raised error forwarded verbatim as a message.
    """
    from MicrosoftTeams import direct_message_handler
    mocker.patch.object(
        demisto,
        'createIncidents',
        return_value={
            'id': '4',
            'name': 'incidentnumberfour'
        }
    )
    # Every bot reply in this test is POSTed to this conversation endpoint;
    # assertions below inspect requests_mock.request_history in order.
    requests_mock.post(
        f'{service_url}/v3/conversations/conversation-id/activities',
        json={}
    )
    request_body: dict = {
        'from': {
            'id': '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg'
        }
    }
    conversation: dict = {
        'id': 'conversation-id'
    }
    # verify create incident fails on disallowed external incident creation
    # and a user that cannot be found
    message: str = 'create incident name=GoFish type=Phishing'
    mocker.patch.object(demisto, 'findUser', return_value=None)
    direct_message_handler(integration_context, request_body, conversation, message)
    assert requests_mock.request_history[0].json() == {
        'text': 'You are not allowed to create incidents.', 'type': 'message'
    }
    # verify create incident successfully
    mocker.patch.object(demisto, 'findUser', return_value={'id': 'nice-demisto-id'})
    direct_message_handler(integration_context, request_body, conversation, message)
    assert requests_mock.request_history[1].json() == {
        'text': "Successfully created incident incidentnumberfour.\n"
                "View it on: [https://test-address:8443#/WarRoom/4](https://test-address:8443#/WarRoom/4)",
        'type': 'message'
    }
    # verify get my incidents: the markdown table returned by the server is
    # expected to be converted into an adaptive-card FactSet.
    my_incidents: str = "```ID | Name | Status | Type | Owner | Created" \
                        " | Link\n ===========|======================|=============|=============|====" \
                        "=========|=====================|=====\n257 | w | Active | " \
                        "Unclassifie | god | 2019-07-28 16:42:40 | https://demisto.com/#/WarRoom/257```"
    mocker.patch.object(demisto, 'directMessage', return_value=my_incidents)
    message = 'list my incidents'
    direct_message_handler(integration_context, request_body, conversation, message)
    assert requests_mock.request_history[2].json() == {
        'attachments': [{
            'content': {
                '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
                'body': [{
                    'facts': [
                        {
                            'title': 'ID:',
                            'value': '257'
                        },
                        {
                            'title': 'Name:',
                            'value': 'w'
                        },
                        {
                            'title': 'Status:',
                            'value': 'Active'
                        },
                        {
                            'title': 'Type:',
                            'value': 'Unclassifie'
                        },
                        {
                            'title': 'Owner:',
                            'value': 'god'
                        },
                        {
                            'title': 'Created:',
                            'value': '2019-07-28 16:42:40'
                        },
                        {
                            'title': 'Link:',
                            'value': '[https://demisto.com/#/WarRoom/257](https://demisto.com/#/WarRoom/257)'
                        }
                    ],
                    'type': 'FactSet'
                }],
                'type': 'AdaptiveCard',
                'msteams': {
                    'width': 'Full'
                },
                'version': '1.0'
            },
            'contentType': 'application/vnd.microsoft.card.adaptive'
        }],
        'type': 'message'
    }
    # verify error message raised by Demisto server is sent as message as expected
    mocker.patch.object(
        demisto,
        'directMessage',
        side_effect=ValueError(
            'I\'m sorry but I was unable to find you as a Demisto user for email [<EMAIL>]'
        )
    )
    direct_message_handler(integration_context, request_body, conversation, message)
    assert requests_mock.request_history[3].json() == {
        'type': 'message',
        'text': 'I\'m sorry but I was unable to find you as a Demisto user for email [<EMAIL>]'
    }
def test_error_parser():
    """Verify error_parser extracts messages from bot-framework and Graph errors."""
    from MicrosoftTeams import error_parser

    class FakeResponse:
        """Minimal stand-in for a requests response object."""
        def __init__(self, json_data, status_code, text=''):
            self.json_data = json_data
            self.status_code = status_code
            self.text = text

        def json(self):
            return self.json_data

    # -- bot framework error: parsed message is the 'error_description' field --
    error_description: str = "AADSTS700016: Application with identifier '2bc5202b-ad6a-4458-8821-e104af433bbb' " \
                             "was not found in the directory 'botframework.com'. This can happen if the application " \
                             "has not been installed by the administrator of the tenant or consented to by any user " \
                             "in the tenant. You may have sent your authentication request to the wrong tenant.\r\n" \
                             "Trace ID: 9eaeeec8-7f9e-4fb8-b319-5413581f0a00\r\nCorrelation ID: " \
                             "138cb511-2484-410e-b9c1-14b15accbeba\r\nTimestamp: 2019-08-28 13:18:44Z"
    bot_error_body: dict = {
        'error': 'unauthorized_client',
        'error_description': error_description,
        'error_codes': [
            700016
        ],
        'timestamp': '2019-08-28 13:18:44Z',
        'trace_id': '9eaeeec8-7f9e-4fb8-b319-5413581f0a11',
        'correlation_id': '138cb111-2484-410e-b9c1-14b15accbeba',
        'error_uri': 'https://login.microsoftonline.com/error?code=700016'
    }
    bot_response = FakeResponse(bot_error_body, 400)
    assert error_parser(bot_response, 'bot') == error_description

    # -- graph error: parsed message is '<code>: <message>' --
    error_code: str = 'InvalidAuthenticationToken'
    error_message: str = 'Access token validation failure.'
    graph_error_body: dict = {
        'error': {
            'code': error_code,
            'message': error_message,
            'innerError': {
                'request-id': 'c240ab22-4463-4a1f-82bc-8509d8190a77',
                'date': '2019-08-28T13:37:14'
            }
        }
    }
    graph_response = FakeResponse(graph_error_body, 401)
    assert error_parser(graph_response) == f'{error_code}: {error_message}'
def test_integration_health(mocker):
    """Verify integration_health reports API health and mirrored channels as markdown."""
    from MicrosoftTeams import integration_health
    mocker.patch.object(demisto, 'results')
    # Expected human-readable output: an API-health table followed by the
    # mirrored-channels table. The literal's lines are intentionally at
    # column 0 — the comparison is byte-exact.
    expected_results = """### Microsoft API Health
|Bot Framework API Health|Graph API Health|
|---|---|
| Operational | Operational |
### Microsoft Teams Mirrored Channels
|Channel|Investigation ID|Team|
|---|---|---|
| incident-10 | 10 | The-A-Team |
| incident-2 | 2 | The-A-Team |
| booya | 14 | The-A-Team |
"""
    integration_health()
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0]['HumanReadable'] == expected_results
| StarcoderdataPython |
1702446 | <filename>thelma/tools/worklists/series.py<gh_stars>1-10
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Class for worklist series support.
AAB
"""
from StringIO import StringIO
from thelma.tools.semiconstants import PIPETTING_SPECS_NAMES
from thelma.tools.semiconstants import get_pipetting_specs
from thelma.tools.base import BaseTool
from thelma.tools.worklists.biomek \
import SampleDilutionWorklistWriter
from thelma.tools.worklists.biomek \
import SampleTransferWorklistWriter
from thelma.tools.worklists.execution \
import RackSampleTransferExecutor
from thelma.tools.worklists.execution \
import SampleDilutionWorklistExecutor
from thelma.tools.worklists.execution \
import SampleTransferWorklistExecutor
from thelma.tools.writers import LINEBREAK_CHAR
from thelma.tools.writers import create_zip_archive
from thelma.tools.writers import merge_csv_streams
from thelma.tools.utils.base import VOLUME_CONVERSION_FACTOR
from thelma.tools.utils.racksector import RackSectorTranslator
from thelma.entities.liquidtransfer import ExecutedWorklist
from thelma.entities.liquidtransfer import TRANSFER_TYPES
from thelma.entities.user import User
from thelma.entities.utils import get_user
__docformat__ = 'reStructuredText en'
# Public names exported by this module (includes the underscore-prefixed
# base classes so they can be referenced by the tool framework).
__all__ = ['_LiquidTransferJob',
           'SampleDilutionJob',
           'SampleTransferJob',
           'RackSampleTransferJob',
           '_SeriesTool',
           '_SeriesWorklistWriter',
           'SeriesExecutor',
           'SerialWriterExecutorTool']
class _LiquidTransferJob(object):
    """
    Helper class storing target and source data for a worklist file generation
    or a transfer execution (to provide data for serial executors or
    file generators).

    :Note: There are no checks carried out here.
    """
    #: The transfer type supported by this class
    #: (see :class:`thelma.entities.liquidtransfer.TRANSFER_TYPES`).
    SUPPORTED_TRANSFER_TYPE = None
    #: The executor class supported by this transfer job class.
    EXECUTOR_CLS = None
    #: The writer classes for the different pipetting techniques.
    WRITER_CLASSES = dict()

    def __init__(self, index, target_rack, pipetting_specs):
        """
        Constructor.

        :param index: The index of the transfer job within the series.
        :type index: :class:`int`
        :param target_rack: The rack taking up the volumes.
        :type target_rack: :class:`thelma.entities.rack.Rack`
        :param pipetting_specs: Defines the properties (like the
            transfer volume range, etc.)
        :type pipetting_specs: :class:`basestring` (pipetting specs name) or
            :class:`thelma.entities.liquidtransfer.PipettingSpecs`
        """
        #: The index of the transfer job within the series.
        self.index = index
        #: The rack into which the volumes will be dispensed.
        self.target_rack = target_rack
        # Accept a specs *name* for convenience and resolve it to the entity.
        if isinstance(pipetting_specs, basestring):
            pipetting_specs = get_pipetting_specs(pipetting_specs)
        #: The :class:`PipettingSpecs` to be used for this transfer.
        self.pipetting_specs = pipetting_specs

    def get_executor(self, tool, user):
        """
        Returns a configured :class:`LiquidTransferExecutor`.

        :param tool: The parent tool.
        :type tool: :class:`thelma.tools.base.BaseTool`
        :param user: The DB user executing the transfers.
        :type user: :class:`thelma.entities.user.User`
        :return: configured :class:`LiquidTransferExecutor`
        """
        kw = self._get_kw_for_executor(tool, user)
        return self.EXECUTOR_CLS(**kw) #pylint: disable=E1102

    def _get_kw_for_executor(self, tool, user):
        """
        Returns a keywords map that can be used to initialise an executor.
        Subclasses extend this with their specific arguments.
        """
        kw = dict()
        kw['user'] = user
        kw['parent'] = tool
        kw['target_rack'] = self.target_rack
        kw['pipetting_specs'] = self.pipetting_specs
        return kw

    def get_worklist_writer(self, tool):
        """
        Returns a configured :class:`WorklistWriter` (or *None* if there is
        no writer class registered for the used :attr:`pipetting_specs`).

        :param tool: The parent tool.
        :type tool: :class:`thelma.tools.base.BaseTool`
        :return: configured :class:`WorklistWriter` or *None*
        """
        # dict.get() replaces the deprecated dict.has_key() (removed in
        # Python 3) and collapses the lookup and the membership test.
        writer_cls = self.WRITER_CLASSES.get(self.pipetting_specs.name)
        result = None
        if writer_cls is not None:
            kw = self._get_kw_for_worklist_writer(tool)
            result = writer_cls(**kw)
        return result

    def _get_kw_for_worklist_writer(self, tool):
        """
        Returns a keywords map that can be used to initialise a worklist
        writer. Subclasses extend this with their specific arguments.
        """
        kw = dict()
        kw['target_rack'] = self.target_rack
        kw['parent'] = tool
        kw['pipetting_specs'] = self.pipetting_specs
        return kw
class SampleDilutionJob(_LiquidTransferJob):
    """
    Transfer job for planned sample dilutions.

    :Note: There are no checks carried out here.
    """
    SUPPORTED_TRANSFER_TYPE = TRANSFER_TYPES.SAMPLE_DILUTION
    EXECUTOR_CLS = SampleDilutionWorklistExecutor
    WRITER_CLASSES = {
        PIPETTING_SPECS_NAMES.BIOMEK : SampleDilutionWorklistWriter,
        PIPETTING_SPECS_NAMES.MANUAL : SampleDilutionWorklistWriter,
        PIPETTING_SPECS_NAMES.CYBIO : SampleDilutionWorklistWriter,
        PIPETTING_SPECS_NAMES.BIOMEKSTOCK : SampleDilutionWorklistWriter}

    def __init__(self, index, planned_worklist, target_rack, reservoir_specs,
                 source_rack_barcode=None, ignored_positions=None):
        """
        Constructor.

        :param planned_worklist: The worklist containing the planned liquid
            transfers.
        :type planned_worklist:
            :class:`thelma.entities.liquidtransfer.PlannedWorklist`
        :param reservoir_specs: The specs for the source reservoir or rack.
        :type reservoir_specs:
            :class:`thelma.entities.liquidtransfer.ReservoirSpecs`
        :param source_rack_barcode: The barcode of the source reservoir or
            rack (only required for worklist file generation).
        :type source_rack_barcode: :class:`basestring`
        :param ignored_positions: Rack positions whose planned transfers
            should be skipped (refers to target positions).
        :type ignored_positions: :class:`list` of :class:`RackPosition`
        """
        _LiquidTransferJob.__init__(self, index, target_rack,
                                    planned_worklist.pipetting_specs)
        #: The worklist containing the planned liquid transfers.
        self.planned_worklist = planned_worklist
        #: The specs for the source reservoir or rack.
        self.reservoir_specs = reservoir_specs
        #: The barcode of the source reservoir or rack.
        self.source_rack_barcode = source_rack_barcode
        #: Target rack positions to be skipped during processing.
        self.ignored_positions = ignored_positions

    def _get_kw_for_worklist_writer(self, tool):
        """
        Returns a keyword map for initialising a worklist writer.
        """
        kw = _LiquidTransferJob._get_kw_for_worklist_writer(self, tool)
        kw.update(planned_worklist=self.planned_worklist,
                  reservoir_specs=self.reservoir_specs,
                  source_rack_barcode=self.source_rack_barcode,
                  ignored_positions=self.ignored_positions)
        return kw

    def _get_kw_for_executor(self, tool, user):
        """
        Returns a keyword map for initialising an executor.
        """
        kw = _LiquidTransferJob._get_kw_for_executor(self, tool, user)
        kw.update(planned_worklist=self.planned_worklist,
                  reservoir_specs=self.reservoir_specs,
                  ignored_positions=self.ignored_positions)
        return kw

    def __repr__(self):
        return '<%s index: %i, label: %s, target rack: %s, reservoir ' \
               'specs: %s, source rack barcode: %s>' \
               % (self.__class__.__name__, self.index,
                  self.planned_worklist.label, self.target_rack.barcode,
                  self.reservoir_specs, self.source_rack_barcode)
class SampleTransferJob(_LiquidTransferJob):
    """
    Transfer job for planned sample transfers.

    :Note: There are no checks carried out here.
    """
    SUPPORTED_TRANSFER_TYPE = TRANSFER_TYPES.SAMPLE_TRANSFER
    EXECUTOR_CLS = SampleTransferWorklistExecutor
    WRITER_CLASSES = {
        PIPETTING_SPECS_NAMES.BIOMEK : SampleTransferWorklistWriter,
        PIPETTING_SPECS_NAMES.MANUAL : SampleTransferWorklistWriter,
        PIPETTING_SPECS_NAMES.BIOMEKSTOCK : SampleTransferWorklistWriter,
        PIPETTING_SPECS_NAMES.CYBIO : SampleTransferWorklistWriter}

    def __init__(self, index, planned_worklist, target_rack, source_rack,
                 ignored_positions=None):
        """
        Constructor.

        :param planned_worklist: The worklist containing the planned liquid
            transfers.
        :type planned_worklist:
            :class:`thelma.entities.liquidtransfer.PlannedWorklist`
        :param source_rack: The rack providing the volumes.
        :type source_rack: :class:`thelma.entities.rack.Rack`
        :param ignored_positions: Rack positions whose planned transfers
            should be skipped (refers to target positions).
        :type ignored_positions: :class:`list` of :class:`RackPosition`
        """
        _LiquidTransferJob.__init__(self, index, target_rack,
                                    planned_worklist.pipetting_specs)
        #: The worklist containing the planned liquid transfers.
        self.planned_worklist = planned_worklist
        #: The rack providing the volumes.
        self.source_rack = source_rack
        #: Target rack positions to be skipped during processing.
        self.ignored_positions = ignored_positions

    def _get_kw_for_worklist_writer(self, tool):
        """
        Returns a keyword map for initialising a worklist writer.
        """
        kw = _LiquidTransferJob._get_kw_for_worklist_writer(self, tool)
        kw.update(planned_worklist=self.planned_worklist,
                  source_rack=self.source_rack,
                  ignored_positions=self.ignored_positions)
        return kw

    def _get_kw_for_executor(self, tool, user):
        """
        Returns a keyword map for initialising an executor.
        """
        kw = _LiquidTransferJob._get_kw_for_executor(self, tool, user)
        kw.update(planned_worklist=self.planned_worklist,
                  source_rack=self.source_rack,
                  ignored_positions=self.ignored_positions)
        return kw

    def __repr__(self):
        return '<%s index: %s, label: %s, target rack: %s, ' \
               'source rack: %s>' \
               % (self.__class__.__name__, self.index,
                  self.planned_worklist.label, self.target_rack.barcode,
                  self.source_rack.barcode)
class RackSampleTransferJob(_LiquidTransferJob):
    """
    A transfer job for planned rack sample transfers. This sort of job is
    assumed to be performed by the CyBio.

    :Note: There are no checks carried out here.
    """
    SUPPORTED_TRANSFER_TYPE = TRANSFER_TYPES.RACK_SAMPLE_TRANSFER
    EXECUTOR_CLS = RackSampleTransferExecutor
    # no writer support

    def __init__(self, index, planned_rack_sample_transfer, target_rack,
                 source_rack):
        """
        Constructor.

        :param planned_rack_sample_transfer: The data for the planned liquid
            transfer.
        :type planned_rack_sample_transfer:
            :class:`thelma.entities.liquidtransfer.PlannedRackSampleTransfer`
        :param source_rack: The rack providing the volumes.
        :type source_rack: :class:`thelma.entities.rack.Rack`
        """
        _LiquidTransferJob.__init__(self, index, target_rack,
                                    PIPETTING_SPECS_NAMES.CYBIO)
        #: The data for the planned rack sample transfer.
        self.planned_rack_sample_transfer = planned_rack_sample_transfer
        #: The rack providing the volumes.
        self.source_rack = source_rack

    def _get_kw_for_executor(self, tool, user):
        """
        Returns a keyword map for initialising an executor.
        """
        kw = _LiquidTransferJob._get_kw_for_executor(self, tool, user)
        kw.update(
            planned_rack_sample_transfer=self.planned_rack_sample_transfer,
            pipetting_specs=self.pipetting_specs,
            source_rack=self.source_rack)
        return kw

    def _get_kw_for_worklist_writer(self, tool):
        """
        There are no real worklist files for rack transfers but only
        sections in instruction files (which are handled separately).
        """
        raise NotImplementedError('No worklists for rack transfers.')

    def get_worklist_writer(self, tool):
        """
        There are no real worklist files for rack transfers but only
        sections in instruction files (which are handled separately).
        """
        return None

    def __repr__(self):
        return '<%s index: %s, target rack: %s, source rack: %s>' \
               % (self.__class__.__name__, self.index,
                  self.target_rack.barcode, self.source_rack.barcode)
class _SeriesTool(BaseTool):
    """
    This abstract tool is the base for all worklist series tools (serial
    generation of worklist files and serial execution).

    The use of such a tool is required if the tasks for the series depend
    on one another, e.g. because the volume of for a later source well must
    be provided first.

    The tool takes a list of :class:`_LiquidTransferJob` objects.
    """
    def __init__(self, transfer_jobs, user=None, parent=None):
        """
        Constructor.

        :param dict transfer_jobs: maps job indices to
            :class:`LiquidTransferJob` objects.
        :param user: The user used for the simulated executions.
        :type user: :class:`thelma.entities.user.User`
        :default user: None
        """
        BaseTool.__init__(self, parent=parent)
        #: :class:`LiquidTransferJob` objects mapped onto job indices.
        self.transfer_jobs = transfer_jobs
        #: Stores all involved real racks (mapped onto their barcode).
        self._barcode_map = None
        #: The user used for the simulated executions.
        self.user = user

    def reset(self):
        """
        Resets all attributes except for initialisation values.
        """
        BaseTool.reset(self)
        self._barcode_map = dict()

    def run(self):
        self.reset()
        self._check_input()
        if not self.has_errors():
            self._execute_task()

    def _check_input(self):
        """
        Checks the input values.
        """
        self.add_debug('Check input ...')
        self._check_input_class('user', self.user, User)
        self._check_input_map_classes(self.transfer_jobs, 'transfer job map',
                                      'job index', int, 'transfer job',
                                      _LiquidTransferJob)

    def _execute_task(self):
        """
        The tasks performed by the specific tool.
        """
        raise NotImplementedError('Abstract method.')

    def _update_racks(self, transfer_job):
        """
        Replaces the racks of a transfer job by the updated racks stored
        in :attr:`_barcode_map` (if there are any for its barcodes).
        """
        target_barcode = transfer_job.target_rack.barcode
        # "in" replaces the deprecated dict.has_key() (removed in Python 3).
        if target_barcode in self._barcode_map:
            transfer_job.target_rack = self._barcode_map[target_barcode]
        if isinstance(transfer_job, (SampleTransferJob, RackSampleTransferJob)):
            source_barcode = transfer_job.source_rack.barcode
            if source_barcode in self._barcode_map:
                transfer_job.source_rack = self._barcode_map[source_barcode]

    def _execute_job(self, transfer_job):
        """
        Executes a transfer job and records the updated racks so that
        subsequent jobs see the new volumes.
        """
        executor = transfer_job.get_executor(self, self.user)
        executed_item = executor.get_result()
        if executed_item is None:
            msg = 'Error when trying to execute transfer job: %s.' \
                  % (transfer_job)
            self.add_error(msg)
            result = None
        else:
            target_rack = executor.target_rack
            self._barcode_map[target_rack.barcode] = target_rack
            try:
                source_rack = executor.source_rack
            except AttributeError:
                pass
            else:
                # Bug fix: the source rack used to be stored under the rack
                # *object* instead of its barcode, so _update_racks (which
                # looks up by barcode) never found updated source racks.
                self._barcode_map[source_rack.barcode] = source_rack
            result = executed_item
        return result
class _SeriesWorklistWriter(_SeriesTool):
    """
    Tool that creates worklist files for a whole series of planned worklists.
    It is required to use (in contrast to the use of single worklist writers)
    if the worklist of a series depend on one another, e.g. because the volumes
    for a later source well must be provided first.
    Hence, between two worklists, the tool will run a executor to update
    the rack data. However, the changes of the rack will not be passed to the
    DB.

    :Note: The worklists must be provided as :class:`_LiquidTransferJob`
        objects.

    **Return Value:** A map with key = job index, value = worklist stream.
    """
    NAME = 'Series Worklist Writer'

    def __init__(self, transfer_jobs, parent=None):
        _SeriesTool.__init__(self, transfer_jobs, user=None, parent=parent)
        #: Stores the generated streams (mapped onto indices).
        self._stream_map = None
        #: The stream for the rack transfers (if there are any).
        self._rack_transfer_stream = None
        #: The index of the first rack transfer job (if any, there might be
        #: several, however they all share one file).
        self._rack_transfer_index = None
        #: The number of rack transfer that have occurred so far.
        self._rack_transfer_count = None
        #: The user used for the simulated executions.
        # NOTE(review): 'it' appears to be a service account used only for
        # the simulated (non-persisted) executions - confirm.
        self.user = get_user('it')

    def reset(self):
        """
        Resets all attributes except for initialisation values.
        """
        _SeriesTool.reset(self)
        self._stream_map = dict()
        self._rack_transfer_stream = None
        self._rack_transfer_index = None
        self._rack_transfer_count = 0

    def _execute_task(self):
        """
        The tasks performed by the specific tool (worklist generation).
        """
        self.add_info('Start series worklist file generation ...')
        self.__write_worklists()
        if not self.has_errors():
            # All rack transfer sections share one stream; register it under
            # the index of the first rack transfer job.
            if not self._rack_transfer_stream is None:
                self._rack_transfer_stream.seek(0)
                self._stream_map[self._rack_transfer_index] = \
                                                    self._rack_transfer_stream
            self.return_value = self._stream_map
            self.add_info('Series worklist generation completed.')

    def __write_worklists(self):
        # Writes the worklist files for the passed jobs (in the right order;
        # execution is carried out after each worklist stream creation so
        # later worklists see the updated volumes).
        self.add_debug('Write Worklists ...')
        last_index = max(self.transfer_jobs.keys())
        for job_index in sorted(self.transfer_jobs.keys()):
            transfer_job = self.transfer_jobs[job_index]
            self._update_racks(transfer_job)
            # Write worklist
            writer = self.__get_writer(transfer_job)
            if writer is None and isinstance(transfer_job,
                                             RackSampleTransferJob):
                # Rack transfers are treated differently.
                self.__write_rack_transfer_section(transfer_job)
            elif writer is None:
                msg = 'Unable to find a writer for transfer job "%s".' \
                      % (transfer_job)
                self.add_warning(msg)
            else:
                stream = writer.get_result()
                if stream is None:
                    msg = 'Error when trying to generate file for worklist ' \
                          '"%s".' % (transfer_job.planned_worklist.label)
                    self.add_error(msg)
                    break
                else:
                    self._stream_map[job_index] = stream
            # Even if there is no worklist writer for this case, it might
            # be that we have to execute the transfer as part of the series.
            # Execute changes (to prepare for the next worklist).
            if job_index == last_index:
                break # no execution necessary
            executed_item = self._execute_job(transfer_job)
            if executed_item is None:
                break

    def __get_writer(self, transfer_job):
        # Returns the writer for the given transfer job (may be None).
        return transfer_job.get_worklist_writer(self)

    def __write_rack_transfer_section(self, rack_transfer_job):
        # Writes a section for the rack transfer stream (and initialises it
        # if necessary). All rack transfer jobs share one stream.
        if self._rack_transfer_stream is None:
            self._rack_transfer_stream = StringIO()
            self._rack_transfer_index = rack_transfer_job.index
        self._rack_transfer_count += 1
        writer = RackSampleTransferSectionWriter(self._rack_transfer_count,
                                                 rack_transfer_job,
                                                 parent=self)
        paragraph = writer.get_result()
        if paragraph is None:
            msg = 'Error when trying to generate section for rack ' \
                  'transfer job %s.' % (rack_transfer_job)
            self.add_error(msg)
        else:
            self._rack_transfer_stream.write(paragraph)
class SeriesExecutor(_SeriesTool):
    """
    Executes a whole series of worklists and/or planned liquid transfers
    (rack transfers).

    It is required (in contrast to the use of single executors) if the
    worklists of a series depend on one another, e.g. because the volumes
    for a later source well must be provided first.

    :Note: The planned liquid transfers or worklists must be provided as
        :class:`_LiquidTransferJob` objects.

    **Return Value:** A map with key = job index, value = executed item.
    """
    NAME = 'Series Executor'

    def __init__(self, transfer_jobs, user, parent=None):
        _SeriesTool.__init__(self, transfer_jobs, user=user, parent=parent)
        #: Execution results mapped onto job indices.
        self._execution_map = None

    def reset(self):
        """
        Resets all attributes except for initialisation values.
        """
        _SeriesTool.reset(self)
        self._execution_map = dict()

    def _execute_task(self):
        """
        Executes the jobs one by one, in ascending index order. Aborts the
        series as soon as one job fails.
        """
        self.add_info('Starting worklist series.')
        for idx in sorted(self.transfer_jobs.keys()):
            job = self.transfer_jobs[idx]
            self._update_racks(job)
            executed = self._execute_job(job)
            if executed is None:
                break
            self._execution_map[idx] = executed
        if not self.has_errors():
            self.return_value = self._execution_map
            self.add_info('Series execution completed.')
class RackSampleTransferWriter(_SeriesTool):
    """
    Creates an overview file for a list of rack transfer jobs.

    :Note: This writer will not conduct any checks.

    **Return Value:** Stream for an TXT file.
    """
    NAME = 'Rack Sample Transfer Writer'

    # NOTE(review): the format constants below mirror those on
    # RackSampleTransferSectionWriter but are not referenced in this class
    # (the section writer does the actual formatting) - candidates for
    # removal, confirm no external code uses them.
    #: This line marks a new transfer.
    HEAD_LINE = 'STEP %i:'
    #: The source data line.
    FROM_LINE = 'FROM: rack %s'
    #: The target data line.
    TO_LINE = 'TO: rack %s'
    #: The sector data is added to the line if there is more than one
    #: sector in the transfer.
    SECTOR_ADDITION = ' sector %i'
    #: The volume line.
    VOLUME_LINE = 'volume: %.1f ul'

    def __init__(self, rack_transfer_jobs, parent=None):
        _SeriesTool.__init__(self, rack_transfer_jobs, parent=parent)
        #: The stream for the TXT file.
        self.__stream = None
        #: Counts the number of Cybio steps.
        self.__step_counter = None
        #: The user used for the simulated executions.
        self.user = get_user('it')

    def reset(self):
        """
        Resets all attributes except for initialisation values.
        """
        _SeriesTool.reset(self)
        self.__stream = None
        self.__step_counter = 0

    def _execute_task(self):
        """
        The tasks performed by the specific tool (rack transfer overview
        generation).
        """
        self.add_info('Start rack transfer file generation ...')
        self.__stream = StringIO()
        self.__write_content()
        if not self.has_errors():
            self.return_value = self.__stream
            self.add_info('File creation completed.')

    def __write_content(self):
        # Writes the file content: one section per job, executing each job
        # after writing so the next section sees the updated racks.
        for job_index in sorted(self.transfer_jobs.keys()):
            rack_transfer_job = self.transfer_jobs[job_index]
            self._update_racks(rack_transfer_job)
            self.__write_rack_transfer_job_record(rack_transfer_job)
            # Execute changes (to prepare for the next worklist).
            executed_item = self._execute_job(rack_transfer_job)
            if executed_item is None:
                break
        self.__stream.seek(0)

    def __write_rack_transfer_job_record(self, rack_transfer_job):
        # Adds a rack transfer record (section) to the stream; formatting is
        # delegated to RackSampleTransferSectionWriter.
        self.__step_counter += 1
        writer = RackSampleTransferSectionWriter(
                                    step_number=self.__step_counter,
                                    rack_transfer_job=rack_transfer_job,
                                    parent=self)
        paragraph = writer.get_result()
        if paragraph is None:
            msg = 'Error when trying to write paragraph for rack transfer %s.' \
                  % (rack_transfer_job)
            self.add_error(msg)
        else:
            self.__stream.write(paragraph)
class RackSampleTransferSectionWriter(BaseTool):
    """
    Writes a section for a single rack transfer.

    :Note: There are no checks carried out here.

    **Return Value:** string
    """
    NAME = 'Rack Transfer Section Writer'

    #: This line marks a new transfer.
    HEAD_LINE = 'STEP %i:'
    #: The source data line.
    FROM_LINE = 'FROM: rack %s'
    #: The target data line.
    TO_LINE = 'TO: rack %s'
    #: The sector data is added to the line if there is more than one
    #: sector in the transfer.
    SECTOR_ADDITION = ' sector %i'
    #: The volume line.
    VOLUME_LINE = 'volume: %.1f ul'

    def __init__(self, step_number, rack_transfer_job, parent=None):
        """
        Constructor.

        :param int step_number: The step number (of all rack transfers in
            the series).
        :param rack_transfer_job: The job to write down.
        :type rack_transfer_job: :class:`RackSampleTransferJob`
        """
        BaseTool.__init__(self, parent=parent)
        #: The step (of all rack transfers in the series).
        self.step_number = step_number
        #: The job to write down.
        self.rack_transfer_job = rack_transfer_job

    def run(self):
        self.reset()
        self.add_info('Write rack transfer job section ...')
        self.__check_input()
        if not self.has_errors():
            self.return_value = self.__write_section()
            self.add_info('Section completed.')

    def __check_input(self):
        # Checks the initialisation values.
        self.add_debug('Check input ...')
        self._check_input_class('step number', self.step_number, int)
        self._check_input_class('rack transfer job', self.rack_transfer_job,
                                RackSampleTransferJob)

    def __write_section(self):
        # Writes the section: head line, source line, target line and volume
        # line, followed by a blank spacer line. Sector indices are appended
        # (1-based) only for multi-sector transfers and only on the side
        # that actually addresses a single sector.
        self.add_debug('Write section ...')
        prst = self.rack_transfer_job.planned_rack_sample_transfer
        head_line = self.HEAD_LINE % (self.step_number)
        from_line = self.FROM_LINE % (self.rack_transfer_job.source_rack.barcode)
        to_line = self.TO_LINE % (self.rack_transfer_job.target_rack.barcode)
        # Volume is stored in litres; convert to microlitres for display.
        volume_line = self.VOLUME_LINE % (prst.volume \
                                          * VOLUME_CONVERSION_FACTOR)
        translation_type = RackSectorTranslator.get_translation_behaviour(
                        source_shape=self.rack_transfer_job.source_rack.rack_shape,
                        target_shape=self.rack_transfer_job.target_rack.rack_shape,
                        number_sectors=prst.number_sectors)
        if prst.number_sectors > 1:
            if not translation_type == RackSectorTranslator.MANY_TO_ONE:
                src_addition = self.SECTOR_ADDITION \
                               % (prst.source_sector_index + 1)
                from_line += src_addition
            if not translation_type == RackSectorTranslator.ONE_TO_MANY:
                trg_addition = self.SECTOR_ADDITION \
                               % (prst.target_sector_index + 1)
                to_line += trg_addition
        paragraph = head_line + (2 * LINEBREAK_CHAR)
        paragraph += (from_line + LINEBREAK_CHAR)
        paragraph += (to_line + LINEBREAK_CHAR)
        paragraph += (volume_line + LINEBREAK_CHAR)
        paragraph += (2 * LINEBREAK_CHAR) # spacer
        return paragraph
def create_rack_sector_stream(job_map, tool):
    """
    Creates a stream containing the rack sector info section for the
    passed transfer jobs. The jobs are sorted by the map key before writing.

    :Note: This function does almost the same as the
        :class:`RackSampleTransferWriter`. However, there are no checks and
        no transfer executions in this function.

    :param job_map: The rack transfer jobs mapped onto a key that can be used
        for sorting.
    :type job_map: :class:`dict`
    :param tool: The parent tool for the section writers.
    :return: the stream, or *None* if a section could not be generated
    """
    stream = StringIO()
    rack_transfer_counter = 0
    result = stream
    # sorted() instead of keys()/sort(): dict.keys() returns a view in
    # Python 3 which has no sort() method.
    for i in sorted(job_map.keys()):
        rack_transfer_counter += 1
        writer = RackSampleTransferSectionWriter(rack_transfer_counter,
                                                 job_map[i],
                                                 parent=tool)
        paragraph = writer.get_result()
        if paragraph is None:
            result = None
            break
        stream.write(paragraph)
    if result is not None:
        stream.seek(0)
    return result
class SerialWriterExecutorTool(BaseTool):
"""
A abstract base class for tools that shall print or execute worklists
of a series.
The distinction works via mode (see :attr:`MODE_EXECUTE` and
:attr:`MODE_PRINT_WORKLISTS`). Execution mode requires a user to be set.
Printing modes silently ignores the user.
**Return Value:** a zip stream for for printing mode or executed worklists
for execution mode (can be overwritten)
"""
NAME = 'Serial Writer Executor'
#: Marks usage of execution mode.
MODE_EXECUTE = 'execute'
#: Marker for the usage of worklist printing mode.
MODE_PRINT_WORKLISTS = 'print'
#: All allowed modes (default: execute and print).
_MODES = [MODE_EXECUTE, MODE_PRINT_WORKLISTS]
#: This placeholder is used to mark streams and executed items for
#: rack sample transfer jobs.
_RACK_SAMPLE_TRANSFER_MARKER = 'rack_sample_transfer'
    def __init__(self, mode, user=None, parent=None):
        """
        Constructor.

        :param mode: :attr:`MODE_EXECUTE` or :attr:`MODE_PRINT_WORKLISTS`
        :type mode: str
        :param user: The user who conducts the DB update (required for
            execution mode, silently ignored in printing mode).
        :type user: :class:`thelma.entities.user.User`
        :default user: *None*
        """
        BaseTool.__init__(self, parent=parent)
        #: Print or execute?
        self.mode = mode
        #: Required for execution mode.
        self.user = user
        #: The transfer jobs mapped onto job indices (populated by
        #: :func:`_create_transfer_jobs`).
        self._transfer_jobs = None
        #: The worklists for each rack sample transfer job index.
        self._rack_transfer_worklists = None
        #: The indices of all rack sample transfer jobs.
        self.__rack_transfer_indices = None
def reset(self):
BaseTool.reset(self)
self._transfer_jobs = dict()
self._rack_transfer_worklists = dict()
self.__rack_transfer_indices = set()
def run(self):
self.reset()
self._check_input()
if not self.has_errors():
self._create_transfer_jobs()
if not self.has_errors():
self.__get_rack_transfer_job_indices()
if not self.has_errors():
if self.mode == self.MODE_PRINT_WORKLISTS:
self.__print_worklists()
else:
self._execute_worklists()
@classmethod
def create_writer(cls, **kw):
"""
Factory method returning a serial writer/executor in writer mode.
"""
return cls(mode=cls.MODE_PRINT_WORKLISTS, **kw)
@classmethod
def create_executor(cls, user, **kw):
"""
Factory method returning a serial writer/executor in execution mode.
"""
return cls(mode=cls.MODE_EXECUTE, user=user, **kw)
def _check_input(self):
"""
Checks the input values.
"""
if self._check_input_class('mode', self.mode, str):
if not self.mode in self._MODES:
msg = 'Unexpected mode: %s. Allowed modes: %s.' % (self.mode,
', '.join(self._MODES))
self.add_error(msg)
if not self.has_errors():
if self.mode == self.MODE_EXECUTE:
self._check_input_class('user', self.user, User)
def _create_transfer_jobs(self):
"""
The transfer jobs must be mapped onto job indices. For rack sample
transfers you also have to record the worklist name
(:attr:`_rack_transfer_worklists`).
"""
raise NotImplementedError('Abstract method.')
def __get_rack_transfer_job_indices(self):
# Rack sample transfer job need be treated differently (they do not
# allow for CSV worklists and the executed items are single liquid
# transfers instead of worklists).
for job_index, transfer_job in self._transfer_jobs.iteritems():
if isinstance(transfer_job, RackSampleTransferJob):
self.__rack_transfer_indices.add(job_index)
def __print_worklists(self):
# Called in printing mode (:attr:`MODE_PRINT_WORKLISTS`). Non-rack
# worklist for the same worklist label are merged by default.
stream_map = self.__run_serial_writer()
if not self.has_errors():
merge_map = self._merge_streams(stream_map)
rack_transfer_stream = self._get_rack_transfer_stream(stream_map)
if not self.has_errors():
file_map = self._get_file_map(merge_map, rack_transfer_stream)
if not self.has_errors():
zip_stream = StringIO()
create_zip_archive(zip_stream, file_map)
self.return_value = zip_stream
self.add_info('Serial working print completed.')
def __run_serial_writer(self):
# Runs the seiral worklist writer.
writer = _SeriesWorklistWriter(self._transfer_jobs, parent=self)
stream_map = writer.get_result()
if stream_map is None:
msg = 'Error when running serial worklist printer.'
self.add_error(msg)
result = None
else:
result = stream_map
return result
def _merge_streams(self, stream_map):
"""
By default, streams for the same worklist are merged.
"""
sorted_streams = dict()
for job_index, stream in stream_map.iteritems():
if job_index in self.__rack_transfer_indices:
continue
transfer_job = self._transfer_jobs[job_index]
worklist_label = transfer_job.planned_worklist.label
if sorted_streams.has_key(worklist_label):
worklist_map = sorted_streams[worklist_label]
else:
worklist_map = dict()
sorted_streams[worklist_label] = worklist_map
worklist_map[job_index] = stream
merged_streams = dict()
for worklist_label, worklist_map in sorted_streams.iteritems():
merged_stream = merge_csv_streams(worklist_map)
merged_streams[worklist_label] = merged_stream
return merged_streams
def _get_rack_transfer_stream(self, stream_map):
"""
Returns the stream for the rack sample transfer jobs stream.
There should be only one stream.
"""
rack_transfer_streams = dict()
for job_index in self.__rack_transfer_indices:
if not stream_map.has_key(job_index):
continue
rack_transfer_streams[job_index] = stream_map[job_index]
if len(rack_transfer_streams) > 1:
msg = 'There is more than one rack transfer stream in the ' \
'stream map!'
self.add_error(msg)
return None
if len(rack_transfer_streams) > 0:
return rack_transfer_streams.values()[0]
else:
return None
def _get_file_map(self, merged_stream_map, rack_sample_transfer_stream):
"""
Returns a map containing the file name (key) for each the merged
stream (value).
The merged stream map is derived from :func:`_merge_streams`.
By default, streams are mapped on the worklist label (merged).
The rack sample transfer stream can be *None*.
"""
raise NotImplementedError('Abstract method.')
def _execute_worklists(self):
"""
Called in execution mode (:attr:`MODE_PRINT_WORKLISTS`).
The executed rack sample transfers have to be summarized to jobs
first.
"""
executed_worklists = self._get_executed_worklists()
if executed_worklists is not None:
self.return_value = executed_worklists
self.add_info('Serial worklist execution completed.')
def _get_executed_worklists(self):
"""
Runs the :class:`SeriesExecutor`.
"""
executor = SeriesExecutor(self._transfer_jobs, self.user,
parent=self)
executed_items = executor.get_result()
if executed_items is None:
msg = 'Error when running serial worklist executor!'
self.add_error(msg)
return None
else:
return self.__get_executed_worklists(executed_items)
def __get_executed_worklists(self, executed_items):
# For rack sample job, the executor returns no worklists but executed
# liquid transfer. The worklists for the job must be registered in
# the :attr:`_rack_transfer_worklists` map by the subclass
# implementation.
executed_worklists = dict()
other_worklists = []
for job_index, executed_item in executed_items.iteritems():
if not job_index in self.__rack_transfer_indices:
other_worklists.append(executed_item)
continue
worklist = self._rack_transfer_worklists[job_index]
worklist_label = worklist.label
if executed_worklists.has_key(worklist_label):
executed_worklist = executed_worklists[worklist_label]
else:
executed_worklist = ExecutedWorklist(worklist)
executed_worklists[worklist_label] = executed_worklist
elt = executed_items[job_index]
executed_worklist.executed_liquid_transfers.append(elt)
return other_worklists + executed_worklists.values()
| StarcoderdataPython |
142151 | <reponame>ABM-Community-Ports/droidboot_device_planet-cosmocom<filename>scripts/dct/obj/GpioObj.py
#! /usr/bin/python
# -*- coding: utf-8 -*-
import re
import os
import sys
import string
import ConfigParser
import xml.dom.minidom
from data.GpioData import GpioData
from data.EintData import EintData
from ModuleObj import ModuleObj
import ChipObj
from utility.util import compare
from utility.util import sorted_key
from utility.util import log
from utility.util import LogLevel
class GpioObj(ModuleObj):
    """Generator for MediaTek GPIO configuration files (DCT tool).

    Reads the chip compare/figure config files plus the project XML node
    and emits: cust_gpio_usage.h, cust_gpio_boot.h, cust_gpio.dtsi,
    <chip>-pinfunc.h, pinctrl-mtk-<chip>.h, cust_scp_gpio_usage.h and
    cust_gpio_usage_mapping.dtsi.
    """
    def __init__(self):
        ModuleObj.__init__(self,'cust_gpio_boot.h', 'cust_gpio.dtsi')
        # Output file names; pinfunc/pinctrl headers embed the chip id.
        self.__fileName = 'cust_gpio_usage.h'
        self.__filePinfunc = '%s-pinfunc.h' %(ModuleObj.get_chipId().lower())
        self.__filePinCtrl = 'pinctrl-mtk-%s.h' %(ModuleObj.get_chipId().lower())
        self.__fileScp = 'cust_scp_gpio_usage.h'
        self.__fileMap = 'cust_gpio_usage_mapping.dtsi'
        # Set to True in read() once any pin carries a <drv_cur> setting.
        self.__drvCur = False
        # 'GPIO_COLUMN_ENABLE' chip flag; when set, the mode vector values
        # '1'/'2' select the first/second name of "A//B" mode pairs.
        self.__gpio_column_enable = True
    def get_cfgInfo(self):
        """Load frequency/special-mode/mode maps and SMT group numbers from
        the DCT compare and figure config files into GpioData."""
        cp = ConfigParser.ConfigParser(allow_no_value=True)
        cp.read(ModuleObj.get_cmpPath())
        # get GPIO_FREQ section
        keys= cp.options('GPIO_FREQ')
        for key in keys:
            value = cp.get('GPIO_FREQ', key)
            GpioData._freqMap[key] = value
        # get GPIO_MODE section
        keys = cp.options('GPIO_MODE')
        for key in keys:
            value = cp.get('GPIO_MODE', key)
            GpioData._specMap[key] = value
        GpioData._mapList = cp.options('GPIO_VARIABLES_MAPPING')
        cp.read(ModuleObj.get_figPath())
        ops = cp.options('GPIO')
        for op in ops:
            value = cp.get('GPIO', op)
            list = re.split(r' +|\t+', value)
            # Last field is the SMT group number; the rest are mode tokens.
            tmp_list = list[0:len(list)-2]
            temp = []
            for item in tmp_list:
                # Strip 6 leading chars and 1 trailing char of each token
                # (token framing of the figure file -- see DCT file format).
                str = item[6:len(item)-1]
                temp.append(str)
            GpioData._modeMap[op] = temp
            data = GpioData()
            data.set_smtNum(string.atoi(list[len(list)-1]))
            ModuleObj.set_data(self, op.lower(), data)
        if cp.has_option('Chip Type', 'GPIO_COLUMN_ENABLE'):
            flag = cp.get('Chip Type', 'GPIO_COLUMN_ENABLE')
            if flag == '0':
                self.__gpio_column_enable = False
    def read(self, node):
        """Populate the per-pin GpioData records from the <gpioN> children
        of *node* (project XML). Returns True.

        A <count> child sets the total pin count; pins whose number exceeds
        the configured data size terminate the scan.
        """
        nodes = node.childNodes
        for node in nodes:
            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
                if cmp(node.nodeName, 'count') == 0:
                    GpioData._count = string.atoi(node.childNodes[0].nodeValue)
                    continue
                eintNode = node.getElementsByTagName('eint_mode')
                defmNode = node.getElementsByTagName('def_mode')
                modsNode = node.getElementsByTagName('mode_arr')
                inpeNode = node.getElementsByTagName('inpull_en')
                inpsNode = node.getElementsByTagName('inpull_selhigh')
                defdNode = node.getElementsByTagName('def_dir')
                diriNode = node.getElementsByTagName('in')
                diroNode = node.getElementsByTagName('out')
                outhNode = node.getElementsByTagName('out_high')
                var0Node = node.getElementsByTagName('varName0')
                var1Node = node.getElementsByTagName('varName1')
                var2Node = node.getElementsByTagName('varName2')
                smtNode = node.getElementsByTagName('smt')
                iesNode = node.getElementsByTagName('ies')
                drvCurNode = node.getElementsByTagName('drv_cur')
                num = string.atoi(node.nodeName[4:])
                if num >= len(ModuleObj.get_data(self)):
                    break
                data = ModuleObj.get_data(self)[node.nodeName]
                if len(eintNode):
                    flag = False
                    if cmp(eintNode[0].childNodes[0].nodeValue, 'true') == 0:
                        flag = True
                    data.set_eintMode(flag)
                if len(defmNode):
                    data.set_defMode(string.atoi(defmNode[0].childNodes[0].nodeValue))
                if len(modsNode) != 0 and len(modsNode[0].childNodes) != 0:
                    # Mode vector: one character ('0'/'1'/'2') per mode slot.
                    str = modsNode[0].childNodes[0].nodeValue
                    temp_list = []
                    for i in range(0, len(str)):
                        temp_list.append(str[i])
                    data.set_modeVec(temp_list)
                if len(inpeNode):
                    flag = False
                    if cmp(inpeNode[0].childNodes[0].nodeValue, 'true') == 0:
                        flag = True
                    data.set_inpullEn(flag)
                if len(inpsNode):
                    flag = False
                    if cmp(inpsNode[0].childNodes[0].nodeValue, 'true') == 0:
                        flag = True
                    data.set_inpullSelHigh(flag)
                if len(defdNode):
                    data.set_defDir(defdNode[0].childNodes[0].nodeValue)
                if len(diriNode) != 0 and len(diriNode[0].childNodes) != 0:
                    flag = False
                    if cmp(diriNode[0].childNodes[0].nodeValue, 'true') == 0:
                        flag = True
                    data.set_inEn(flag)
                if len(diroNode) != 0 and len(diroNode[0].childNodes) != 0:
                    flag = False
                    if cmp(diroNode[0].childNodes[0].nodeValue, 'true') == 0:
                        flag = True
                    data.set_outEn(flag)
                if len(outhNode):
                    flag = False
                    if cmp(outhNode[0].childNodes[0].nodeValue, 'true') == 0:
                        flag = True
                    data.set_outHigh(flag)
                # Up to three user variable names per pin (varName0..2).
                temp_list= []
                if len(var0Node) != 0 and len(var0Node[0].childNodes) != 0:
                    temp_list.append(var0Node[0].childNodes[0].nodeValue)
                if len(var1Node) != 0 and len(var1Node[0].childNodes) != 0:
                    temp_list.append(var1Node[0].childNodes[0].nodeValue)
                if len(var2Node) != 0 and len(var2Node[0].childNodes) != 0:
                    temp_list.append(var2Node[0].childNodes[0].nodeValue)
                data.set_varNames(temp_list)
                if len(smtNode):
                    flag = False
                    if cmp(smtNode[0].childNodes[0].nodeValue, 'true') == 0:
                        flag = True
                    data.set_smtEn(flag)
                if len(iesNode):
                    flag = False
                    if cmp(iesNode[0].childNodes[0].nodeValue, 'true') == 0:
                        flag = True
                    data.set_iesEn(flag)
                if len(drvCurNode) != 0 and len(drvCurNode[0].childNodes) != 0:
                    # Remember that at least one pin has a drive-current
                    # setting so fill_hFile() emits the _DRV defines.
                    self.__drvCur = True
                    data.set_drvCur(drvCurNode[0].childNodes[0].nodeValue)
                ModuleObj.set_data(self, node.nodeName, data)
        return True
    def get_gpioData(self, idx):
        """Return the GpioData record for pin *idx*, or None when out of
        range."""
        if idx >= GpioData._count or idx < 0:
            return None
        key = 'gpio%s' %(idx)
        return ModuleObj.get_data(self)[key]
    def parse(self, node):
        """Load config files, then read the per-pin XML data."""
        self.get_cfgInfo()
        self.read(node)
    def isMuxMode(self, key, index, modIdx):
        """Return True when the mode name at *index* is a multiplexed
        "A//B" pair. NOTE: *index* is always appended to *modIdx* as a
        side effect."""
        mode_name = GpioData.get_modeName(key, index)
        modIdx.append(index)
        if mode_name.find('//') != -1:
            return True
        return False
    def gen_files(self):
        """Generate the base files plus the usage header and all
        chip-specific files."""
        ModuleObj.gen_files(self)
        self.gen_cFile()
        self.gen_specFiles()
    def gen_spec(self, para):
        """Generate a single output file selected by the *para* tag."""
        if para == 'gpio_usage_h':
            self.gen_cFile()
        elif para == 'gpio_boot_h':
            self.gen_hFile()
        elif para == 'gpio_dtsi':
            self.gen_dtsiFile()
        elif para == 'scp_gpio_usage_h':
            self.gen_scpUsage()
        elif para == 'pinctrl_h':
            self.gen_pinCtrl()
        elif para == 'pinfunc_h':
            self.gen_pinFunc()
        elif para == 'gpio_usage_mapping_dtsi':
            self.gen_mapDtsi()
    def gen_cFile(self):
        """Write cust_gpio_usage.h."""
        gen_str = ''
        fp = open(os.path.join(ModuleObj.get_genPath(), self.__fileName), 'w')
        gen_str += ModuleObj.writeComment()
        gen_str += ModuleObj.writeHeader(self.__fileName)
        gen_str += self.fill_cFile()
        gen_str += ModuleObj.writeTail(self.__fileName)
        fp.write(gen_str)
        fp.close()
    def gen_specFiles(self):
        """Write the pinfunc/pinctrl headers, the SCP usage header and the
        usage-mapping dtsi."""
        self.gen_pinFunc()
        self.gen_pinCtrl()
        self.gen_scpUsage()
        self.gen_mapDtsi()
    def gen_pinFunc(self):
        """Write <chip>-pinfunc.h."""
        gen_str = ''
        fp = open(os.path.join(ModuleObj.get_genPath(), self.__filePinfunc), 'w')
        gen_str += ModuleObj.writeComment()
        gen_str += ModuleObj.writeHeader(self.__filePinfunc)
        gen_str += self.fill_pinfunc_hFile()
        gen_str += ModuleObj.writeTail(self.__filePinfunc)
        fp.write(gen_str)
        fp.close()
    def gen_pinCtrl(self):
        """Write pinctrl-mtk-<chip>.h."""
        gen_str = ''
        fp = open(os.path.join(ModuleObj.get_genPath(), self.__filePinCtrl), 'w')
        gen_str += ModuleObj.writeComment()
        gen_str += ModuleObj.writeHeader(self.__filePinCtrl)
        gen_str += self.fill_pinctrl_hFile()
        gen_str += ModuleObj.writeTail(self.__filePinCtrl)
        fp.write(gen_str)
        fp.close()
    def gen_scpUsage(self):
        """Write cust_scp_gpio_usage.h (same body content as the usage
        header)."""
        gen_str = ''
        fp = open(os.path.join(ModuleObj.get_genPath(), self.__fileScp), 'w')
        gen_str += ModuleObj.writeComment()
        gen_str += ModuleObj.writeHeader(self.__fileScp)
        gen_str += self.fill_cFile()
        gen_str += ModuleObj.writeTail(self.__fileScp)
        fp.write(gen_str)
        fp.close()
    def gen_mapDtsi(self):
        """Write cust_gpio_usage_mapping.dtsi."""
        gen_str = ''
        fp = open(os.path.join(ModuleObj.get_genPath(), self.__fileMap), 'w')
        gen_str += ModuleObj.writeComment()
        gen_str += self.fill_mapping_dtsiFile()
        fp.write(gen_str)
        fp.close()
    def fill_hFile(self):
        """Return the cust_gpio_boot.h content: SMT group defines followed
        by the per-pin MODE/DIR/PULL/DATAOUT/SMT/IES (and optional DRV)
        defines."""
        gen_str = '''//Configuration for GPIO SMT(Schmidt Trigger) Group output start\n'''
        # One define per distinct SMT group number (negative = no group).
        temp_list = []
        for key in sorted_key(ModuleObj.get_data(self).keys()):
        #for value in ModuleObj.get_data(self).values():
            value = ModuleObj.get_data(self)[key]
            num = value.get_smtNum()
            if num in temp_list or num < 0:
                continue
            else:
                temp_list.append(num)
            if value.get_smtEn():
                gen_str += '''#define GPIO_SMT_GROUP_%d\t\t1\n''' %(num)
            else:
                gen_str += '''#define GPIO_SMT_GROUP_%d\t\t0\n''' %(num)
        gen_str += '''\n\n'''
        sorted_list = sorted(ModuleObj.get_data(self).keys(), key = compare)
        for key in sorted_list:
            value = ModuleObj.get_data(self)[key]
            # I2C pad pins get their pull/output settings forced off.
            if self.is_i2cPadPin(value.get_modeName(key, value.get_defMode())):
                value.set_inpullEn(False)
                value.set_outHigh(False)
                value.set_inpullSelHigh(False)
            gen_str += '''//Configuration for %s\n''' %(key.upper())
            mode_name = GpioData.get_modeName(key, value.get_defMode())
            val = ''
            if mode_name != '':
                if self.__gpio_column_enable:
                    flag = False
                    if mode_name.find('//') != -1:
                        flag = True
                    if flag:
                        # Column '2' selects the alternate ("//B") function:
                        # its mode number is offset by _modNum.
                        if value.get_modeVec()[value.get_defMode()] == '1':
                            val = str(value.get_defMode())
                        elif value.get_modeVec()[value.get_defMode()] == '2':
                            val = str(value.get_defMode() + GpioData._modNum)
                    else:
                        val = str(value.get_defMode())
                else:
                    val = str(value.get_defMode())
            # Zero-pad to two digits (GPIO_MODE_0x naming).
            if len(val) < 2:
                val = '0' + val
            pull_en = ''
            if value.get_inPullEn():
                pull_en = 'ENABLE'
            else:
                pull_en = 'DISABLE'
            pull_sel = ''
            if value.get_inPullSelHigh():
                pull_sel = 'UP'
            else:
                pull_sel = 'DOWN'
            out_high = ''
            if value.get_outHigh():
                out_high = 'ONE'
            else:
                out_high = 'ZERO'
            smt_en = ''
            if value.get_smtEn():
                smt_en = 'ENABLE'
            else:
                smt_en= 'DISABLE'
            ies_en = ''
            if value.get_iesEn():
                ies_en = 'ENABLE'
            else:
                ies_en = 'DISABLE'
            gen_str += '''#define %s_MODE\t\t\tGPIO_MODE_%s\n''' %(key.upper(), val)
            gen_str += '''#define %s_DIR\t\t\tGPIO_DIR_%s\n''' %(key.upper(), value.get_defDir())
            gen_str += '''#define %s_PULLEN\t\tGPIO_PULL_%s\n''' %(key.upper(), pull_en)
            gen_str += '''#define %s_PULL\t\t\tGPIO_PULL_%s\n''' %(key.upper(), pull_sel)
            gen_str += '''#define %s_DATAOUT\t\tGPIO_OUT_%s\n''' %(key.upper(), out_high)
            gen_str += '''#define %s_SMT\t\t\tGPIO_SMT_%s\n''' %(key.upper(), smt_en)
            gen_str += '''#define %s_IES\t\t\tGPIO_IES_%s\n''' %(key.upper(), ies_en)
            if self.__drvCur:
                drv_cur = 'DRV_UNSUPPORTED'
                if value.get_drvCur() != '':
                    drv_cur = value.get_drvCur()
                gen_str += '''#define %s_DRV\t\t\tGPIO_%s\n''' %(key.upper(), drv_cur)
            gen_str += '''\n'''
        return gen_str
    def is_i2cPadPin(self, name):
        """Return True when the mode name looks like an I2C pad function
        (SCL<n>/SDA<n>)."""
        if re.match(r'^SCL\d+$', name) or re.match(r'^SDA\d+$', name):
            return True
        return False
    def fill_cFile(self):
        """Return the cust_gpio_usage.h content: for every user variable
        name of every pin (unless tagged GPIO_INIT_NO_COVER), the pin alias
        define plus the mode/clock defines derived from the mode table."""
        gen_str = ''
        for key in sorted_key(ModuleObj.get_data(self).keys()):
            value = ModuleObj.get_data(self)[key]
            if 'GPIO_INIT_NO_COVER' in value.get_varNames():
                continue
            for varName in value.get_varNames():
                # Bit 31 marks the value as a valid DCT pin handle.
                gen_str += '''#define %s\t\t\t(%s | 0x80000000)\n''' %(varName.upper(), key.upper())
                if value.get_eintMode():
                    gen_str += '''#define %s_M_EINT\t\tGPIO_MODE_00\n''' % (varName)
                if self.__gpio_column_enable:
                    # Emit special-mode defines first (one per _specMap
                    # entry, taking the first matching mode slot)...
                    temp_list = []
                    for item in GpioData._specMap.keys():
                        regExp = '[_A-Z0-9:]*%s[_A-Z0-9:]*' %(item.upper())
                        pat = re.compile(regExp)
                        for i in range(0, GpioData._modNum):
                            list = value.get_modeVec()
                            mode_name = GpioData.get_modeName(key, i)
                            if list[i] == '1':
                                if mode_name.find('//') != -1:
                                    mode_name = mode_name.split('//')[0]
                            elif list[i] == '2':
                                if mode_name.find('//') != -1:
                                    mode_name = mode_name.split('//')[1]
                            if pat.match(mode_name):
                                if cmp(item, 'eint') == 0 and ((value.get_eintMode() or mode_name.find('MD_EINT') != -1)):
                                    continue
                                gen_str += '''#define %s%s\t\tGPIO_MODE_0%d\n''' % (varName.upper(), GpioData._specMap[item].upper(), i)
                                temp_list.append(i)
                                break
                    # ... then generic _M_<name> defines for the remaining
                    # enabled mode slots.
                    if not value.get_eintMode():
                        list = value.get_modeVec()
                        for i in range(0,GpioData._modNum):
                            mode_name = GpioData.get_modeName(key, i)
                            if list[i] == '0':
                                continue
                            elif list[i] == '1':
                                if mode_name.find('//') != -1:
                                    mode_name = mode_name.split('//')[0]
                            elif list[i] == '2':
                                if mode_name.find('//') != -1:
                                    mode_name = mode_name.split('//')[1]
                            if not i in temp_list:
                                gen_str += '''#define %s_M_%s\t\tGPIO_MODE_0%d\n''' %(varName, re.sub(r'\d{0,3}$', '', mode_name), i)
                    # CLKM<n> modes additionally get _CLK/_FREQ defines.
                    regExp = r'CLKM\d'
                    pat = re.compile(regExp)
                    for i in range(0, GpioData._modNum):
                        mode = GpioData.get_modeName(key, i)
                        if pat.match(mode):
                            gen_str += '''#define %s_CLK\t\tCLK_OUT%s\n''' % (varName, mode[4:])
                            temp = ''
                            if varName in GpioData._freqMap.keys():
                                temp = GpioData._freqMap[varName]
                            else:
                                temp = 'GPIO_CLKSRC_NONE'
                            gen_str += '''#define %s_FREQ\t\t%s\n''' % (varName, temp)
                else:
                    # Non-column chips: only the default mode is considered.
                    mode_name = GpioData.get_modeName(key, value.get_defMode())
                    bmatch = False
                    for item in GpioData._specMap.keys():
                        regExp = '[_A-Z0-9:]*%s[_A-Z0-9:]*' %(item.upper())
                        pat = re.compile(regExp)
                        if pat.match(mode_name):
                            if cmp(item, 'eint') == 0 and ((value.get_eintMode() or mode_name.find('MD_EINT') != -1)):
                                continue
                            gen_str += '''#define %s%s\t\tGPIO_MODE_0%d\n''' % (varName.upper(), GpioData._specMap[item].upper(), value.get_defMode())
                            bmatch = True
                    if not bmatch:
                        gen_str += '''#define %s_M_%s\t\tGPIO_MODE_0%d\n''' % (varName.upper(), re.sub(r'\d{0,3}$', '', mode_name), value.get_defMode())
                        if value.get_defMode() != 0:
                            mode_name = GpioData.get_modeName(key, 0)
                            gen_str += '''#define %s_M_%s\t\tGPIO_MODE_0%d\n''' % (varName.upper(), re.sub(r'\d{0,3}$', '', mode_name), 0)
            gen_str += '''\n'''
        return gen_str
    def fill_dtsiFile(self):
        """Return the cust_gpio.dtsi content: one &pio child node per pin
        with its default pinmux/pull/output settings, followed by the &gpio
        pinctrl name/phandle lists."""
        gen_str = '''&pio {\n\n'''
        gen_str += '''\tgpio_pins_default: gpiodef{\n\t};\n\n'''
        #sorted_list = sorted(ModuleObj.get_data(self).keys(), key = compare)
        for key in sorted_key(ModuleObj.get_data(self).keys()):
        #for key in sorted_list:
            value = ModuleObj.get_data(self)[key]
            gen_str += '''\t%s: gpio@%s {\n''' %(key.lower(), key[4:])
            gen_str += '''\t\tpins_cmd_dat {\n'''
            mode = value.get_defMode()
            mode_name = GpioData.get_modeName(key, mode)
            if self.__gpio_column_enable:
                # Resolve "A//B" pairs via the mode vector column value.
                mode_val = value.get_modeVec()[mode]
                if mode_val == '1':
                    if mode_name.find('//') != -1:
                        mode_name = mode_name.split('//')[0]
                elif mode_val == '2':
                    if mode_name.find('//') != -1:
                        mode_name = mode_name.split('//')[1]
            gen_str += '''\t\t\tpins = <PINMUX_GPIO%s__FUNC_%s>;\n''' %(key[4:], mode_name)
            gen_str += '''\t\t\tslew-rate = <%d>;\n''' %(value.ge_defDirInt())
            # NOTE(review): an empty line is emitted when the pull is
            # enabled (temp stays '') -- presumably intentional; confirm.
            temp = ''
            if not value.get_inPullEn():
                temp = 'bias-disable;'
            gen_str += '''\t\t\t%s\n''' %(temp)
            if value.get_inPullSelHigh():
                temp = '11'
            else:
                temp = '00'
            gen_str += '''\t\t\tbias-pull-down = <%s>;\n''' %(temp)
            if value.get_outHigh():
                temp = 'high'
            else:
                temp = 'low'
            gen_str += '''\t\t\toutput-%s;\n''' %(temp)
            gen_str += '''\t\t\tinput-schmitt-enable = <%d>;\n''' %(value.get_smtEn())
            gen_str += '''\t\t};\n'''
            gen_str += '''\t};\n'''
        gen_str += '''};\n\n'''
        gen_str += '''&gpio {\n'''
        # Wrap the pinctrl-names list at roughly 100 characters per line.
        lineLen = 0
        gen_str += '''\tpinctrl-names = "default",'''
        lineLen += 30
        for i in range(0, GpioData._count-1):
            gen_str += '''"gpio%d",''' %(i)
            if i < 10:
                lineLen += 8
            elif i < 100:
                lineLen += 9
            elif i >= 100:
                lineLen += 10
            if lineLen > 100:
                gen_str += '''\n'''
                lineLen = 0
        gen_str += '''"gpio%d";\n''' %(GpioData._count-1)
        gen_str += '''\tpinctrl-0 = <&gpio_pins_default>;\n'''
        for i in range(1, GpioData._count):
            gen_str += '''\tpinctrl-%d = <&gpio%d>;\n''' %(i, i-1)
        gen_str += '''\n'''
        gen_str += '''\tstatus = \"okay\";\n'''
        gen_str += '''};\n'''
        return gen_str
    def fill_pinfunc_hFile(self):
        """Return the <chip>-pinfunc.h content: one PINMUX define per pin
        function; the second name of an "A//B" pair is offset by 8."""
        gen_str = '''#include \"mt65xx.h\"\n\n'''
        #sorted_list = sorted(ModuleObj.get_data(self).keys(), key = compare)
        for key in sorted_key(ModuleObj.get_data(self).keys()):
        #for key in sorted_list:
            value = ModuleObj.get_data(self)[key]
            for i in range(0, GpioData._modNum):
                mode_name = GpioData.get_modeName(key, i)
                if mode_name != '':
                    lst = []
                    if mode_name.find('//') != -1:
                        lst = mode_name.split('//')
                    else:
                        lst.append(mode_name)
                    for j in range(0, len(lst)):
                        gen_str += '''#define PINMUX_GPIO%s__FUNC_%s (MTK_PIN_NO(%s) | %d)\n''' %(key[4:], lst[j], key[4:], (i + j*8))
            gen_str += '''\n'''
        gen_str += '''\n'''
        return gen_str
    def fill_pinctrl_hFile(self):
        """Return the pinctrl-mtk-<chip>.h content: an MTK_PIN entry per
        pin with all of its MTK_FUNCTION alternatives (no EINT support in
        this base implementation)."""
        gen_str = '''#include <linux/pinctrl/pinctrl.h>\n'''
        gen_str += '''#include <pinctrl-mtk-common.h>\n\n'''
        gen_str += '''static const struct mtk_desc_pin mtk_pins_%s[] = {\n''' %(ModuleObj.get_chipId().lower())
        #sorted_list = sorted(ModuleObj.get_data(self).keys(), key = compare)
        for key in sorted_key(ModuleObj.get_data(self).keys()):
        #for key in sorted_list:
            value = ModuleObj.get_data(self)[key]
            gen_str += '''\tMTK_PIN(\n'''
            gen_str += '''\t\tPINCTRL_PIN(%s, \"%s\"),\n''' %(key[4:], key.upper())
            gen_str += '''\t\tNULL, \"%s\",\n''' %(ModuleObj.get_chipId().lower())
            gen_str += '''\t\tMTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT)'''
            for i in range(0, GpioData._modNum):
                mode_name = GpioData.get_modeName(key, i)
                if mode_name != '':
                    lst = []
                    if mode_name.find('//') != -1:
                        lst = mode_name.split('//')
                    else:
                        lst.append(mode_name)
                    for j in range(0, len(lst)):
                        gen_str += ''',\n\t\tMTK_FUNCTION(%d, "%s")''' %(i + j * 8, lst[j])
            gen_str += '''\n\t),\n'''
        gen_str += '''};\n'''
        return gen_str
    def fill_mapping_dtsiFile(self):
        """Return the usage-mapping dtsi: variable name -> pin number for
        every variable listed in GPIO_VARIABLES_MAPPING."""
        gen_str = '''&gpio_usage_mapping {\n'''
        #sorted_list = sorted(ModuleObj.get_data(self).keys(), key = compare)
        for key in sorted_key(ModuleObj.get_data(self).keys()):
        #for key in sorted_list:
            value = ModuleObj.get_data(self)[key]
            for varName in value.get_varNames():
                if varName != '' and varName.lower() in GpioData._mapList:
                    gen_str += '''\t%s = <%s>;\n''' %(varName, key[4:])
        gen_str += '''};\n'''
        return gen_str
    def set_eint_map_table(self, map_table):
        """Install the GPIO-to-EINT mapping table (used by subclasses that
        emit EINT info)."""
        GpioData.set_eint_map_table(map_table)
    def fill_init_default_dtsiFile(self):
        # Base chips emit no gpio_init_default node; subclasses override.
        return ''
class GpioObj_whitney(GpioObj):
    """GPIO generator variant for Whitney-family chips.

    Behaves exactly like :class:`GpioObj` except that no pin is ever
    treated as an I2C pad pin.
    """
    def __init__(self):
        GpioObj.__init__(self)
    def parse(self, node):
        # Log which variant runs, then delegate to the base implementation.
        log(LogLevel.info, 'GpioObj_whitney parse')
        GpioObj.parse(self, node)
    def gen_files(self):
        # Straight delegation to the base implementation.
        GpioObj.gen_files(self)
    def gen_spec(self, para):
        # Straight delegation to the base implementation.
        GpioObj.gen_spec(self, para)
    def is_i2cPadPin(self, name):
        # Whitney chips never force I2C pad-pin settings (see fill_hFile).
        return False
class GpioObj_MT6759(GpioObj):
    """GPIO generator variant for MT6759-family chips.

    Differences from :class:`GpioObj`: no pin is treated as an I2C pad
    pin, and the usage-mapping dtsi uses <&pio N 0> phandle syntax.
    """
    def __init__(self):
        GpioObj.__init__(self)
    def parse(self, node):
        # Straight delegation to the base implementation.
        GpioObj.parse(self, node)
    def gen_files(self):
        # Straight delegation to the base implementation.
        GpioObj.gen_files(self)
    def gen_spec(self, para):
        # Straight delegation to the base implementation.
        GpioObj.gen_spec(self, para)
    def is_i2cPadPin(self, name):
        # MT6759 never forces I2C pad-pin settings (see fill_hFile).
        return False
    def fill_mapping_dtsiFile(self):
        """Return the usage-mapping dtsi using <&pio N 0> phandles instead
        of plain pin numbers."""
        gen_str = '''&gpio_usage_mapping {\n'''
        #sorted_list = sorted(ModuleObj.get_data(self).keys(), key = compare)
        for key in sorted_key(ModuleObj.get_data(self).keys()):
        #for key in sorted_list:
            value = ModuleObj.get_data(self)[key]
            for varName in value.get_varNames():
                if varName != '' and varName.lower() in GpioData._mapList:
                    gen_str += '''\t%s = <&pio %s 0>;\n''' %(varName, key[4:])
        gen_str += '''};\n'''
        return gen_str
class GpioObj_MT6739(GpioObj_MT6739.__name__ and GpioObj_MT6759):
    """MT6739 variant: additionally emits per-pin EINT mapping info in the
    generated pinctrl header."""
    def __init__(self):
        GpioObj_MT6759.__init__(self)
    def get_eint_index(self, gpio_index):
        """Return the EINT number mapped to pin *gpio_index* (a numeric
        string), or -1 when the pin has no EINT mapping."""
        if string.atoi(gpio_index) in GpioData._map_table.keys():
            return GpioData._map_table[string.atoi(gpio_index)]
        return -1
    def fill_pinctrl_hFile(self):
        """Return the pinctrl-mtk-<chip>.h content; unlike the base class,
        pins with an EINT mapping get MTK_EINT_FUNCTION(0, <eint>)."""
        gen_str = '''#include <linux/pinctrl/pinctrl.h>\n'''
        gen_str += '''#include <pinctrl-mtk-common.h>\n\n'''
        gen_str += '''static const struct mtk_desc_pin mtk_pins_%s[] = {\n''' % (ModuleObj.get_chipId().lower())
        # sorted_list = sorted(ModuleObj.get_data(self).keys(), key = compare)
        for key in sorted_key(ModuleObj.get_data(self).keys()):
        # for key in sorted_list:
            value = ModuleObj.get_data(self)[key]
            gen_str += '''\tMTK_PIN(\n'''
            gen_str += '''\t\tPINCTRL_PIN(%s, \"%s\"),\n''' % (key[4:], key.upper())
            gen_str += '''\t\tNULL, \"%s\",\n''' % (ModuleObj.get_chipId().lower())
            eint_index = self.get_eint_index(key[4:])
            if eint_index != -1:
                gen_str += '''\t\tMTK_EINT_FUNCTION(%d, %d)''' % (0, eint_index)
            else:
                gen_str += '''\t\tMTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT)'''
            for i in range(0, GpioData._modNum):
                mode_name = GpioData.get_modeName(key, i)
                if mode_name != '':
                    lst = []
                    if mode_name.find('//') != -1:
                        lst = mode_name.split('//')
                    else:
                        lst.append(mode_name)
                    for j in range(0, len(lst)):
                        gen_str += ''',\n\t\tMTK_FUNCTION(%d, "%s")''' % (i + j * 8, lst[j])
            gen_str += '''\n\t),\n'''
        gen_str += '''};\n'''
        return gen_str
# remove dct in lk
class GpioObj_MT6771(GpioObj_MT6739):
    """MT6771 variant: emits gpio_init_default boot entries, skipping pins
    whose variable names include GPIO_INIT_NO_COVER."""
    def fill_init_default_dtsiFile(self):
        """Return the &gpio node text listing every covered pin's boot
        defaults as <num mode dir dout pullEn pullSel smtEn> tuples."""
        gen_str = '''\n&gpio{\n'''
        gen_str += '''\tgpio_init_default = '''
        for key in sorted_key(ModuleObj.get_data(self).keys()):
            value = ModuleObj.get_data(self)[key]
            # if var name contains GPIO_INIT_NO_COVER, the device tree info of the pin in cust.dtsi file would not gen
            if "GPIO_INIT_NO_COVER" in value.get_varNames():
                continue
            num = string.atoi(key[4:])
            defMode = value.get_defMode()
            dout = 1 if value.get_outHigh() else 0
            pullEn = 1 if value.get_inPullEn() else 0
            pullSel = 1 if value.get_inPullSelHigh() else 0
            smtEn = 1 if value.get_smtEn() else 0
            gen_str += '''<%d %d %d %d %d %d %d>,\n\t\t''' % (num, defMode, value.ge_defDirInt(), dout, pullEn, pullSel, smtEn)
        # Drop the trailing ",\n\t\t" (4 chars) left by the last entry.
        gen_str = gen_str[0: len(gen_str) - 4]
        gen_str += ';'
        gen_str += '''\n};\n'''
        return gen_str
class GpioObj_MT6763(GpioObj_MT6759):
    """MT6763 variant: emits gpio_init_default boot entries for all pins.

    Unlike :class:`GpioObj_MT6771`, no pin is skipped.
    """
    def fill_init_default_dtsiFile(self):
        """Return the &gpio node text listing every pin's boot defaults as
        <num mode dir dout pullEn pullSel smtEn> tuples."""
        gen_str = '''\n&gpio{\n'''
        gen_str += '''\tgpio_init_default = '''
        for key in sorted_key(ModuleObj.get_data(self).keys()):
            value = ModuleObj.get_data(self)[key]
            num = string.atoi(key[4:])
            defMode = value.get_defMode()
            dout = 1 if value.get_outHigh() else 0
            pullEn = 1 if value.get_inPullEn() else 0
            pullSel = 1 if value.get_inPullSelHigh() else 0
            smtEn = 1 if value.get_smtEn() else 0
            gen_str += '''<%d %d %d %d %d %d %d>,\n\t\t''' % (num, defMode, value.ge_defDirInt(), dout, pullEn, pullSel, smtEn)
        # Drop the trailing ",\n\t\t" (4 chars) left by the last entry.
        gen_str = gen_str[0: len(gen_str) - 4]
        gen_str += ';'
        gen_str += '''\n};\n'''
        # FIX: removed the dataset artifact (" | StarcoderdataPython |")
        # that had been fused onto this return line; it made the statement
        # a syntax error (trailing '|' operator with no operand).
        return gen_str
16422 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import unittest
import numpy as np
import numpy.testing as npt
import pandas as pd
from riip.material import RiiMaterial
class KnownValues(unittest.TestCase):
    """Known-value tests for :class:`riip.material.RiiMaterial`.

    Each entry checks the refractive index n (and extinction coefficient k
    for tabulated data) computed from a dispersion formula against a value
    derived by hand from the formula definition.
    """
    # Entries: (formula id, coefficient list c, wavelength wl, expected n).
    known_values = [
        (1, [0.0 for _ in range(17)], 1.0, 1.0),
        (1, [1.0 if _ < 3 else 0 for _ in range(17)], 0.5, np.sqrt(5 / 3)),
        (1, [0.5 if _ < 3 else 0 for _ in range(17)], 1.0, np.sqrt(6.5 / 3)),
        (2, [0.0 for _ in range(17)], 1.0, 1.0),
        (2, [1.0 if _ < 3 else 0 for _ in range(17)], 0.5, np.sqrt(5 / 3)),
        (2, [0.5 if _ < 3 else 0 for _ in range(17)], 1.0, np.sqrt(2.5)),
        (3, [0.0 for _ in range(17)], 1.0, 0.0),
        (3, [1.0 if _ < 3 else 0 for _ in range(17)], 0.5, np.sqrt(1.5)),
        (
            3,
            [0.5 if _ < 3 else 0 for _ in range(17)],
            2.0,
            np.sqrt((1 + np.sqrt(2)) / 2),
        ),
        (4, [0.0 for _ in range(17)], 0.5, 0.0),
        (4, [1.0, 1.0, 2.0, 3.0, 2.0] + [0 for _ in range(5, 17)], 2.0, np.sqrt(1 / 5)),
        (
            4,
            [0.0 for _ in range(9)] + [0.5 for _ in range(9, 17)],
            2.0,
            np.sqrt(2 * np.sqrt(2)),
        ),
        (5, [0.0 for _ in range(11)], 1.0, 0.0),
        (5, [2.0 for _ in range(11)], 0.5, 4.5),
        (5, [0.5 for _ in range(11)], 2.0, 0.5 + 2.5 * np.sqrt(2)),
        (6, [0.0 for _ in range(11)], 1.0, 1.0),
        (6, [0.5 for _ in range(11)], 2.0, 11.5),
        (6, [0.2 for _ in range(11)], 0.25, 1.2 - 1 / 15.8),
        (7, [0.0 for _ in range(6)], 0.5, 0.0),
        (
            7,
            [1.0 for _ in range(6)],
            np.sqrt(1.028),
            3 + 1.028 + 1.028 ** 2 + 1.028 ** 3,
        ),
        (7, [1.0, 0, 0, 0.5, 0.5, 0.5], 0.5, 1 + 21 / 2 ** 7),
        (8, [0.0 for _ in range(4)], 0.5, 1.0),
        (8, [0.1 for _ in range(4)], np.sqrt(0.2), np.sqrt(1.64 / 0.68)),
        (8, [0.2, 0, 0, 0.2], 0.5, np.sqrt(1.5 / 0.75)),
        (9, [0.0 for _ in range(6)], 0.5, 0.0),
        (
            9,
            [1.0 for _ in range(6)],
            np.sqrt(2),
            np.sqrt(2 + (np.sqrt(2) - 1) / (4 - 2 * np.sqrt(2))),
        ),
        (9, [1.0 for _ in range(6)], 2.0, np.sqrt(11 / 6)),
    ]
    # Entries: (formula id 0 = tabulated, [[wl, n, k], ...] samples,
    # wavelength, expected (n, k)).
    known_values_for_tabulated = [
        (0, [[0.01 * i, 0.0, 0.0] for i in range(100)], 0.5, (0.0, 0.0)),
        (0, [[0.01 * i, 0.02 * i, 0.0] for i in range(100)], 0.5, (1.0, 0.0)),
        (0, [[0.01 * i, 1.3, 0.01 * i] for i in range(100)], 0.5, (1.3, 0.5)),
    ]
    def test_dispersion_formula_known_values(self):
        """n(wl) should match hand-computed values for formulas 1-9."""
        for i, (formula, cs, wl, result) in enumerate(self.known_values):
            # Minimal single-row catalog describing one material record.
            catalog = pd.DataFrame(
                {
                    "book": "",
                    "page": "",
                    "formula": [formula],
                    "tabulated": [""],
                    "wl_n_min": [0.25],
                    "wl_n_max": [2.0],
                    "wl_k_min": [0.25],
                    "wl_k_max": [2.0],
                }
            )
            print(cs)
            data = pd.DataFrame({"id": 0, "c": cs}).set_index("id")
            material = RiiMaterial(0, catalog, data)
            n = material.n(wl)
            npt.assert_array_almost_equal(n, np.atleast_1d(result))
    def test_dispersion_formula_for_tabulated(self):
        """n(wl)/k(wl) should interpolate tabulated nk data correctly."""
        for i, (formula, wlnk, wl, result) in enumerate(
            self.known_values_for_tabulated
        ):
            # Split the [[wl, n, k], ...] samples into columns.
            _wlnk = np.asarray(wlnk)
            wls = _wlnk[:, 0]
            ns = _wlnk[:, 1]
            ks = _wlnk[:, 2]
            catalog = pd.DataFrame(
                {
                    "book": "",
                    "page": "",
                    "formula": [formula],
                    "tabulated": ["nk"],
                    "num_n": 100,
                    "num_k": 100,
                    "wl_n_min": [0.25],
                    "wl_n_max": [2.0],
                    "wl_k_min": [0.25],
                    "wl_k_max": [2.0],
                }
            )
            data = pd.DataFrame(
                {"id": 0, "wl_n": wls, "n": ns, "wl_k": wls, "k": ks}
            ).set_index("id")
            material = RiiMaterial(0, catalog, data)
            print(material.n(wl), material.k(wl), result)
            self.assertAlmostEqual(material.n(wl).item(), result[0])
            self.assertAlmostEqual(material.k(wl).item(), result[1])
    def test_dispersion_formula_exception(self):
        """n/k should raise ValueError for wavelengths outside the
        catalog's valid range (scalar and array inputs)."""
        catalog = pd.DataFrame(
            {
                "book": "",
                "page": "",
                "formula": [1],
                "tabulated": [""],
                "wl_n_min": [0.25],
                "wl_n_max": [2.0],
                "wl_k_min": [0.25],
                "wl_k_max": [2.0],
            }
        )
        data = pd.DataFrame({"id": 0, "c": list(range(17))}).set_index("id")
        material = RiiMaterial(0, catalog, data)
        # 0.1 is below wl_n_min/wl_k_min; 2.1 is above the max.
        with self.assertRaises(ValueError):
            material.n(0.1)
        with self.assertRaises(ValueError):
            material.k(0.1)
        with self.assertRaises(ValueError):
            material.n(2.1)
        with self.assertRaises(ValueError):
            material.k(2.1)
        # Array input containing out-of-range wavelengths must also raise.
        with self.assertRaises(ValueError):
            material.n(np.array([0.1 * i for i in range(21)]))
        with self.assertRaises(ValueError):
            material.k(np.array([0.1 * i for i in range(21)]))
# Allow running this test module directly (python <file>) as well as via
# a test runner.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
189356 | #!/usr/bin/env python3
"""Zig Zag.
Given an array A (distinct elements) of size N.
Rearrange the elements of array in zig-zag fashion.
The converted array should be in form a < b > c < d > e < f.
The relative order of elements is same in the output
i.e you have to iterate on the original array only.
Source:
https://practice.geeksforgeeks.org/problems/convert-array-into-zig-zag-fashion/0
"""
def zig_zag(items: list) -> list:
    """Rearrange the elements of a list in zig-zag fashion, in place.

    After the call (for distinct elements) the list satisfies
    ``items[0] < items[1] > items[2] < items[3] > ...``.

    :param items: list of mutually distinct, comparable elements
    :return: the same list object, rearranged in place

    FIX: the original fell through to the "descending expected" swap even
    when an ascending pair was expected and already in order (the bare
    ``else: if items[i] < items[i+1]`` fired for ``less_than == True``),
    which destroyed the zig-zag property (e.g. [1, 4, 3, 2] was turned
    into a non-zigzag arrangement on the very first step).
    """
    less_than = True  # True: require items[i] < items[i+1]; False: '>'
    for i in range(len(items) - 1):
        if less_than:
            # A rise is expected: swap only a descending pair.
            if items[i] > items[i + 1]:
                items[i], items[i + 1] = items[i + 1], items[i]
        elif items[i] < items[i + 1]:
            # A fall is expected: swap only an ascending pair.
            items[i], items[i + 1] = items[i + 1], items[i]
        less_than = not less_than  # alternate the expected relation
    return items
def zig_zag_both(items: list) -> tuple:
    """Return a tuple of (copy of the original list, zig-zag list).

    The snapshot is taken before :func:`zig_zag` mutates ``items`` in
    place, so the first element preserves the caller's original ordering.
    """
    snapshot = items[:]
    rearranged = zig_zag(items)
    return (snapshot, rearranged)
def main():
    """Demonstrate the zig-zag helpers on a couple of sample lists."""
    samples = ([4, 3, 7, 8, 6, 2, 1], [1, 4, 3, 2])
    for sample in samples:
        print(zig_zag(list(sample)))
    for sample in samples:
        print(zig_zag_both(list(sample)))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1721945 | <filename>dags/daily_simple_stats.py
"""
# Simple Stats (Conversation Stats)
This dag is to process agent's analytics data from agent's interaction with dashboard.
## Source
* Database: Anayltics,
* Tables: messages
## Return
* Database: Stats,
* Tables: conversations
"""
import os
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
from utils.airflow_helper import get_environments
from utils.email_helper import email_notify
default_args = {
'owner': 'Jaekwan',
'depends_on_past': False,
'start_date': datetime(2020, 5, 27),
'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
'on_failure_callback': email_notify
}
dag = DAG('Daily_simple_stats_for_conversation',
catchup=False,
default_args=default_args,
# run every day at 3:30am PST after conversation closure
schedule_interval='30 03 * * 1-7')
dag.doc_md = __doc__
# It is not recommanded to use Variable with global scope
# but not sure if there is another way to inject airflow variables
# into envionment variables.
env = os.environ.copy()
env.update(get_environments())
daily_simple_stats = BashOperator(
task_id='simple_stats_script',
bash_command='python -m tools.analysis.simple_stats \
--start_date="{{ execution_date.format("%Y-%m-%d") }} 00:00:00" \
--end_date="{{ execution_date.format("%Y-%m-%d") }} 23:59:59" \
--timezone="{{ var.value.TIMEZONE }}" \
--message_env_filter={{ var.value.ENVIRONMENT }} \
--expand_to_full_conversations \
--store_convo_stats',
retries=1,
env=env,
dag=dag)
| StarcoderdataPython |
3277208 | <gh_stars>0
from boto3.s3.transfer import S3Transfer
from datetime import datetime
import boto3
import logging
import os
import frontmatter
# Module-level AWS clients/resources: created once per Lambda container and
# reused across invocations.
s3 = boto3.client('s3')
s3r = boto3.resource('s3')
# NOTE(review): "transfesr" looks like a typo of "transfer" and the object is
# unused below — confirm no external module imports this name before renaming.
transfesr = S3Transfer(s3)
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.getenv('TABLE_NAME'))
def insert_blog(file_path, key, bucket_name, btype):
    """Parse a downloaded markdown blog post and store its metadata in DynamoDB.

    Args:
        file_path: local path of the downloaded markdown file.
        key: S3 object key the file came from (its directory becomes part of
            the stored source URI).
        bucket_name: S3 bucket the file came from.
        btype: numeric blog category, stored as a string.

    Returns:
        The DynamoDB put_item response on success, or None if parsing or
        writing failed (the error is logged).
    """
    # BUG FIX: 'response' was previously only assigned inside the try block,
    # so a failed put_item raised NameError on the return statement below.
    response = None
    with open(file_path) as blog_content:
        logging.info(f"Insert blog: {key}")
        store_path = os.path.dirname(key)
        try:
            post = frontmatter.load(blog_content)
            item = {
                "category": str(btype),
                "title": post["title"],
                "author": post["author"],
                "summary": "",
                "src": f"s3://{bucket_name}/{store_path}/",
                "created_at": post["date"],
                "updated_at": datetime.now().strftime('%Y-%m-%d-%H-%M-%S'),
            }
            response = table.put_item(Item=item)
        except Exception as e:
            logging.error(f"Fail insert blog {key} :{e}")
    return response
def handler(event, context):
    """Lambda entry point: ingest every S3 object referenced by the event."""
    for record in event['Records']:
        s3_info = record['s3']
        bucket_name = s3_info['bucket']['name']
        key = s3_info['object']['key']
        # /tmp is the only writable storage inside a Lambda container.
        os.makedirs('/tmp/blogs', exist_ok=True)
        file_path = '/tmp/blogs/tempfile' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
        btype = 0  # all blogs currently share category 0
        try:
            s3r.Bucket(bucket_name).download_file(key, file_path)
            insert_blog(file_path, key, bucket_name, btype)
        except Exception as e:
            logging.error(f'Update blogs failed :{e}')
3324637 | <gh_stars>1-10
from .base import UIBase
from .button import Button, ToggleButton
from .ui_renderer import UIRenderer
# Public API of the package: names re-exported from the submodules above.
__all__ = [
    "UIBase",
    "Button", "ToggleButton",
    "UIRenderer",
]
| StarcoderdataPython |
1779424 | <reponame>squassina/seismic-deeplearning
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
from ignite.engine.engine import Engine, State, Events
from ignite.utils import convert_tensor
import torch.nn.functional as F
from toolz import curry
from torch.nn import functional as F
import numpy as np
def _upscale_model_output(y_pred, y):
ph, pw = y_pred.size(2), y_pred.size(3)
h, w = y.size(2), y.size(3)
if ph != h or pw != w:
y_pred = F.upsample(input=y_pred, size=(h, w), mode="bilinear")
return y_pred
def create_supervised_trainer(
    model,
    optimizer,
    loss_fn,
    prepare_batch,
    device=None,
    non_blocking=False,
    output_transform=lambda x, y, y_pred, loss: {"loss": loss.item()},
):
    """Build an ignite Engine that runs one supervised training step per batch.

    Args:
        model: torch model to train; moved to *device* when one is given.
        optimizer: torch optimizer, stepped once per batch.
        loss_fn: callable(y_pred, y) returning a scalar loss tensor.
        prepare_batch: callable(batch, device=..., non_blocking=...) -> (x, y).
        device: optional torch device for the model.
        non_blocking: forwarded to prepare_batch for async host->device copies.
        output_transform: maps (x, y, y_pred, loss) to the engine output;
            defaults to a {"loss": float} dict.

    Returns:
        An ignite Engine wrapping the update step.
    """
    if device:
        model.to(device)
    def _update(engine, batch):
        # One optimisation step: forward pass, upscale the prediction to the
        # label's spatial size, compute loss, backprop, optimizer step.
        model.train()
        optimizer.zero_grad()
        x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
        y_pred = model(x)
        y_pred = _upscale_model_output(y_pred, y)
        # squeeze(1) drops the singleton channel dim so the loss sees (N, H, W).
        loss = loss_fn(y_pred.squeeze(1), y.squeeze(1))
        loss.backward()
        optimizer.step()
        return output_transform(x, y, y_pred, loss)
    return Engine(_update)
@curry
def val_transform(x, y, y_pred):
    """Package a validation batch into the dict layout downstream metrics expect.

    Detaches predictions and labels so no autograd graph is retained.
    """
    batch_output = dict(image=x, y_pred=y_pred.detach(), mask=y.detach())
    return batch_output
def create_supervised_evaluator(
    model, prepare_batch, metrics=None, device=None, non_blocking=False, output_transform=val_transform,
):
    """Build an ignite Engine that runs inference and attaches *metrics*.

    Args:
        model: torch model, evaluated under torch.no_grad().
        prepare_batch: callable(batch, device=..., non_blocking=...) -> (x, y).
        metrics: optional dict of name -> ignite metric to attach to the engine.
        device: optional torch device for the model.
        non_blocking: forwarded to prepare_batch.
        output_transform: maps (x, y, y_pred) to the engine output; defaults
            to val_transform's {"image", "y_pred", "mask"} dict.

    Returns:
        An ignite Engine with the metrics attached.
    """
    metrics = metrics or {}
    if device:
        model.to(device)
    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
            y_pred = model(x)
            # NOTE(review): this upscales to the *input* x's size, while the
            # trainer upscales to the label y's size — presumably x and y share
            # spatial dims for segmentation; confirm the asymmetry is intended.
            y_pred = _upscale_model_output(y_pred, x)
            return output_transform(x, y, y_pred)
    engine = Engine(_inference)
    for name, metric in metrics.items():
        metric.attach(engine, name)
    return engine
def create_supervised_trainer_apex(
    model,
    optimizer,
    loss_fn,
    prepare_batch,
    device=None,
    non_blocking=False,
    output_transform=lambda x, y, y_pred, loss: {"loss": loss.item()},
):
    """Build an ignite training Engine using NVIDIA apex mixed precision.

    Same contract as create_supervised_trainer, but the backward pass goes
    through apex's amp loss scaling.  Note this variant does not upscale
    y_pred to the label size before computing the loss.
    """
    # Imported lazily so the module works without apex installed.
    from apex import amp
    if device:
        model.to(device)
    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()
        x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
        y_pred = model(x)
        loss = loss_fn(y_pred.squeeze(1), y.squeeze(1))
        # amp scales the loss to avoid fp16 gradient underflow, then unscales
        # gradients before optimizer.step().
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        optimizer.step()
        return output_transform(x, y, y_pred, loss)
    return Engine(_update)
# def create_supervised_evaluator_apex(
# model,
# prepare_batch,
# metrics=None,
# device=None,
# non_blocking=False,
# output_transform=lambda x, y, y_pred: (x, y, pred),
# ):
# metrics = metrics or {}
# if device:
# model.to(device)
# def _inference(engine, batch):
# model.eval()
# with torch.no_grad():
# x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
# y_pred = model(x)
# return output_transform(x, y, y_pred)
# engine = Engine(_inference)
# for name, metric in metrics.items():
# metric.attach(engine, name)
# return engine
| StarcoderdataPython |
4820373 | from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy import Column, Integer
import logging
log = logging.getLogger("interactor.database.connection")
class Base:
    """Declarative mixin: shared table naming, primary key, and repr.

    Subclasses must set ``__repr_columns__`` to the attribute names that
    should appear in ``repr()`` output.
    """
    # Metadata gets set by Sqlalchemy
    metadata = None
    # __repr_columns__ must be set by subclasses of Base
    __repr_columns__ = None
    @declared_attr
    def __tablename__(cls):
        # Table name defaults to the lowercased class name.
        return cls.__name__.lower()
    # Surrogate integer primary key shared by all tables.
    id = Column(Integer, primary_key=True)
    def __repr__(self):
        # Render e.g. "<user (name:bob, age:42)>" from the columns the
        # subclass declared in __repr_columns__.
        title = self.__tablename__
        cols = dict((key, getattr(self, key)) for key in self.__repr_columns__)
        columns = ", ".join("%s:%s" % (key, value) for key, value in cols.items())
        return "<%s (%s)>" % (title, columns)
# Rebind Base to the actual declarative base built from the mixin above;
# models should subclass this object, not the plain mixin.
Base = declarative_base(cls=Base)
| StarcoderdataPython |
1646583 | #!/usr/bin/python
#
# File: main.py
# Date: 25-Oct-14
# Author: <NAME> <<EMAIL>>
#
# Analytics dashboard for courses running on edx-platform.
#
# Top-level module.
import logging
import os
import re
import json
import webapp2
import datetime
import gsdata
import bqutil
import auth
import local_config
import urllib
from unidecode import unidecode
from logger import GetRecentLogLines
from auth import auth_required
from stats import DataStats
from datatable import DataTableField
from datasource import DataSource
from dashboard import DashboardRoutes
from developer import DeveloperRoutes
from admin import AdminRoutes
from custom_reports import CustomReportRoutes
from reports import Reports
from collections import defaultdict, OrderedDict
from templates import JINJA_ENVIRONMENT
from storage import FileStorageRoutes
# from gviz_data_table import encode
# from gviz_data_table import Table
from google.appengine.api import memcache
mem = memcache.Client()
#-----------------------------------------------------------------------------
class MainPage(auth.AuthenticatedHandler, DataStats, DataSource, Reports):
'''
Main python class which displays views.
'''
    @auth_required
    def get_main(self):
        '''
        Main page: show an HTML table of all courses, with the course number
        and title cells linking to each course's analytics page.
        '''
        courses = self.get_course_listings()
        # logging.info('course listings: %s' % courses['data'])
        def add_link(xstr, row=None):
            '''Wrap a cell's text in a link to the row's course page.'''
            cid = row.get('course_id', None)
            if cid:
                return "<a href=/course/%s>%s</a>" % (cid, xstr)
            else:
                return xstr
        # Build the listings table; eformat applies add_link to the chosen columns.
        html = self.list2table(map(DataTableField,
                                   [{'field': 'launch', 'title':'Course launch', 'width': '12%'},
                                    {'field': 'course_number', 'title': 'Course #', 'width': '5%'},
                                    {'field': 'course_image', 'title': 'Course image'},
                                    {'field': 'title', 'title': 'Course Title', 'width': '40%'},
                                    {'field': 'course_id', 'title': 'course ID', 'width': '12%'},
                                    ]),
                               courses['data'],
                               eformat={'course_number': add_link, 'title': add_link},
                               )
        data = self.common_data
        data.update({'data': {},
                     'is_staff': self.is_superuser(),
                     'is_pm': self.is_pm(),
                     'table': html,
                     })
        template = JINJA_ENVIRONMENT.get_template('courses.html')
        self.response.out.write(template.render(data))
@auth_required
def ajax_get_datafile(self, file_name=''):
'''
get static datafile
only certain files may be accessed.
'''
allowed_files = [ "geographic_regions_by_country", ]
self.response.headers['Content-Type'] = 'application/json'
if file_name not in allowed_files:
self.response.out.write(json.dumps({'ok': False, }))
return
the_fn = file_name + '.csv'
retdat = self.get_datafile(the_fn)
retdat['ok'] = True
retdat['orgname'] = self.ORGNAME
self.response.out.write(json.dumps(retdat))
@auth_required
def ajax_switch_collection(self):
'''
Switch collection to that specified.
'''
if not self.user in self.AUTHORIZED_USERS: # require superuser
return self.no_auth_sorry()
selection = self.request.GET.get('selection', None)
if selection is None:
selection = self.request.POST.get('selection', None)
if selection is None:
logging.error("[ajax_switch_selection] Error! selection=%s" % selection)
collection = selection.split('Option:', 1)[-1]
logging.info("="*50 + " collection=%s, selection=%s" % (collection, selection))
self.set_current_collection(collection)
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps({'ok': True,
'dataset_latest': self.use_dataset_latest(),
'collection_name': self.current_collection(),
'collections_available': self.collections_available(),
}))
# self.session_store.save_sessions(self.response)
@auth_required
def ajax_get_usage_stats(self, org=None, number=None, semester=None):
'''
Return json with course overall stats on activity, including number of registrants, viewers,
explorers, certified.
'''
course_id = '/'.join([org, number, semester])
usdat = self.compute_usage_stats(course_id)['data'][0]
# categories = ['registered', 'viewed', 'explored']
data = {'data': [usdat] }
# logging.info('ajax_get_usage_stats data=%s' % data)
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(data))
    @auth_required
    def ajax_get_enrollment_stats(self, org=None, number=None, semester=None):
        '''
        Return enrollment stats in "series" format for HighCharts.
        Uses course listings to determine the course launch date; the plot
        window starts ~9 months before launch and is then clipped to the
        dates actually present in the BigQuery data.
        '''
        course_id = '/'.join([org, number, semester])
        courses = self.get_course_listings()
        start = courses['data_by_key'][course_id]['launch']
        (y,m,d) = map(int, start.split('-'))
        start_dt = datetime.datetime(y, m, d)
        start_dt = start_dt - datetime.timedelta(days=32*9) # start plot 9 months before launch
        start_str = start_dt.strftime('%Y-%m-%d')
        end_dt = start_dt + datetime.timedelta(days=32*1) # default position for end selector
        end_str = end_dt.strftime('%Y-%m-%d')
        logging.info('initial start_dt=%s, end_dt=%s' % (start_dt, end_dt))
        bqdat = self.compute_enrollment_by_day(course_id, start=start_str)
        # dtrange accumulates the min/max dates actually seen in the data;
        # initialised to sentinels that any real date will replace.
        dtrange = ['2199-12-31', '1900-01-01']
        def getrow(x, field, scale):
            # Convert one BQ row to a [timestamp_ms, count] HighCharts point,
            # updating dtrange as a side effect.
            dtstr = x['date']
            if dtstr < dtrange[0]: # min max dates from actual data
                dtrange[0] = dtstr
            if dtstr > dtrange[1]:
                dtrange[1] = dtstr
            ts = self.datetime2milliseconds(dtstr=dtstr) # (dt - datetime.datetime(1970, 1, 1)).total_seconds() * 1000
            cnt = int(x[field])
            return [ts, cnt]
        def getseries(field):
            return {'name': field['name'], 'data': [ getrow(x, field['field'], field['scale']) for x in bqdat['data'] ]}
        def mkf(field, name, scale=1):
            return {'field': field, 'name': name, 'scale': scale}
        fields = [mkf('nenroll_cum', 'cumulative enrollment')]
        stats = [ getseries(sname) for sname in fields ]
        # Clip the default selector window to the range of real data.
        dtrange = [ datetime.datetime(*map(int, x.split('-'))) for x in dtrange ]
        start_dt = max(dtrange[0], start_dt)
        end_dt = max(dtrange[0], end_dt)
        if end_dt == start_dt:
            end_dt = end_dt + datetime.timedelta(days=32*2)
        end_dt = min(dtrange[1], end_dt)
        logging.info('dtrange=%s, actual start_dt=%s, end_dt=%s' % (dtrange, start_dt, end_dt))
        data = {'series': stats,
                'start_dt': self.datetime2milliseconds(start_dt),
                'end_dt': self.datetime2milliseconds(end_dt),
                }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(data))
    def setup_module_info(self, org=None, number=None, semester=None, url_name=None, seq=None):
        '''
        Assemble the template context shared by the problem, video, and html
        module views.

        org/number/semester identify the course; url_name identifies the
        module within the course axis.  When seq is "next" or "prev", the
        context is built for the neighboring *problem* in the same chapter
        instead (the 'error' entry is set when no such neighbor exists).
        '''
        course_id = '/'.join([org, number, semester])
        caxis = self.load_course_axis(course_id)
        # get problem info
        the_module = caxis[url_name]
        module_id = the_module['module_id']
        chapter_mid = the_module['chapter_mid']
        chapter_url_name = chapter_mid.rsplit('/',1)[-1]
        name = the_module['name']
        error = None
        # if seq is specified, then look for the problem before or after, as requested
        if seq in ["next", "prev"]:
            problems = [x for x in caxis.values() if (x['chapter_mid']==chapter_mid) and x['category']=='problem']
            problems.sort(cmp=lambda x,y: int(x['index'])-int(y['index'])) # list of problems ordered by course axis index
            try:
                pidx = problems.index(the_module)
            except Exception as err:
                error = {'msg': "Module %s not found in list of problems %s" % (url_name, problems)}
                pidx = None
            if (pidx is not None) and seq=='next':
                if pidx < len(problems)-1:
                    the_module = problems[pidx+1]
                    module_id = the_module['module_id']
                    url_name = module_id.rsplit('/',1)[-1]
                else:
                    error = {'msg': "No problem available after %s in this chapter" % (url_name)}
            if (pidx is not None) and seq=='prev':
                if pidx > 0:
                    the_module = problems[pidx-1]
                    module_id = the_module['module_id']
                    url_name = module_id.rsplit('/',1)[-1]
                else:
                    error = {'msg': "No problem available before %s in this chapter" % (url_name)}
        # use listings to determine if the course_id is being mangled by opaque keys, so that the correct jump_to link can be created.
        # specifically, use the "Course Key Version" column.  If that column exists, and has "v1", then the course_key will be
        # of the form "course-v1:ORG+NUM+SEMESTER", instead of the usual courses_id form of ORG/NUM/SEMESTER.
        # if this information is unavailable, then use course_id as the course_key.
        courses = self.get_course_listings()
        course_key_version = courses['data_by_key'][course_id].get('Course Key Version', "standard").strip().lower()
        if course_key_version=="v1":
            (org, num, semester) = course_id.split('/', 3)
            course_key = "course-v1:%s+%s+%s" % (org, num, semester)
        else:
            course_key = course_id
        # Build the context on a copy so self.common_data is not mutated.
        data = self.common_data.copy()
        data.update({'course_id': course_id,
                     'course_key': course_key,  # used for jump_to_id, for example; handles edX's opaque keys
                     'name': name,
                     'chapter_mid': chapter_mid,
                     'cun': chapter_url_name,
                     'base': self.get_base(course_id),
                     'url_name': url_name,
                     'module_id': module_id,
                     'module': the_module,
                     'caxis': caxis,
                     'error': error,
                     'course_key_version': course_key_version,
                     })
        return data
    @auth_required
    def get_problem(self, **kwargs):
        '''
        Render the analytics page for a single problem (shown in an iframe).
        kwargs are the course/module identifiers consumed by setup_module_info.
        '''
        data = self.setup_module_info(**kwargs)
        # Attach any custom reports scoped to this course/chapter/module.
        data.update({'custom_report': self.custom_report_container(self.is_authorized_for_custom_report,
                                                                   course_id=data['course_id'],
                                                                   chapter_id=data['chapter_mid'],
                                                                   module_id=data['module_id'],
                                                                   ),
                     })
        # template = os.path.join(os.path.dirname(__file__), 'problem.html')
        template = JINJA_ENVIRONMENT.get_template('problem.html')
        self.response.out.write(template.render(data))
@auth_required
def ajax_get_problem_stats(self, org=None, number=None, semester=None, problem_url_name=None):
course_id = '/'.join([org, number, semester])
ps = self.compute_problem_stats(course_id)
# logging.info('problem url_name = %s' % problem_url_name)
pstats = ps['data_by_key'].get(problem_url_name, [])
# logging.info('pstats = %s' % pstats)
data = {'data': [ pstats ] }
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(data))
@auth_required
def ajax_get_problem_answer_histories(self, org=None, number=None, semester=None, problem_url_name=None):
course_id = '/'.join([org, number, semester])
pah = self.select_problem_answer_histories(course_id, problem_url_name)
# reformat student answers to be readable
# also compute histogram
histograms = defaultdict(lambda: defaultdict(int))
latest_date = "0"
for entry in pah['data']:
entry['time'] = self.fix_date(entry['time'])
if entry['time'] > latest_date:
latest_date = entry['time']
sa = entry['student_answers']
sadat = json.loads(sa)
sastr = "<table>"
keys = sadat.keys()
keys.sort()
for k in keys: # merge dynamath with actual entry
if k.endswith('_dynamath'):
aid = k.rsplit('_',1)[0]
if sadat[aid].strip():
sadat[aid] += u' \u21d2 ' + sadat[k]
def fix_choice(citem):
def choice2letter(m):
return chr(ord('A')+int(m.group(1)))
return re.sub('choice_(\d+)', choice2letter, citem)
for k in keys:
if k.endswith('_dynamath'):
continue
answer_id = '_'.join(k.rsplit('_',2)[1:]) # see edx_platform/common/lib/capa/capa/capa_problem.py
answer = sadat[k]
hist_done = False
if type(answer)==str and answer.startswith("[u'choce_") and answer.strip():
answer = answer.replace("u'","")
elif type(answer)==list: # for lists, make histogram of each item separately
answer = map(fix_choice, answer)
for answer_item in answer:
histograms[answer_id][answer_item] += 1
hist_done = True
# answer = str([str(x) for x in answer])
answer = ', '.join(answer)
elif type(answer) in [str, unicode] and ('%20' in answer) and answer.strip():
answer = urllib.unquote(answer).strip()
if type(answer) in [str, unicode] and answer.startswith('choice_'):
answer = fix_choice(answer)
if not hist_done and answer.strip():
histograms[answer_id][answer] += 1
sastr += "<tr><td>%s:</td><td>%s</td></tr>" % (answer_id, answer)
sastr += "</table>"
entry['answer'] = sastr
# chop histogram tables into just top 20
for aid, hdat in histograms.items():
topitems = hdat.items()
topitems.sort(key=lambda(x): x[1], reverse=True)
topitems = topitems[:20]
histograms[aid] = topitems
# also order the histograms by aid
histograms = OrderedDict(sorted(histograms.items()))
# logging.info(json.dumps(histograms, indent=4))
data = {'data': pah['data'],
'items': histograms.keys(),
'histograms': histograms, # { k: v.items() for (k,v) in histograms.items()},
'data_date': latest_date[:16],
}
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(data))
    @auth_required
    def ajax_get_activity_stats(self, org=None, number=None, semester=None):
        '''
        Return activity stats in "series" format for HighCharts
        See http://www.highcharts.com/docs/chart-concepts/series
        Uses course listings to determine the start date; returns empty
        series (with the computed window) if the BigQuery computation fails.
        '''
        course_id = '/'.join([org, number, semester])
        courses = self.get_course_listings()
        start = courses['data_by_key'][course_id]['launch']
        (y,m,d) = map(int, start.split('-'))
        start_dt = datetime.datetime(y, m, d)
        if start_dt > datetime.datetime.now():
            start_dt = start_dt - datetime.timedelta(days=7*10) # start plot 10 weeks before launch
        else:
            start_dt = start_dt - datetime.timedelta(days=14) # start plot 2 weeks before launch
        start_str = start_dt.strftime('%Y-%m-%d')
        ongoing = self.get_collection_metadata('ONGOING', False)
        if ongoing:
            # Course still running: extend the window to now.
            end_dt = datetime.datetime.now()
        else:
            # logging.info("start_str = %s" % start_str)
            end_dt = start_dt + datetime.timedelta(days=32*4) # default position for end selector
        end_str = end_dt.strftime('%Y-%m-%d')
        the_end = self.get_collection_metadata('END_DATE', '2015-01-01')
        if end_str > the_end:
            the_end = end_str
        try:
            bqdat = self.compute_activity_by_day(course_id, start=start_str, end=the_end)
        except Exception as err:
            # On failure, still answer with a well-formed (empty) payload so
            # the chart renders instead of erroring out.
            logging.error("failed in calling compute_activity_by_day, err=%s" % str(err))
            data = {'series': [],
                    'start_dt': self.datetime2milliseconds(start_dt),
                    'end_dt': self.datetime2milliseconds(end_dt),
                    'data_date': '',
                    }
            self.response.headers['Content-Type'] = 'application/json'
            self.response.out.write(json.dumps(data))
            return
        def getrow(x, field, scale):
            # Convert one BQ row to a [timestamp_ms, scaled_count] point.
            #return [x[k] for k in ['date', 'nevents', 'nforum']]
            try:
                (y,m,d) = map(int, x['date'].split('-'))
                dt = datetime.datetime(y,m,d)
                ts = self.datetime2milliseconds(dt) # (dt - datetime.datetime(1970, 1, 1)).total_seconds() * 1000
                cnt = int(x[field]) / scale
            except Exception as err:
                # NOTE(review): if the date parse fails, ts is never assigned
                # and the return below raises NameError — confirm and fix.
                logging.error("failed in getrow, field=%s, x=%s, err=%s" % (field, x, err))
                cnt = 0
            return [ts, cnt]
        def getseries(field):
            return {'name': field['name'], 'data': [ getrow(x, field['field'], field['scale']) for x in bqdat['data'] ]}
        def mkf(field, name, scale):
            return {'field': field, 'name': name, 'scale': scale}
        # Event counts are divided by 10 so all series fit one y-axis scale.
        fields = [mkf('nevents', '# events / 10', 10),
                  mkf('nforum', '# forum events', 1),
                  mkf('nvideo', '# video events', 1),
                  mkf('nproblem_check', '# problem check events', 1),
                  mkf('nshow_answer', '# show answer events', 1)]
        stats = [ getseries(sname) for sname in fields ]
        #stats = [ getseries(sname) for sname in ['nevents'] ]
        data = {'series': stats,
                'start_dt': self.datetime2milliseconds(start_dt),
                'end_dt': self.datetime2milliseconds(end_dt),
                # 'data_date': bqdat.get('depends_on', ['.'])[0].split('.',1)[1],
                'data_date': (bqdat['data'] or [{'date': None}])[-1]['date'],
                'launch': start,
                }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(data))
@auth_required
def get_html(self, **kwargs):
'''
single html analytics view
- iframe
'''
data = self.setup_module_info(**kwargs)
template = JINJA_ENVIRONMENT.get_template('html_page.html')
self.response.out.write(template.render(data))
@auth_required
def get_video(self, **kwargs):
'''
single video analytics view
- iframe
'''
data = self.setup_module_info(**kwargs)
template = JINJA_ENVIRONMENT.get_template('video.html')
self.response.out.write(template.render(data))
    @auth_required
    def ajax_get_chapter_stats(self, org=None, number=None, semester=None, url_name=None):
        '''
        Emit JSON table data for a single chapter: its non-vertical modules
        annotated with usage counts, per-problem grade/attempt stats, and
        HTML links to module pages and course jump_to anchors.
        '''
        course_id = '/'.join([org, number, semester])
        caxis = self.load_course_axis(course_id)
        the_chapter = caxis[url_name]
        chapter_mid = the_chapter['module_id']
        # get module usage counts
        self.compute_sm_usage(course_id)
        # get chapter contents
        ccontents = [x for x in caxis.values() if (x['chapter_mid']==chapter_mid) and (not x['category']=='vertical')]
        # problem stats
        try:
            ps = self.compute_problem_stats(course_id)
            pstats = ps['data_by_key']
        except Exception as err:
            # Stats are optional: render the table without them on failure.
            pstats = {}
        # add nuser_views
        for caent in ccontents:
            caent['nuser_views'] = self.get_sm_nuser_views(caent['module_id'])
            caent['avg_grade'] = ''
            caent['max_grade'] = ''
            caent['avg_attempts'] = ''
            caent['max_attempts'] = ''
            caent['nsubmissions'] = ''
            if caent['category']=='problem':
                ps = pstats.get(caent['url_name'], None)
                if ps is not None:
                    caent['avg_grade'] = '%6.2f' % float(ps['avg_grade'])
                    caent['max_grade'] = '%6.2f' % float(ps['max_max_grade'])
                    caent['avg_attempts'] = '%6.2f'% float(ps['avg_attempts'])
                    caent['max_attempts'] = ps['max_attempts']
                    caent['nsubmissions'] = ps['nsubmissions']
        def makelink(txt, rdat, link_str=None):
            # Build the module-page link plus a floating jump_to icon link.
            try:
                if link_str is None:
                    link_str = "<a href='/{category}/{course_id}/{url_name}'>{txt}</a>"
                link = link_str.format(txt=(txt.encode('utf8') or "none"),
                                       course_id=course_id,
                                       category=rdat['category'],
                                       url_name=rdat['url_name'],
                                       cid_enc=urllib.urlencode({'course_id':course_id}),
                                       )
            except Exception as err:
                logging.error('oops, cannot make link for %s' % repr(rdat))
                link = txt
            p = {'base': self.get_base(course_id),
                 'course_id': course_id,
                 'cmid': url_name,
                 'url_name': rdat['url_name'],
                 }
            # cases for url base
            link += "<span style='float:right'><a href='{base}/courses/{course_id}/jump_to_id/{url_name}'><img src=/images/link-small.png></a></span>".format(**p)
            return link
        def makerow(rdat):
            # Mutates the axis entry in place: fixes the date and linkifies names.
            row = rdat
            row['start'] = self.fix_date(rdat['start'])
            if row['category'] in ['problem', 'video', 'html']:
                row['name'] = makelink(row['name'], rdat)
            elif row['category'] in ['openassessment']:
                row['name'] = makelink(row['name'], rdat,
                                       "<a href='/page/openassessment_report?problem_id={url_name}&{cid_enc}'>{txt}</a>")
            return row
        tabledata = json.dumps({'data': [ makerow(x) for x in ccontents ]})
        self.response.out.write(tabledata)
    @auth_required
    def get_chapter(self, org=None, number=None, semester=None, url_name=None, seq=None):
        '''
        Render the single-chapter analytics view (container page; the table
        body is filled by ajax_get_chapter_stats).  When seq is "next" or
        "prev", the neighboring chapter is shown instead, with an error
        message when no such neighbor exists.
        '''
        course_id = '/'.join([org, number, semester])
        caxis = self.load_course_axis(course_id)
        # get chapter info
        the_chapter = caxis[url_name]
        # if seq is specified, then look for the chapter before or after, as requested
        error = None
        if seq in ["next", "prev"]:
            chapters = [x for x in caxis.values() if (x['category']=='chapter')]
            chapters.sort(cmp=lambda x,y: int(x['index'])-int(y['index'])) # list of chapters ordered by course axis index
            try:
                cidx = chapters.index(the_chapter)
            except Exception as err:
                error = {'msg': "Module %s not found in list of chapters %s" % (url_name, chapters)}
                cidx = None
            if (cidx is not None) and seq=='next':
                if cidx < len(chapters)-1:
                    the_chapter = chapters[cidx+1]
                    module_id = the_chapter['module_id']
                    url_name = module_id.rsplit('/',1)[-1]
                else:
                    error = {'msg': "No chapter available after %s in this course" % (url_name)}
            if (cidx is not None) and seq=='prev':
                if cidx > 0:
                    the_chapter = chapters[cidx-1]
                    module_id = the_chapter['module_id']
                    url_name = module_id.rsplit('/',1)[-1]
                else:
                    error = {'msg': "No chapter available before %s in this course" % (url_name)}
        chapter_mid = the_chapter['module_id']
        chapter_name = the_chapter['name']
        # Column definitions for the chapter-contents DataTable.
        fields = [ DataTableField(x) for x in [{'field': 'index', 'title': 'Time index', 'width': '8%', 'class': 'dt-center'},
                                               {'field': 'category', 'title': "Module category", 'width': '10%'},
                                               {'field': 'name', 'title': "Module name"},
                                               {'field': 'nsubmissions', 'title': "# submissions", 'width': '7%', 'class': 'dt-center'},
                                               {'field': 'avg_grade', 'title': "AVG grade", 'width': '7%', 'class': 'dt-center'},
                                               {'field': 'max_grade', 'title': "MAX grade", 'width': '7%', 'class': 'dt-center'},
                                               {'field': 'avg_attempts', 'title': "AVG attempts", 'width': '7%', 'class': 'dt-center'},
                                               {'field': 'max_attempts', 'title': "MAX attempts", 'width': '7%', 'class': 'dt-center'},
                                               {'field': 'start', 'title': "Start date", 'width': '12%', 'class': 'dt-center'},
                                               {'field': 'nuser_views', 'title': '# user views', 'width': '7%', 'class': 'dt-center'},
                                               # {'field': 'url_name', 'title': 'url_name'},
                                               ] ]
        tablehtml = self.list2table(fields, [])
        tablefields = json.dumps([x.colinfo() for x in fields])
        data = self.common_data
        data.update({'fields': tablefields,
                     'table': tablehtml,
                     'course_id': course_id,
                     'chapter_name': chapter_name,
                     'url_name': url_name,
                     'error': error,
                     })
        template = JINJA_ENVIRONMENT.get_template('chapter.html')
        self.response.out.write(template.render(data))
    @auth_required
    def ajax_get_course_stats(self, org=None, number=None, semester=None):
        '''
        single course analytics view - data only. Provides table data for "content by chapter"
        table in the one_course.html template: one row per chapter, with
        per-category content counts and an overall stats mini-table.
        '''
        course_id = '/'.join([org, number, semester])
        caxis = self.load_course_axis(course_id)
        #logging.info('chapters = %s' % [x['name'] for x in caxis.values() if x['category']=='chapter'])
        # get module usage counts
        self.compute_sm_usage(course_id)
        # overall content stats
        counts = defaultdict(int)
        counts_by_chapter = defaultdict(lambda: defaultdict(int))
        sequentials_by_chapter = defaultdict(list)
        for index, caent in caxis.iteritems():
            if caent['category'] in ['course']:
                continue
            counts[caent['category']] += 1
            if 'chapter_mid' in caent:
                counts_by_chapter[caent['chapter_mid']][caent['category']] += 1
                if caent['category']=='sequential':
                    # sequentials are embedded in the row JSON so the UI can
                    # expand a chapter row into its sequentials.
                    caent['nuser_views'] = self.get_sm_nuser_views(caent['module_id'])
                    sequentials_by_chapter[caent['chapter_mid']].append(caent)
        stats_fields = counts.keys()
        # stats_fields = [ DataTableField({'field': x, 'class': 'dt-center'}) for x in stats_fields]
        stats_table = self.list2table(stats_fields, [counts], tid="stats_table")
        def makelink(txt, rdat):
            # Link a chapter name to its chapter analytics page.
            txt = txt.encode('utf8')
            return "<a href='/chapter/{course_id}/{url_name}'>{txt}</a>".format(txt=txt,
                                                                                course_id=course_id,
                                                                                url_name=rdat['url_name'])
        def makerow(rdat):
            # row = [rdat['index'], makelink(rdat['name'], rdat), fix_date(rdat['start'])]
            #row = {'index': rdat['index'], 'name': makelink(rdat['name'], rdat), 'start': fix_date(rdat['start'])}
            row = rdat
            chapter_mid = rdat['module_id']
            row['name'] = makelink(rdat['name'], rdat)
            row['start'] = self.fix_date(rdat['start'])
            row['n_sequential'] = counts_by_chapter[chapter_mid]['sequential']
            row['n_html'] = counts_by_chapter[chapter_mid]['html']
            row['n_problem'] = counts_by_chapter[chapter_mid]['problem']
            row['n_video'] = counts_by_chapter[chapter_mid]['video']
            row['sequentials'] = json.dumps(sequentials_by_chapter[chapter_mid])
            row['nuser_views'] = self.get_sm_nuser_views(chapter_mid)
            return row
        data = {'data': [ makerow(x) for x in caxis.values() if x['category']=='chapter'],
                'stats_columns': [ {'className': 'dt-center'}] *len(stats_fields),
                'stats_table': stats_table,
                'data_date': str(self.course_axis['lastModifiedTime'])[:16],
                }
        tabledata = json.dumps(data)
        self.response.out.write(tabledata)
    @auth_required
    def ajax_get_geo_stats(self, org=None, number=None, semester=None):
        '''
        Emit JSON geographic stats for a course: a per-country series for the
        map chart plus the (augmented) raw rows for the table view.
        '''
        course_id = '/'.join([org, number, semester])
        bqdat = self.compute_geo_stats(course_id)
        def mkpct(a,b):
            # Percentage string, or "" when the denominator is missing/zero.
            if not b:
                return ""
            if int(b)==0:
                return ""
            return "%6.1f" % (int(a) / float(b) * 100)
        def getrow(x):
            # Augments the BQ row in place (these fields show in the table),
            # then returns the condensed dict used by the map series.
            if not x['countryLabel']:
                x['countryLabel'] = 'Unknown'
            x['cert_pct'] = mkpct(x['ncertified'], x['nregistered'])
            x['cert_pct_of_viewed'] = mkpct(x['ncertified'], x['nviewed'])
            x['verified_cert_pct'] = mkpct(x['n_verified_certified'], x['n_verified_id'])
            x['avg_hours'] = "%8.1f" % (float(x['avg_of_sum_dt'] or 0)/60/60) # hours
            x['avg_hours_certified'] = "%8.1f" % (float(x['certified_sum_dt'] or 0)/60/60) # hours
            x['nverified'] = x['n_verified_id'] # for compatibility with report_geo_stats
            return { 'z': int(x['nregistered']),
                     'cc': x['cc'],
                     'name': x['countryLabel'],
                     'nverified': x['n_verified_id'],
                     'ncertified': x['ncertified'],
                     'cert_pct': x['cert_pct'],
                     }
        series = [ getrow(x) for x in bqdat['data'] ]
        #top_by_reg = sorted(bqdat['data'], key=lambda x: int(x['nregistered']), reverse=True)[:10]
        # logging.info("top_by_reg = %s" % json.dumps(top_by_reg, indent=4))
        data = {'series': series,
                'table': bqdat['data'],
                'data_date': str(bqdat['lastModifiedTime'])[:16],
                }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(data))
    @auth_required
    def get_course(self, org=None, number=None, semester=None):
        '''
        single course analytics view
        - overall statistics (number of components of various categories)
        - show table of chapters (rows filled via ajax_get_course_stats)
        Also handles the "force recompute enrollment" POST action.
        '''
        course_id = '/'.join([org, number, semester])
        # handle forced recomputation requests
        action = self.request.POST.get('action', None)
        logging.info('post keys = %s' % self.request.POST.keys())
        logging.info('post action = %s' % action)
        if action=='force recompute enrollment':
            self.reset_enrollment_by_day(course_id)
        # show table with just chapters, and present sequentials as extra information when clicked
        fields = [ DataTableField(x) for x in [{'field': 'index', 'title': 'Time index', 'width': '8%', 'class': 'dt-center'},
                                               {'field': 'name', 'title': "Chapter name"},
                                               {'field': 'start', 'title': "Start date", 'width': '18%'},
                                               {'field': 'nuser_views', 'title': '# user-views', 'width': '10%', 'class': 'dt-center'},
                                               ] ]
        # logging.info('sm_usage:')
        # logging.info(self.bqdata['stats_module_usage']['data_by_key'])
        tablehtml = self.list2table([' '] + fields, [])
        # First column is the DataTables row-expand ("details") control.
        tablefields = json.dumps([
            {
                "class": 'details-control',
                "orderable": False,
                "data": None,
                'width': '5%',
                "defaultContent": ''
            },] + [x.colinfo() for x in fields])
        data = self.common_data.copy()
        data.update({'course_id': course_id,
                     'fields': tablefields,
                     'table': tablehtml,
                     'is_staff': self.is_superuser(),
                     'is_pm': self.is_pm(),
                     'does_user_have_role': self.does_user_have_role,
                     'image': self.get_course_image(course_id),
                     'nav_is_active': self.nav_is_active('onecourse'),
                     'custom_report': self.custom_report_container(self.is_authorized_for_custom_report,
                                                                   course_id=course_id),
                     })
        template = JINJA_ENVIRONMENT.get_template('one_course.html')
        self.response.out.write(template.render(data))
    @auth_required
    def get_axis(self, org=None, number=None, semester=None):
        '''
        show full course axis -- mainly for debugging

        Query params:
          ?ajax=1     -> dump the whole course axis as JSON
          ?chapters=1 -> dump only rows with category == 'chapter' as JSON
        otherwise render the course_axis.html table.
        '''
        course_id = '/'.join([org, number, semester])
        if not self.is_user_authorized_for_course(course_id):
            return self.no_auth_sorry()
        caxis = self.load_course_axis(course_id, dtype='data')
        if self.request.get('ajax'):
            # return JSON instead of HTML
            # NOTE(review): this serializes self.course_axis (presumably set
            # as a side effect of load_course_axis) rather than caxis -- confirm.
            self.response.headers['Content-Type'] = 'application/json'
            self.response.out.write(json.dumps(self.course_axis, default=self.json_serializer_with_datetime, indent=4))
            return
        if self.request.get('chapters'):
            # return JSON of just chapters
            chapters = [x for x in caxis if x['category']=='chapter']
            self.response.headers['Content-Type'] = 'application/json'
            self.response.out.write(json.dumps(chapters, indent=4))
            return
        # logging.info("caxis=%s" % json.dumps(caxis, indent=4))
        # Force names/grading-format strings to ASCII: some source data was
        # not encoded properly originally, and Jinja rendering chokes on it.
        for cae in caxis:
            try:
                # caxis[row]['name'] = fix_bad_unicode(caxis[row]['name'])
                #caxis[row]['name'] = caxis[row]['name'].replace('\u2013','-')
                #caxis[row]['name'] = str(caxis[row]['name'])
                cae['name'] = unidecode(cae['name'])  # desperation: perhaps data wasn't encoded properly originally?
                if cae['gformat']:
                    cae['gformat'] = unidecode(cae['gformat'])  # desperation: perhaps data wasn't encoded properly originally?
                # cae['name'] = str(cae['name'])
            except Exception as err:
                # Log the offending row, then re-raise: a bad row is a data
                # bug we want surfaced, not hidden.
                print "unicode error for course axis row=%s, name=" % repr(cae), repr(cae['name'])
                print "type = ", type(cae['name'])
                raise
        if 1:
            fields = ['category', 'index', 'url_name', 'name', 'gformat', 'due', 'start',
                      'module_id', 'path', 'data_ytid', 'data_weight', 'chapter_mid']
            #fields = ['category', 'index', 'name', 'due', 'start',
            #          'module_id', 'path', 'data_ytid', 'data_weight', 'chapter_mid']
            tablehtml = self.list2table(fields,
                                        caxis,
                                        eformat={'due': self.fix_date, 'start': self.fix_date}, )
        data = self.common_data
        data.update({'course_id': course_id,
                     'table': tablehtml,
                     })
        template = JINJA_ENVIRONMENT.get_template('course_axis.html')
        self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
        self.response.out.write(template.render(data))
@auth_required
#def get_glossary(self, org=None, number=None, semester=None, type=None):
def get_glossary(self, type=None):
'''
Show Glossary data
'''
data = ''
template = JINJA_ENVIRONMENT.get_template('glossary/glossary_%s.html' % type)
self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
self.response.out.write(template.render(data))
    @auth_required
    def ajax_get_table_data(self, org=None, number=None, semester=None, table=None):
        '''
        show arbitrary table from bigquery -- mainly for debugging - ajax data

        Implements DataTables server-side paging: reads draw/start/length
        from POST and returns {'data', 'draw', 'recordsTotal',
        'recordsFiltered'} as JSON.
        '''
        course_id = '/'.join([org, number, semester])
        dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=self.use_dataset_latest())
        # Tables that may contain PII are restricted to researchers.
        if ('person' in table) or ('track' in table) or ('student' in table):
            if not self.does_user_have_role('instructor', course_id):
                return self.no_auth_sorry()
            # be more restrictive: researchers only
            if not (self.does_user_have_role('researcher', course_id)):
                return self.no_auth_sorry()
        # DataTables server-side processing: http://datatables.net/manual/server-side
        draw = int(self.request.POST['draw'])
        start = int(self.request.POST['start'])
        length = int(self.request.POST['length'])
        bqdata = self.cached_get_bq_table(dataset, table, startIndex=start, maxResults=length)
        self.fix_bq_dates(bqdata)
        logging.info('get draw=%s, start=%s, length=%s' % (draw, start, length))
        # Disabled debug code: re-encode all cells as UTF-8.
        if 0:
            for row in bqdata['data']:
                for key in row:
                    row[key] = row[key].encode('utf-8')
        data = self.common_data
        data.update({'data': bqdata['data'],
                     'draw': draw,
                     'recordsTotal': bqdata['numRows'],
                     'recordsFiltered': bqdata['numRows'],
                     })
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(data))
    @auth_required
    def get_table(self, dataset=None, table=None, org=None, number=None,semester=None):
        '''
        show arbitrary table from bigquery -- mainly for debugging

        Two entry modes:
          - course mode: dataset is None, org/number/semester give the
            course; per-course authorization applies (PII tables restricted
            to researchers).
          - raw mode: dataset given directly; restricted to AUTHORIZED_USERS.
        Rows are loaded client-side (ajax_get_table_data); this only renders
        the table shell with column metadata.
        '''
        if dataset is None:
            course_id = '/'.join([org, number, semester])
            dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=self.use_dataset_latest())
            if not self.is_user_authorized_for_course(course_id):
                return self.no_auth_sorry()
            if ('person' in table) or ('track' in table) or ('student' in table):
                if not self.does_user_have_role('instructor', course_id):
                    return self.no_auth_sorry()
                # be more restrictive: researchers only
                if not (self.does_user_have_role('researcher', course_id)):
                    return self.no_auth_sorry()
        else:
            course_id = None
            if not self.user in self.AUTHORIZED_USERS:
                return self.no_auth_sorry()
        # Build DataTables column definitions from the BigQuery schema.
        tableinfo = bqutil.get_bq_table_info(dataset, table)
        fields = tableinfo['schema']['fields']
        field_names = [x['name'] for x in fields]
        tablecolumns = json.dumps([ { 'data': x, 'title': x, 'class': 'dt-center' } for x in field_names ])
        logging.info(tablecolumns)
        data = self.common_data
        data.update({'dataset': dataset,
                     'table': table,
                     'course_id': course_id,
                     'tablecolumns': tablecolumns,
                     })
        template = JINJA_ENVIRONMENT.get_template('show_table.html')
        self.response.out.write(template.render(data))
# webapp2 application configuration: the session secret comes from the
# (non-versioned) local_config module.
config = {}
config['webapp2_extras.sessions'] = {
    'secret_key': local_config.SESSION_SECRET_KEY,
}

# URL routes. webapp2 passes named URL groups (<name>) to the handler method
# as keyword arguments, so each variable name must match a parameter of the
# target handler method.
ROUTES = [
    # html pages
    webapp2.Route('/', handler=MainPage, handler_method='get_main'),
    webapp2.Route('/course/<org>/<number>/<semester>', handler=MainPage, handler_method='get_course'),
    webapp2.Route('/chapter/<org>/<number>/<semester>/<url_name>', handler=MainPage, handler_method='get_chapter'),
    webapp2.Route('/chapter/<org>/<number>/<semester>/<url_name>/<seq>', handler=MainPage, handler_method='get_chapter'),
    webapp2.Route('/problem/<org>/<number>/<semester>/<url_name>', handler=MainPage, handler_method='get_problem'),
    webapp2.Route('/problem/<org>/<number>/<semester>/<url_name>/<seq>', handler=MainPage, handler_method='get_problem'),  # for next and prev
    webapp2.Route('/video/<org>/<number>/<semester>/<url_name>', handler=MainPage, handler_method='get_video'),
    webapp2.Route('/html/<org>/<number>/<semester>/<url_name>', handler=MainPage, handler_method='get_html'),
    webapp2.Route('/axis/<org>/<number>/<semester>', handler=MainPage, handler_method='get_axis'),
    webapp2.Route('/table/<org>/<number>/<semester>/<table>', handler=MainPage, handler_method='get_table'),
    # BUGFIX: this variable was named <database>, but get_table() has no
    # 'database' parameter (it is 'dataset'), so requests to this route
    # raised a TypeError. The URL shape is unchanged.
    webapp2.Route('/table/<dataset>/<table>', handler=MainPage, handler_method='get_table'),
    webapp2.Route('/glossary/<type>', handler=MainPage, handler_method='get_glossary'),
    # ajax calls
    webapp2.Route('/get/<org>/<number>/<semester>/activity_stats', handler=MainPage, handler_method='ajax_get_activity_stats'),
    webapp2.Route('/get/<org>/<number>/<semester>/enrollment_stats', handler=MainPage, handler_method='ajax_get_enrollment_stats'),
    webapp2.Route('/get/<org>/<number>/<semester>/usage_stats', handler=MainPage, handler_method='ajax_get_usage_stats'),
    webapp2.Route('/get/<org>/<number>/<semester>/course_stats', handler=MainPage, handler_method='ajax_get_course_stats'),
    webapp2.Route('/get/<org>/<number>/<semester>/geo_stats', handler=MainPage, handler_method='ajax_get_geo_stats'),
    webapp2.Route('/get/<org>/<number>/<semester>/<url_name>/chapter_stats', handler=MainPage, handler_method='ajax_get_chapter_stats'),
    webapp2.Route('/get/<org>/<number>/<semester>/<problem_url_name>/problem_stats', handler=MainPage, handler_method='ajax_get_problem_stats'),
    webapp2.Route('/get/<org>/<number>/<semester>/<table>/table_data', handler=MainPage, handler_method='ajax_get_table_data'),
    webapp2.Route('/get/<org>/<number>/<semester>/<problem_url_name>/problem_histories', handler=MainPage, handler_method='ajax_get_problem_answer_histories'),
    webapp2.Route('/get/dashboard/geo_stats', handler=MainPage, handler_method='ajax_dashboard_get_geo_stats'),
    webapp2.Route('/get/datafile/<file_name>', handler=MainPage, handler_method='ajax_get_datafile'),
    webapp2.Route('/get/switch_collection', handler=MainPage, handler_method='ajax_switch_collection'),
]

# Routes contributed by the other page modules.
ROUTES += DashboardRoutes
ROUTES += AdminRoutes
ROUTES += DeveloperRoutes
ROUTES += CustomReportRoutes
ROUTES += FileStorageRoutes

application = webapp2.WSGIApplication(ROUTES, debug=True, config=config)
| StarcoderdataPython |
73734 | <filename>CoordenacaoFacil/models/Student.py
from werkzeug.security import generate_password_hash, check_password_hash
from CoordenacaoFacil import db
class Student():
    """MongoDB-backed student account model (collection: ``db.students``).

    Instances are plain value holders; persistence goes through ``create``
    and the ``db`` handle imported at module level.
    """

    def __init__(self, code="", name="", email="", password="", course=None, university=None, createdAt=""):
        self.code = code
        self.name = name
        self.email = email
        self.password = password
        self.university = university
        self.course = course
        self.createdAt = createdAt
        # Type discriminator stored with every document.
        self.type = "student"

    def create(self, student=None):
        """Insert *student* into db.students (password hashed).

        Returns True on success, False on any database error.
        """
        try:
            db.students.insert({
                "code": student.code,
                "name": student.name,
                "email": student.email,
                "password": generate_password_hash(student.password),
                "university": student.university,
                "course": student.course,
                "createdAt": student.createdAt,
                "type": self.type
            })
            return True
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only operational errors should be absorbed here.
        except Exception:
            print("Houve um problema ao cadastrar novo estudante.")
            return False

    def login(self, code="", password=""):
        """Return True when *code* exists and *password* matches its hash."""
        try:
            student = db.students.find_one({
                "code": code
            })
            if student:
                if check_password_hash(student["password"], password):
                    return True
            return False
        except Exception:  # BUGFIX: narrowed from bare except (see create)
            print("Houve um problema ao entrar na aplicação.")
            return False

    def getUserByCode(self, code=""):
        """Return the raw student document for *code* (None if absent,
        False on database error)."""
        try:
            student = db.students.find_one({
                "code": code
            })
            return student
        except Exception:  # BUGFIX: narrowed from bare except (see create)
            print("Houve um problema ao obter estudante.")
            return False
| StarcoderdataPython |
3306878 | <gh_stars>10-100
from django.core.exceptions import ValidationError
class FieldFactory():
    """Registry/factory mapping string ids to field classes.

    All state lives in the class-level ``fields`` dict. The methods are
    decorated ``@staticmethod``: previously they had no ``self`` parameter,
    so calling them on an *instance* passed the instance as the first
    argument (e.g. ``FieldFactory().get_class("x")`` looked up the instance
    itself). Class-level calls behave exactly as before.
    """

    # id -> class registry, shared by all users of the factory.
    fields = {}

    @staticmethod
    def get_class(id):
        """Return the class registered under *id* (KeyError if unknown)."""
        return FieldFactory.fields[id]

    @staticmethod
    def get_all_classes():
        """Return a view of all registered classes."""
        return FieldFactory.fields.values()

    @staticmethod
    def register(id, type):
        """Register *type* under *id*; a duplicate id raises ValidationError."""
        if id not in FieldFactory.fields:
            FieldFactory.fields[id] = type
        else:
            raise ValidationError("invalid ID.")

    @staticmethod
    def get_strings():
        """Return {id: str(instance)} for a fresh instance of each class."""
        l = {}
        for key in FieldFactory.fields:
            l[key] = FieldFactory.fields[key]().__str__()
        return l
| StarcoderdataPython |
#!/usr/bin/env python
"""Extract 'person' bounding boxes from VOC 2007 split lists.

For each split file (test/train/val) writes '<i>xml.txt' with one line per
person box: <image path>,<xmin>,<ymin>,<xmax>,<ymax>,0
"""
import xml.etree.ElementTree as ET
import os
import collections

s = os.sep
labelfile = '/data1/datasets/VOC/'
testfile = os.path.join(labelfile, '2007_test.txt')
trainfile = os.path.join(labelfile, '2007_train.txt')
valfile = os.path.join(labelfile, '2007_val.txt')
filelist = [testfile, trainfile, valfile]

for i, file in enumerate(filelist):
    # `with` guarantees both files are closed even on a parse error.
    with open(file) as f, open(str(i) + 'xml.txt', 'w') as data:
        for line in f:
            imagedir = line.strip()
            if not imagedir:
                continue  # robustness: skip blank lines in the split file
            imagename = os.path.basename(imagedir)[:-3]
            dir = os.path.dirname(os.path.dirname(imagedir))
            Annotation = os.path.join(dir, 'Annotations', imagename + 'xml')
            tree = ET.parse(Annotation)
            root = tree.getroot()
            # BUGFIX: Element.find() returns an Element, not a string;
            # os.path.join needs its .text (previously raised TypeError).
            # NOTE(review): 'datatsets' looks like a typo for 'datasets'
            # (cf. labelfile above) -- confirm against the actual mount.
            filename = os.path.join('/data1/datatsets/VOC/VOCdevkit/VOC2007/JPEGImages',
                                    root.find('filename').text)
            for obj in root.findall('object'):
                if obj.find('name').text == 'person':
                    bndbox = obj.find('bndbox')
                    xmin = bndbox.find('xmin').text
                    ymin = bndbox.find('ymin').text
                    xmax = bndbox.find('xmax').text
                    ymax = bndbox.find('ymax').text
                    # Class id 0 == person in the consuming training code.
                    print(filename, ',', xmin, ',', ymin, ',', xmax, ',', ymax, ',', 0, file=data)
#!/usr/bin/python3
# Simple CGI "web terminal": runs the submitted 'command' field with sudo
# and echoes the output back as HTML.
#
# SECURITY WARNING: this executes arbitrary, untrusted user input as a
# shell command *with sudo* (remote command execution by design). Never
# expose it outside a fully trusted network; prefer a whitelist of allowed
# commands and subprocess.run([...], shell=False).
import subprocess as sp
import cgi
import html

print("content-type: text/html")
print()

fs = cgi.FieldStorage()
# Robustness: default to "" so a missing field doesn't raise TypeError below.
cmd = fs.getvalue("command", "")
output = sp.getoutput("sudo " + cmd)
print("<body style='padding: 40px;'>")
print('<h1 style="color:#df405a;" >Output</h1>')
# BUGFIX: escape the command output before embedding it in HTML; otherwise
# any markup in the output is injected into the page (stored/reflected XSS).
print("<pre>{}</pre>".format(html.escape(output)))
print("</body>")
3396091 | <gh_stars>0
from decimal import Decimal
import mimetypes
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.views.generic.base import View
from django.conf import settings
from django.urls import reverse_lazy
from django.contrib import messages
from books.models import Account, AccountGroup, DeclaredSource
from books.forms import NewSourceForm, SourceDeclarationForm
from books.utils import get_account_for_user
import books.financial_statements as financial_statements
class DashboardView(View):
    """Render the dashboard landing page."""

    template_name = 'books/dashboard/home.html'

    def get(self, request, *args, **kwargs):
        context = {}
        return render(request, self.template_name, context)
class DownloadFinancialStatementView(View):
    """
    Abstract base: stream a generated financial-statement file as an
    attachment. Subclasses must set ``document_name`` and override
    ``create_file`` to return an object with an ``open()`` method.
    """

    # File name offered to the browser; also drives MIME-type guessing.
    document_name = None

    def create_file(self, request):
        # Subclass contract: surfacing a missing override loudly.
        raise RuntimeError('{} view failed to define create file function'.format(
            self.__class__))

    def get(self, request, *args, **kwargs):
        downloadable_file = self.create_file(request)
        with downloadable_file.open() as f:
            # Guess the content type from the configured download name.
            mime_type_guess = mimetypes.guess_type(self.document_name)
            response = HttpResponse(f, content_type=mime_type_guess[0])
            response['Content-Disposition'] = 'attachment; filename=' + self.document_name
            return response
class DownloadTrialBalanceView(DownloadFinancialStatementView):
    """Serve the current user's trial balance as a downloadable PDF."""

    document_name = 'trial_balance.pdf'

    def create_file(self, request):
        account = get_account_for_user(request.user)
        statement = financial_statements.TrialBalance(account)
        return statement.as_pdf(file_name='tb_out.pdf')
class DownloadProfitAndLossView(DownloadFinancialStatementView):
    """Serve the current user's profit & loss statement as a PDF download."""

    document_name = 'profit_and_loss.pdf'

    def create_file(self, request):
        account = get_account_for_user(request.user)
        statement = financial_statements.ProfitAndLoss(account)
        return statement.as_pdf(file_name='pl_out.pdf')
class DownloadBalanceSheetView(DownloadFinancialStatementView):
    """Serve the current user's balance sheet as a downloadable PDF."""

    document_name = 'balance_sheet.pdf'

    def create_file(self, request):
        acc = get_account_for_user(request.user)
        bs = financial_statements.BalanceSheet(acc)
        # NOTE(review): the return value is discarded -- presumably called
        # for a side effect (populating internal state before as_pdf);
        # confirm, otherwise this line is dead code.
        bs.as_dict()
        return bs.as_pdf(file_name = 'bs_out.pdf')
| StarcoderdataPython |
import itertools as it

# Advent-of-Code style: count how many consecutive depth readings increase.
with open("./input.txt", "r") as inputFile:
    lines = inputFile.read().splitlines()

depths = [int(line) for line in lines]
increases = sum(1 for prev, cur in it.pairwise(depths) if cur > prev)
print(increases)
196993 | import functools
import logging
from datetime import datetime
import django
from django.contrib import admin
from django.contrib import messages
from django.db.models import F
from django.http import HttpResponse
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from django_afip import exceptions
from django_afip import models
logger = logging.getLogger(__name__)
# TODO: Add an action to populate generic types.
def catch_errors(f):
    """
    Decorator for admin actions: turns the known AFIP certificate and
    authentication failures into user-facing Django messages instead of
    letting the exception propagate as a server error.
    """

    @functools.wraps(f)
    def wrapper(self, request, *args, **kwargs):
        try:
            return f(self, request, *args, **kwargs)
        except exceptions.CertificateExpired:
            error = _("The AFIP Taxpayer certificate has expired.")
        except exceptions.UntrustedCertificate:
            error = _("The AFIP Taxpayer certificate is untrusted.")
        except exceptions.CorruptCertificate:
            error = _("The AFIP Taxpayer certificate is corrupt.")
        except exceptions.AuthenticationError as e:
            logger.exception("AFIP auth failed")
            error = _("An unknown authentication error has ocurred: %s") % e
        # Only reached when one of the handlers above fired.
        self.message_user(request, error, messages.ERROR)

    return wrapper
class VatInline(admin.TabularInline):
    """Inline editor for the VAT rows attached to a receipt."""
    model = models.Vat
    fields = (
        "vat_type",
        "base_amount",
        "amount",
    )
    extra = 1  # offer one blank row by default
class TaxInline(admin.TabularInline):
    """Inline editor for the tax rows attached to a receipt."""
    model = models.Tax
    fields = (
        "tax_type",
        "description",
        "base_amount",
        "aliquot",
        "amount",
    )
    extra = 1  # offer one blank row by default
class ReceiptEntryInline(admin.TabularInline):
    """Inline editor for the line items of a receipt."""
    model = models.ReceiptEntry
    fields = (
        "description",
        "quantity",
        "unit_price",
        "vat",
    )
    extra = 1  # offer one blank row by default
class ReceiptValidationInline(admin.StackedInline):
    """Read-only inline showing the AFIP validation result for a receipt."""
    model = models.ReceiptValidation
    # Validations come from AFIP and must never be edited by hand.
    readonly_fields = (
        "result",
        "processed_date",
        "cae",
        "cae_expiration",
        "observations",
    )
    extra = 0  # never offer blank rows; validations are created by the API
class ReceiptStatusFilter(admin.SimpleListFilter):
    """Sidebar filter: receipts with vs. without an approved AFIP validation."""
    title = _("status")
    parameter_name = "status"

    VALIDATED = "validated"
    NOT_VALIDATED = "not_validated"

    def lookups(self, request, model_admin):
        # (URL value, human-readable label) pairs shown in the sidebar.
        return (
            (self.VALIDATED, _("Validated")),
            (self.NOT_VALIDATED, _("Not validated")),
        )

    def queryset(self, request, queryset):
        if self.value() == self.VALIDATED:
            return queryset.filter(
                validation__result=models.ReceiptValidation.RESULT_APPROVED
            )
        if self.value() == self.NOT_VALIDATED:
            return queryset.exclude(
                validation__result=models.ReceiptValidation.RESULT_APPROVED
            )
        # Implicit None: Django keeps the queryset unfiltered.
class ReceiptTypeFilter(admin.SimpleListFilter):
    """Sidebar filter by receipt type, offering only types actually in use."""
    title = models.ReceiptType._meta.verbose_name
    parameter_name = "receipt_type"

    def lookups(self, request, model_admin):
        # Only types that have at least one receipt, to keep the list short.
        return (
            (receipt_type.code, receipt_type.description)
            for receipt_type in models.ReceiptType.objects.filter(
                receipts__isnull=False,
            ).distinct()
        )

    def queryset(self, request, queryset):
        value = self.value()
        if value:
            queryset = queryset.filter(receipt_type__code=value)
        return queryset
@admin.register(models.Receipt)
class ReceiptAdmin(admin.ModelAdmin):
    """Admin for receipts: list with validation status and PDF link, plus a
    bulk 'Validate' action that sends the selection to AFIP."""

    search_fields = ("receipt_number",)
    list_display = (
        "id",
        "receipt_type",
        "point_of_sales",
        "number",
        "issued_date",
        "friendly_total_amount",
        "validated",
        "pdf_link",
    )
    list_filter = (
        ReceiptStatusFilter,
        ReceiptTypeFilter,
    )
    autocomplete_fields = (
        "currency",
        "receipt_type",
        "related_receipts",
    )
    date_hierarchy = "issued_date"

    def get_exclude(self, request, obj=None):
        if django.VERSION < (2, 0):
            # This field would load every single receipts for the widget which
            # will always result in thousands of queries until an evetual OOM.
            return ["related_receipts"]
        return []

    # Computed columns sourced from the related validation (read-only).
    __related_fields = [
        "validated",
        "cae",
    ]

    inlines = (
        VatInline,
        TaxInline,
        ReceiptEntryInline,
        ReceiptValidationInline,
    )
    ordering = ("-issued_date",)
    readonly_fields = __related_fields

    def get_queryset(self, request):
        # Prefetch/annotate everything the list columns need, so rendering a
        # page does not issue one query per row.
        return (
            super()
            .get_queryset(request)
            .select_related(
                "receipt_type",
                "point_of_sales",
            )
            .annotate(
                pdf_id=F("receiptpdf__id"),
                validation_result=F("validation__result"),
            )
        )

    def number(self, obj):
        return obj.formatted_number

    number.short_description = _("receipt number")
    number.admin_order_field = "receipt_number"

    def friendly_total_amount(self, obj):
        # '*' marks amounts converted from a non-ARS currency via the quote.
        return "{:0.2f} ARS{}".format(
            obj.total_amount * obj.currency_quote,
            "*" if obj.currency_quote != 1 else "",
        )

    friendly_total_amount.short_description = _("total amount")

    def validated(self, obj):
        # Uses the annotated value from get_queryset (no extra query).
        return obj.validation_result == models.ReceiptValidation.RESULT_APPROVED

    validated.short_description = _("validated")
    validated.admin_order_field = "validation__result"
    validated.boolean = True

    def pdf_link(self, obj):
        # Link to create a PDF when missing, or edit the existing one.
        if not obj.pdf_id:
            return mark_safe(
                '<a href="{}?receipt={}">{}</a>'.format(
                    reverse(self.admin_site.name + ":afip_receiptpdf_add"),
                    obj.id,
                    _("Create"),
                )
            )
        return mark_safe(
            '<a href="{}">{}</a>'.format(
                reverse(
                    self.admin_site.name + ":afip_receiptpdf_change",
                    args=(obj.pdf_id,),
                ),
                _("Edit"),
            )
        )

    pdf_link.short_description = _("PDF")
    pdf_link.admin_order_field = "receiptpdf__id"

    def cae(self, obj):
        return obj.validation.cae

    cae.short_description = _("cae")
    cae.admin_order_field = "validation__cae"

    @catch_errors
    def validate(self, request, queryset):
        """Bulk action: validate the selected receipts against AFIP."""
        errs = queryset.validate()
        if errs:
            self.message_user(
                request,
                _("Receipt validation failed: %s.") % errs,
                messages.ERROR,
            )

    validate.short_description = _("Validate")

    actions = [validate]
@admin.register(models.AuthTicket)
class AuthTicketAdmin(admin.ModelAdmin):
    """Read-oriented admin listing AFIP authentication tickets."""
    list_display = (
        "unique_id",
        "owner",
        "service",
        "generated",
        "expires",
    )
@admin.register(models.TaxPayer)
class TaxPayerAdmin(admin.ModelAdmin):
    """Admin for taxpayers: fetch points of sales from AFIP, generate
    private keys, and download CSRs."""

    list_display = (
        "name",
        "cuit",
        "certificate_expiration",
    )

    @catch_errors
    def fetch_points_of_sales(self, request, queryset):
        """Fetch POSes for the selected taxpayers and report created/skipped."""
        poses = [
            pos
            for taxpayer in queryset.all()
            for pos in taxpayer.fetch_points_of_sales()
        ]
        # fetch_points_of_sales yields (pos, created) pairs.
        created = len([pos for pos in poses if pos[1]])
        skipped = len(poses) - created
        self.message_user(
            request,
            message=(_("%d points of sales created.") % created),
            level=messages.SUCCESS,
        )
        self.message_user(
            request,
            message=(_("%d points of sales already existed.") % skipped),
            level=messages.WARNING,
        )

    fetch_points_of_sales.short_description = _("Fetch points of sales")

    def generate_key(self, request, queryset):
        """Generate a private key for each selected taxpayer lacking one."""
        key_count = sum(t.generate_key() for t in queryset.all())
        if key_count == 1:
            message = _("Key generated successfully.")
            level = messages.SUCCESS
        elif key_count > 1:
            # BUGFIX: this branch previously re-tested ``key_count == 1`` and
            # was unreachable, so generating multiple keys fell through to the
            # ERROR "No keys generated" message below.
            message = _("%d keys generated successfully.") % key_count
            level = messages.SUCCESS
        else:
            message = _("No keys generated; Taxpayers already had keys.")
            level = messages.ERROR
        self.message_user(
            request,
            message=message,
            level=level,
        )

    generate_key.short_description = _("Generate key")

    def generate_csr(self, request, queryset):
        """Download a CSR for exactly one selected taxpayer, generating its
        private key first if it has none."""
        if queryset.count() > 1:
            self.message_user(
                request,
                message=_("Can only generate CSR for one taxpayer at a time."),
                level=messages.ERROR,
            )
            return
        taxpayer = queryset.first()
        if not taxpayer.key:
            taxpayer.generate_key()
        csr = taxpayer.generate_csr()
        filename = "cuit-{}-{}.csr".format(
            taxpayer.cuit,
            int(datetime.now().timestamp()),
        )
        response = HttpResponse(content_type="application/pkcs10")
        response["Content-Disposition"] = "attachment; filename={}".format(filename)
        response.write(csr.read())
        return response

    generate_csr.short_description = _("Generate CSR")

    actions = (
        fetch_points_of_sales,
        generate_key,
        generate_csr,
    )
@admin.register(models.PointOfSales)
class PointOfSalesAdmin(admin.ModelAdmin):
    """Read-oriented admin listing AFIP points of sales."""
    list_display = (
        "id",
        "owner",
        "number",
        "issuance_type",
        "blocked",
        "drop_date",
    )
@admin.register(models.CurrencyType)
class CurrencyTypeAdmin(admin.ModelAdmin):
    """Admin for AFIP currency types (searchable for autocomplete widgets)."""
    search_fields = (
        "code",
        "description",
    )
    list_display = (
        "code",
        "description",
        "valid_from",
        "valid_to",
    )
@admin.register(models.ReceiptType)
class ReceiptTypeAdmin(admin.ModelAdmin):
    """Admin for AFIP receipt types (searchable for autocomplete widgets)."""
    search_fields = (
        "code",
        "description",
    )
    list_display = (
        "code",
        "description",
        "valid_from",
        "valid_to",
    )
class ReceiptHasFileFilter(admin.SimpleListFilter):
    """Sidebar filter: ReceiptPDF rows with vs. without a generated file."""
    title = _("has file")
    parameter_name = "has_file"

    YES = "yes"
    NO = "no"

    def lookups(self, request, model_admin):
        return (
            (self.YES, _("Yes")),
            (self.NO, _("No")),
        )

    def queryset(self, request, queryset):
        # An unset FileField is stored as the empty string, not NULL.
        if self.value() == self.YES:
            return queryset.exclude(pdf_file="")
        if self.value() == self.NO:
            return queryset.filter(pdf_file="")
        return queryset
@admin.register(models.ReceiptPDF)
class ReceiptPDFAdmin(admin.ModelAdmin):
    """Admin for receipt PDFs with a bulk 'Generate pdf' action."""

    list_display = (
        "receipt_id",
        "taxpayer",
        "receipt",
        "client_name",
        "has_file",
    )
    list_filter = (
        ReceiptHasFileFilter,
        "receipt__point_of_sales__owner",
        "receipt__receipt_type",
    )
    raw_id_fields = ("receipt",)

    def get_queryset(self, request):
        # Prefetch everything the list columns need to avoid N+1 queries.
        return (
            super()
            .get_queryset(request)
            .select_related(
                "receipt",
                "receipt__point_of_sales__owner",
                "receipt__receipt_type",
            )
        )

    def taxpayer(self, obj):
        return obj.receipt.point_of_sales.owner

    taxpayer.short_description = models.TaxPayer._meta.verbose_name

    def has_file(self, obj):
        return bool(obj.pdf_file)

    has_file.admin_order_field = "pdf_file"
    has_file.boolean = True
    has_file.short_description = _("Has file")

    def generate_pdf(self, request, queryset):
        """Bulk action: (re)render the PDF file for each selected row."""
        for pdf in queryset:
            pdf.save_pdf()

    generate_pdf.short_description = _("Generate pdf")

    actions = (generate_pdf,)
@admin.register(models.ReceiptValidation)
class ReceiptValidationAdmin(admin.ModelAdmin):
    """Read-oriented admin listing AFIP validation results per receipt."""

    list_display = (
        "id",
        "receipt_number",
        "successful",
        "cae",
        "processed_date",
    )
    raw_id_fields = ("receipt",)

    def receipt_number(self, obj):
        return obj.receipt.formatted_number

    receipt_number.short_description = _("receipt number")
    receipt_number.admin_order_field = "receipt_id"

    def successful(self, obj):
        # Boolean column: True only for AFIP-approved validations.
        return obj.result == models.ReceiptValidation.RESULT_APPROVED

    successful.short_description = _("result")
    successful.admin_order_field = "result"
    successful.boolean = True
@admin.register(models.TaxPayerProfile)
class TaxPayerProfileAdmin(admin.ModelAdmin):
    """Admin for the per-taxpayer issuing profile (name/email defaults)."""
    list_display = (
        "taxpayer",
        "issuing_name",
        "issuing_email",
    )
# Catalogue models that only need the default ModelAdmin.
admin.site.register(models.ConceptType)
admin.site.register(models.DocumentType)
admin.site.register(models.VatType)
admin.site.register(models.TaxType)
admin.site.register(models.Observation)
3201460 | <gh_stars>1000+
import sys
import ctypes
from code import InteractiveConsole
from collections import deque
from threading import Thread, Lock, Event
from queue import SimpleQueue
from _godot import StdoutStderrCaptureToGodot, StdinCapture
from godot import exposed, export, ResourceLoader, VBoxContainer
from .plugin import BASE_RES
FONT = ResourceLoader.load(f"{BASE_RES}/hack_regular.tres")
class StdoutStderrCaptureToBufferAndPassthrough(StdoutStderrCaptureToGodot):
    """
    Capture stdout/stderr into an in-memory buffer (consumed by the REPL's
    output box) while still forwarding everything to Godot's console via the
    parent class.
    """

    def __init__(self):
        super().__init__()
        self._buffer = ""

    def _write(self, buff):
        # _write always executed with _lock taken
        super()._write(buff)
        self._buffer += buff

    def read_buffer(self):
        """Atomically return the captured text and clear the buffer."""
        with self._lock:
            buffer = self._buffer
            self._buffer = ""
            return buffer
class StdinCaptureToBuffer(StdinCapture):
    """
    stdin replacement backed by an in-memory buffer: the GUI thread feeds
    keystrokes in via ``write``; the interpreter thread blocks in ``read`` /
    ``readline`` until data arrives or the stream is closed.
    """

    def __init__(self):
        super().__init__()
        self._lock = Lock()
        self._has_data = Event()
        self._buffer = ""
        self._closed = False

    def _read(self, size=-1):
        # Caller must hold self._lock.
        if self._closed:
            raise EOFError
        if size < 0 or size > len(self._buffer):
            data = self._buffer
            self._buffer = ""
        else:
            data = self._buffer[:size]
            self._buffer = self._buffer[size:]
        if not self._buffer:
            self._has_data.clear()
        return data

    def read(self, size=-1):
        """Block until data is available, then return up to *size* chars."""
        while True:
            self._has_data.wait()
            with self._lock:
                # Check if a concurrent read has already processed the data
                if not self._has_data.is_set():
                    continue
                return self._read(size)

    # BUGFIX: the signature was ``def readline(size=-1):`` -- ``self`` was
    # missing, so any call raised immediately (the body uses self._buffer).
    def readline(self, size=-1):
        """Block until data is available, then return at most one line."""
        while True:
            self._has_data.wait()
            with self._lock:
                # Check if a concurrent read has already processed the data
                if not self._has_data.is_set():
                    continue
                if size < 0:
                    size = len(self._buffer)
                try:
                    # Stop at the first newline (inclusive) when present.
                    size = min(size, self._buffer.index("\n") + 1)
                except ValueError:
                    # \n not in self._buffer
                    pass
                return self._read(size)

    def write(self, buffer):
        """Append GUI keyboard input and wake up any blocked reader."""
        if not buffer:
            return
        with self._lock:
            self._has_data.set()
            self._buffer += buffer

    def close(self):
        """Mark the stream closed; pending and future reads raise EOFError."""
        self._closed = True
        # Ensure read is waken up so it can raise EOFError
        self._has_data.set()
class InteractiveConsoleInREPL(InteractiveConsole):
    """
    InteractiveConsole wired to the REPL GUI: output goes through
    *repl_write*, input comes from *repl_read*, and ``interact`` runs in a
    dedicated thread because it blocks on stdin.
    """

    def __init__(self, repl_write, repl_read):
        super().__init__(locals={"__name__": "__console__", "__doc__": None})
        # Default write/raw_input relies on stderr/stdin, overwrite them
        # to only talk with the REPL
        self.write = repl_write
        # Note overwritting `InteractiveConsole.raw_input` doesn't prevent
        # from user code directly calling `input` (for instance when typing
        # `help()` which makes use of a pager).
        self.repl_read = repl_read
        self.thread = None

    def raw_input(self, prompt):
        data = self.repl_read()
        # Print the command line in the ouput box, this is needed given
        # we have a separate input box that is cleared each time
        # the user hit enter (unlike regular terminal where input and output
        # are mixed together and enter only jumps to next line)
        self.write(f"{prompt}{data}")
        return data

    def start_in_thread(self):
        """Start ``interact`` in a background thread (may only run once)."""
        assert not self.thread
        self.thread = Thread(target=self.interact)
        self.thread.start()

    def send_keyboard_interrupt(self):
        # Inject a exception in the thread running the interpreter.
        # This is not 100% perfect given the thread checks for exception only
        # when it is actually running Python code so we cannot interrupt native
        # code (for instance calling `time.sleep` cannot be interrupted)
        ctypes.pythonapi.PyThreadState_SetAsyncExc(
            self.thread.ident, ctypes.py_object(KeyboardInterrupt)
        )
@exposed(tool=True)
class PythonREPL(VBoxContainer):
__STREAMS_CAPTURE_INSTALLED = False
def _enter_tree(self):
self.__plugin_instantiated = False
self.history = []
self.selected_history = 0
self.output_box = self.get_node("OutputBox")
self.output_box.add_font_override("normal_font", FONT)
self.output_box.add_font_override("mono_font", FONT)
self.run_button = self.get_node("FooterContainer/RunButton")
self.run_button.connect("pressed", self, "execute")
self.clear_button = self.get_node("HeaderContainer/ClearButton")
self.clear_button.connect("pressed", self, "clear")
self.interrupt_button = self.get_node("HeaderContainer/KeyboardInterruptButton")
self.interrupt_button.connect("pressed", self, "send_keyboard_interrupt")
self.input_box = self.get_node("FooterContainer/InputBox")
self.input_box.connect("text_entered", self, "execute")
# Hijack stdout/stderr/stdin streams
self.stdout_stderr_capture = StdoutStderrCaptureToBufferAndPassthrough()
self.stdin_capture = StdinCaptureToBuffer()
# Only overwrite streams if the scene has been created by the
# pythonscript_repl plugin. This avoid concurrent streams patching
# when the scene is opened from the editor (typically when we want
# to edit the repl GUI)
# TODO: find a way to differentiate plugin instantiated from other
# instantiations instead of relying on "first instantiated is plugin"
if not PythonREPL.__STREAMS_CAPTURE_INSTALLED:
PythonREPL.__STREAMS_CAPTURE_INSTALLED = True
self.__plugin_instantiated = True
self.stdout_stderr_capture.install()
self.stdin_capture.install()
# Finally start the Python interpreter, it must be running it in own
# thread given it does blocking reads on stdin
self.interpreter = InteractiveConsoleInREPL(
repl_write=self.write, repl_read=self.stdin_capture.read
)
self.interpreter.start_in_thread()
def _exit_tree(self):
# Closing our custom stdin stream should make `InteractiveConsole.interact`
# return, hence finishing the interpreter thread
self.stdin_capture.close()
self.interpreter.thread.join()
# Our custom stream capture must be removed before this node is destroyed,
# otherwise segfault will occur on next print !
if self.__plugin_instantiated:
PythonREPL.__STREAMS_CAPTURE_INSTALLED = False
self.stdout_stderr_capture.remove()
self.stdin_capture.remove()
def write(self, buffer):
for line in buffer.splitlines():
self.output_box.push_mono()
self.output_box.add_text(line)
self.output_box.newline()
self.output_box.pop()
def _process(self, delta):
if not hasattr(self, "stdout_stderr_capture"):
return
# Display new lines
self.write(self.stdout_stderr_capture.read_buffer())
def remove_last_line(self):
self.output_box.remove_line(self.output_box.get_line_count() - 2)
self.output_box.scroll_to_line(self.output_box.get_line_count() - 1)
def execute(self, *args, **kwargs):
string = str(self.input_box.get_text())
# Avoid adding multiple repeated entries to the command history
if not (len(self.history) > 0 and self.history[-1] == string):
self.history.append(string)
self.selected_history = 0
self.input_box.clear()
# Send the line into stdin and let the interpret do the rest
self.stdin_capture.write(string + "\n")
    def up_pressed(self):
        """Step one entry back in the command history (Up-arrow handler)."""
        # selected_history is a non-positive offset from the end of the list;
        # only step back while an older entry exists (offset can reach -len).
        if len(self.history) >= abs(self.selected_history - 1):
            self.selected_history -= 1
            self.input_box.clear()
            val = str(self.history[self.selected_history])
            self.input_box.set_text(val)
            self.input_box.set_cursor_position(len(val))
            self.input_box.grab_focus()
    def down_pressed(self):
        """Step one entry forward in the command history (Down-arrow handler)."""
        if self.selected_history + 1 == 0:
            # Moving past the most recent entry: reset the offset.
            self.selected_history += 1
            self.input_box.clear()
        elif self.selected_history + 1 < 0:
            self.selected_history += 1
            self.input_box.clear()
        # NOTE(review): when history is empty (selected_history == 0) neither
        # branch above runs and this indexing raises IndexError -- confirm the
        # key is only handled once at least one command was executed.
        val = str(self.history[self.selected_history])
        self.input_box.set_text(val)
        self.input_box.set_cursor_position(len(val))
        self.input_box.grab_focus()
    def clear(self):
        """Wipe all text from the REPL output box."""
        self.output_box.clear()
    def send_keyboard_interrupt(self):
        """Forward a KeyboardInterrupt to the embedded interpreter thread."""
        self.interpreter.send_keyboard_interrupt()
| StarcoderdataPython |
88875 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 15:42:46 2016
@author: ruben
"""
import os
import datetime as dt
import time as time_t
import pandas as pd
import numpy as np
import pytz
UNIDAD_ESTACIONES = 'Z:'
PATH_ESTACION_GEONICA = UNIDAD_ESTACIONES + '/geonica/'
PATH_ESTACION_HELIOS = UNIDAD_ESTACIONES + '/Estacion_Helios/'
PATH_ESTACION_METEO = UNIDAD_ESTACIONES + '/Datos Meteo IES/'
PATH_ESTACION_CAMPANYA = ''
PATH_ESTACION_FADRIQUE = ''
def persist_timeseries_to_file(filename_cache=None):
    """
    Persist the pandas DataFrame returned by a function into a cache file,
    using a decorator, so it decorates the function that returns the
    pd.DataFrame.

    The decorated function gains extra keyword parameters (it is advised to
    also declare them in the original definition, even if unused there, to
    make them explicit):

    - enable_cache=False : actually enables the cache (allows to choose it)
    - path_cache=None : path where the cache file is saved. If None, it takes
      the current path
    - update_cache=False : forces updating the cache file, even if there
      are data in it
    - verbose_cache=False : print progress information

    The first positional parameter of the decorated function must be ``time``,
    a pd.DatetimeIndex that is the index of the returned pd.DataFrame.

    Parameters
    ----------
    filename_cache : str
        Name of the cache file; its extension selects the persistence
        format ('csv', 'pickle' or 'json').

    Returns
    -------
    decorator : function
        Function that will persist data into the cache file.
    """
    from functools import wraps

    if filename_cache is None:
        raise ValueError('A cache-file name is required.')
    # Use the *last* dot-separated token so names like 'cache.v2.csv' still
    # resolve to 'csv' (the previous split('.')[1] would wrongly pick 'v2').
    persistence_type = filename_cache.rsplit('.', 1)[-1]

    def _read_cache(path_file_cache):
        # Load the whole cache file according to the persistence format.
        if persistence_type == 'csv':
            return pd.read_csv(path_file_cache, index_col=0, parse_dates=True)
        elif persistence_type == 'pickle':
            return pd.read_pickle(path_file_cache)
        elif persistence_type == 'json':
            return pd.read_json(path_file_cache)
        raise ValueError('Unknown type of persistence', persistence_type)

    def _write_cache(data, path_file_cache):
        # Overwrite the cache file with `data` in the persistence format.
        if persistence_type == 'csv':
            data.to_csv(path_file_cache)
        elif persistence_type == 'pickle':
            data.to_pickle(path_file_cache)
        elif persistence_type == 'json':
            data.to_json(path_file_cache)
        else:
            raise ValueError('Unknown type of persistence', persistence_type)

    def decorator(original_func):
        """
        Decorator function
        """
        # @wraps keeps the decorated function's metadata (name, docstring)
        # instead of exposing the wrapper's.
        @wraps(original_func)
        def new_func(time, enable_cache=False, path_cache=None,
                     update_cache=False, verbose_cache=False, *args, **kwargs):
            """
            Decorated function
            """
            if not enable_cache:
                return original_func(time, *args, **kwargs)
            if path_cache is None:
                path_cache = os.path.abspath('')
            if not os.path.exists(path_cache):
                os.makedirs(path_cache)
            path_file_cache = os.path.join(path_cache, filename_cache)
            if verbose_cache:
                print('> Path cache:', path_file_cache)
            try:
                cache = _read_cache(path_file_cache)
                if verbose_cache:
                    print('> Reading cache...')
            except (IOError, ValueError):
                if verbose_cache:
                    print('> Cache empty')
                cache = pd.DataFrame()
            if update_cache:
                # Forced refresh: recompute everything and overwrite the file.
                data = original_func(time, *args, **kwargs)
                _write_cache(data, path_file_cache)
                if verbose_cache:
                    print('> Saving data in cache...')
            elif time.isin(cache.index).all():
                data = cache.loc[time]
                if verbose_cache:
                    print('> Cache contains requested data')
            else:
                if verbose_cache:
                    print('> Reading data source...')
                # *args is now forwarded here too, consistently with the
                # enable_cache=False path above (it used to be dropped).
                data = original_func(time, *args, **kwargs)
                if not data.empty:
                    _write_cache(pd.concat([data, cache], join='inner'),
                                 path_file_cache)
                    if verbose_cache:
                        print('> Updating cache with requested data...')
                elif verbose_cache:
                    print('> Cache not updated because requested data is empty')
            return data
        return new_func
    return decorator
@persist_timeseries_to_file(filename_cache='cache_geonica.csv')
def lee_geonica(time, path_estacion=None, enable_cache=False, path_cache=None, update_cache=False):
    # Cached wrapper around lee_estacion for the 'geonica' station.
    return lee_estacion(time, tipo_estacion='geonica', path_estacion=path_estacion)
@persist_timeseries_to_file(filename_cache='cache_helios.csv')
def lee_helios(time, path_estacion=None, enable_cache=False, path_cache=None, update_cache=False):
    # Cached wrapper around lee_estacion for the 'helios' station.
    return lee_estacion(time, tipo_estacion='helios', path_estacion=path_estacion)
@persist_timeseries_to_file(filename_cache='cache_meteo.csv')
def lee_meteo(time, path_estacion=None, enable_cache=False, path_cache=None, update_cache=False):
    # Cached wrapper around lee_estacion for the 'meteo' station.
    return lee_estacion(time, tipo_estacion='meteo', path_estacion=path_estacion)
@persist_timeseries_to_file(filename_cache='cache_campanya.csv')
def lee_campanya(time, path_estacion=None, enable_cache=False, path_cache=None, update_cache=False):
    # Cached wrapper around lee_estacion for the 'campanya' station.
    return lee_estacion(time, tipo_estacion='campanya', path_estacion=path_estacion)
@persist_timeseries_to_file(filename_cache='cache_fadrique.csv')
def lee_fadrique(time, path_estacion=None, enable_cache=False, path_cache=None, update_cache=False):
    # Cached wrapper around lee_estacion for the 'fadrique' station.
    return lee_estacion(time, tipo_estacion='fadrique', path_estacion=path_estacion)
def lee_estacion(time, tipo_estacion=None, path_estacion=None, muestra_tiempo_lectura=False):
    # copied on 24/02/16 from funciones_solares.py
    """
    Read station data for the requested timestamps.

    Parameters
    ----------
    time : pandas.DatetimeIndex
        Timestamps to read.
    tipo_estacion : str
        One of 'geonica', 'helios', 'meteo', 'campanya', 'fadrique'.
    path_estacion : str, default None
        Directory holding the station files; if None, the default path for
        the station type is used.
    muestra_tiempo_lectura : bool, default False
        If True, print the elapsed reading time.

    Returns
    -------
    todo : pandas.DataFrame
        The values corresponding to 'time' (NaN rows for missing data).

    Examples
    --------
    >>> import pandas as pd
    >>> time = pd.date_range(start='2014/01/01', end='2014/01/31', freq='1T')
    >>> leido = lee_estacion(time, tipo_estacion='geonica')
    """
    if tipo_estacion is None:
        raise ValueError("Elige un tipo de estación: tipo_estacion='meteo', 'geonica', 'campanya', 'fadrique'")
    # Default data directory per known station type; unknown types fall
    # through with path_estacion unchanged (as before).
    _default_paths = {
        'geonica': PATH_ESTACION_GEONICA,
        'helios': PATH_ESTACION_HELIOS,
        'meteo': PATH_ESTACION_METEO,
        'campanya': PATH_ESTACION_CAMPANYA,
        'fadrique': PATH_ESTACION_FADRIQUE,
    }
    if path_estacion is None and tipo_estacion in _default_paths:
        path_estacion = _default_paths[tipo_estacion]
    lista_fechas_time = np.unique(time.date)
    if tipo_estacion == 'geonica':
        # Geonica files are stored in UTC: also read the previous day so the
        # later conversion to local time still finds data for the first hours.
        ayer = time.date[0] - dt.timedelta(days=1)
        lista_fechas_time = np.append(lista_fechas_time, ayer)

    def parserdatetime(date_string):
        # date + time strings from the file -> datetime
        return dt.datetime.strptime(date_string, "%Y/%m/%d %H:%M")

    def _dia_vacio(fecha):
        # Placeholder day (1-minute index, no columns) used when a file is
        # missing, malformed or empty, so the final reindex still works.
        return pd.DataFrame(index=pd.date_range(
            start=fecha, end=dt.datetime.combine(fecha, dt.time(23, 59)),
            freq='1T'))

    todo = pd.DataFrame([])
    todo.index.name = 'datetime'
    start_time = time_t.time()
    for fecha in lista_fechas_time:
        # Files for the current year live in the station root; older years
        # are archived in per-year subfolders (layout depends on the station).
        if fecha.year == dt.date.today().year:
            path = path_estacion
        elif tipo_estacion == 'geonica':
            path = path_estacion + str(fecha.year) + '/'
        elif tipo_estacion == 'helios':
            path = path_estacion + 'Data' + str(fecha.year) + '/'
        elif tipo_estacion == 'meteo':
            path = path_estacion + str(fecha.year) + '/'
        else:
            path = path_estacion
        if tipo_estacion == 'helios':
            file = path + 'data' + dt.datetime.strftime(fecha, '%Y_%m_%d') + '.txt'
        else:
            file = path + tipo_estacion + dt.datetime.strftime(fecha, '%Y_%m_%d') + '.txt'
        try:
            # 'usecols' deliberately not passed, to avoid pandas issue #14792.
            if tipo_estacion == 'meteo':
                dia = pd.read_csv(file, parse_dates=[0], index_col=0, delimiter='\t')
            else:
                dia = pd.read_csv(file, date_parser=parserdatetime, parse_dates=[[0, 1]], index_col=0, delimiter='\t')
        except IOError:
            print('No se encuentra el fichero: ', file)
            dia = _dia_vacio(fecha)
        except TypeError:
            print('Fichero con datos mal formados: ', file)
            dia = _dia_vacio(fecha)
        except pd.errors.EmptyDataError:
            print('Fichero de datos vacío: ', file)
            # BUG FIX: 'dia' used to be left unassigned here, silently reusing
            # the previous day's data (or raising NameError on the first day).
            dia = _dia_vacio(fecha)
        dia.index.name = 'datetime'
        todo = pd.concat([todo, dia]).sort_index()
    if tipo_estacion == 'geonica':
        # Geonica timestamps are UTC: convert to naive local Madrid time.
        todo.index = (todo.index.tz_localize(pytz.utc).
                      tz_convert(pytz.timezone('Europe/Madrid')).
                      tz_localize(None))
    todo = todo[~todo.index.duplicated()].reindex(time)
    if muestra_tiempo_lectura:
        print("Tiempo leyendo estación (s): {:.1f}".format(time_t.time() - start_time))
    return todo
| StarcoderdataPython |
1742799 | from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from rest_framework import permissions, status, response, views
from rest_framework.authtoken.models import Token
class LogoutViewSet(views.APIView):
    """Terminate the caller's session: revoke the DRF token and log out."""
    permission_classes = [permissions.IsAuthenticated,]
    def post(self, request):
        """Handle POST /logout; always returns 200 on success."""
        # Step 1: Delete the "auth_token" so our RESTFul API won't have a key.
        Token.objects.filter(user=request.user).delete()
        # Step 2: Close the Django session.
        logout(request)
        # Step 3: Return success message.
        return response.Response(status=status.HTTP_200_OK)
| StarcoderdataPython |
1651526 | """
Copyright, the CVXPY authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms.elementwise.inv_pos import inv_pos
from cvxpy.atoms.elementwise.power import power
from cvxpy.atoms.geo_mean import geo_mean
def inv_prod(value):
    """The reciprocal of a product of the entries of a vector ``value``.

    Parameters
    ----------
    value : Expression or numeric
        The expression whose reciprocal product is to be computed. Must have
        positive entries.

    Returns
    -------
    Expression
        .. math::
            \\left(\\prod_{i=1}^n x_i\\right)^{-1},

        where :math:`n` is the length of the vector.
    """
    return power(inv_pos(geo_mean(value)), int(sum(value.shape)))
| StarcoderdataPython |
1669198 | <filename>2017/day20.py
"""
http://adventofcode.com/2017/day/20
"""
import re
from collections import Counter
from typing import NamedTuple, Tuple, List
class Particle(NamedTuple):
    # Immutable particle state: 3D position, velocity, acceleration, and id.
    pos: Tuple[int, int, int]
    vel: Tuple[int, int, int]
    acc: Tuple[int, int, int]
    id: int
def step(p: Particle) -> Particle:
    """Advance one tick: velocity += acceleration, then position += velocity."""
    new_vel = tuple(v + a for v, a in zip(p.vel, p.acc))
    new_pos = tuple(x + v for x, v in zip(p.pos, new_vel))
    return Particle(new_pos, new_vel, p.acc, p.id)
def getDist(p: Particle) -> int:
    """Manhattan distance of the particle's position from the origin."""
    return sum(map(abs, p.pos))
def parse(input: str, id: int) -> Particle:
    """Parse one 'p=<..>, v=<..>, a=<..>' puzzle line into a Particle with `id`."""
    regex = "(-?[0-9]+,-?[0-9]+,-?[0-9]+)"
    vectors = re.findall(regex, input)
    # The three matches are position, velocity, acceleration, in that order.
    pos, vel, acc = (tuple(int(n) for n in vec.split(",")) for vec in vectors)
    return Particle(pos, vel, acc, id)
def removeCollided(pList: List[Particle]) -> List[Particle]:
    """Return only the particles whose position is not shared with any other."""
    occupancy = Counter(p.pos for p in pList)
    return [p for p in pList if occupancy[p.pos] == 1]
if __name__ == "__main__":
    # particles = TEST_INPUT.split("\n")
    with open("day20_input.txt") as f:
        raw = f.readlines()
    # part 1: after many ticks, the particle with the smallest Manhattan
    # distance is the one that stays closest to the origin long-term.
    particles = [parse(p, i) for i,p in enumerate(raw)]
    for i in range(1000):
        # 1000 ticks is empirically enough for the ordering to stabilize.
        particles = [step(p) for p in particles]
    distances = list(getDist(p) for p in particles)
    print(f"particle id which stay closer to zero: {distances.index(min(distances))}")
    # part 2: simulate again, removing all particles that share a position.
    particles = [parse(p, i) for i,p in enumerate(raw)]
    print(f"starting number of particles: {len(particles)}")
    for i in range(1000):
        particles = [step(p) for p in particles]
        particles = removeCollided(particles)
    print(f"number of particles after resolving collisions: {len(particles)}")
| StarcoderdataPython |
4829688 | <filename>async_signalr_client/models/futures/__init__.py
from .completions import InvokeCompletionFuture
__all__ = [
"InvokeCompletionFuture"
]
| StarcoderdataPython |
1784347 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
"""
This module interacts with hrpws restclient for employee appointments
"""
import logging
import traceback
from sis_provisioner.dao import (
DataFailureException, InvalidRegID, changed_since_str)
from uw_hrp.worker import get_worker_by_regid, worker_search
from sis_provisioner.util.log import log_exception, log_resp_time, Timer
logger = logging.getLogger(__name__)
def get_worker(person):
    """
    Return the Worker object for the given Person, or None.

    None is returned when the person has no current employment state,
    the regid is invalid, or the HRP web service request fails.
    """
    try:
        if person.is_emp_state_current():
            return get_worker_by_regid(person.uwregid)
    except InvalidRegID:
        logger.error("'{0}' has invalid uwregid".format(person.uwnetid))
    except DataFailureException:
        # The exception binding was unused: the traceback is logged via
        # traceback.format_exc instead.
        log_exception(
            logger,
            "Failed to get worker for '{0}'".format(person.uwnetid),
            traceback.format_exc(chain=False))
    return None
def get_worker_updates(duration):
    """
    duration: time range in minutes
    Return a list of WorkerRef objects changed within the given duration.

    Re-raises any error after logging it; the response time is always
    logged via the finally clause.
    """
    timer = Timer()
    try:
        return worker_search(changed_since=changed_since_str(duration))
    except Exception:
        log_exception(logger, "get_worker_updates",
                      traceback.format_exc(chain=False))
        raise
    finally:
        log_resp_time(logger, "get_worker_updates", timer)
    # NOTE: the previous trailing 'return []' was unreachable (the try block
    # either returns or re-raises) and has been removed.
| StarcoderdataPython |
150072 | from . import channels
from . import paillier
| StarcoderdataPython |
1672565 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_logging_ops
def _OptimizerOptions():
  """Yields ConfigProtos covering all 8 combinations of CSE, function
  inlining and constant folding (all at optimizer level L0)."""
  for cse in [False, True]:
    for inline in [False, True]:
      for cfold in [False, True]:
        yield tf.ConfigProto(graph_options=tf.GraphOptions(
            optimizer_options=tf.OptimizerOptions(
                opt_level=tf.OptimizerOptions.L0,
                do_common_subexpression_elimination=cse,
                do_function_inlining=inline,
                do_constant_folding=cfold)))
class FunctionTest(tf.test.TestCase):
  """Tests for defining, calling, differentiating and registering
  TensorFlow functions (function.Defun / function.Declare)."""
  def _mat(self, x):
    # Helper: wrap a scalar into a 1x1 float32 matrix.
    return np.array([x]).astype("float32").reshape([1, 1])
  def testBasic(self):
    g = tf.Graph()
    # Define a function
    #   foo(a:float, b:float, c:float)->u:float,v:float,w:float
    #     u = matmul(a, b) + c
    #     v = u^2
    #     w = u + v
    foo = tf.Graph()
    with foo.as_default():
      a = tf.placeholder(tf.float32, name="a")
      b = tf.placeholder(tf.float32, name="b")
      c = tf.placeholder(tf.float32, name="c")
      u = tf.add(tf.matmul(a, b), c, name="u")
      v = tf.square(u, name="v")
      w = tf.add_n([u, v], name="w")
    fdef = function._graph_to_function_def(foo, "foo", [a, b, c], [u, v, w])
    class Mock(function._DefinedFunction):
      # Minimal stand-in so a hand-built FunctionDef can be registered.
      def __init__(self, fdef):
        self._func_name = "foo"
        self._definition = fdef
        self._sub_functions = collections.OrderedDict()
        self._grad_func = None
        self._python_grad_func = None
        self._hash = hash(fdef.SerializeToString())
    g._add_function(Mock(fdef))
    # Compute 2 * 3 + 4 and its square.
    with g.as_default(), tf.Session() as sess:
      two = tf.constant(self._mat(2.0), name="two")
      three = tf.constant(self._mat(3.0), name="three")
      four = tf.constant(self._mat(4.0), name="four")
      # TODO(zhifengc): w/ @decorator sugar, we will just do:
      #   y, s, t = foo_func(two, three, four)
      # The graph contains two ops each of which calls foo.
      u0, v0, w0 = g.create_op(
          "foo", [two, three, four], [tf.float32, tf.float32, tf.float32],
          compute_shapes=False).outputs
      u1, v1, w1 = g.create_op(
          "foo", [four, two, three], [tf.float32, tf.float32, tf.float32],
          compute_shapes=False).outputs
      # Checks some property of the graph def.
      gdef = g.as_graph_def()
      self.assertEqual(len(gdef.node), 5)  # 5 nodes added.
      self.assertEqual(len(gdef.library.function), 1)  # 1 function is defined.
      for _ in xrange(10):
        # Run the graph, which is basically two function calls.
        ans_u0, ans_v0, ans_w0, ans_u1, ans_v1, ans_w1 = sess.run([u0, v0, w0,
                                                                   u1, v1, w1])
        self.assertAllEqual(ans_u0, self._mat(10.0))  # 2 * 3 + 4 = 10
        self.assertAllEqual(ans_v0, self._mat(100.0))  # 10^2 = 100
        self.assertAllEqual(ans_w0, self._mat(110.0))  # 100 + 10 = 110
        self.assertAllEqual(ans_u1, self._mat(11.0))  # 4 * 2 + 3 = 11
        self.assertAllEqual(ans_v1, self._mat(121.0))  # 11^2 = 121
        self.assertAllEqual(ans_w1, self._mat(132.0))  # 11 + 121 = 132
  def testDefineFunction2Args(self):
    @function.Defun(tf.float32, tf.float32)
    def APlus2B(a, b):
      return a + b * 2
    with tf.Graph().as_default():
      call = APlus2B([1.0], [2.0])
      self.assertEquals("APlus2B", call.op.name)
      with tf.Session() as sess:
        self.assertAllEqual([5.0], sess.run(call))
  def testGradientFunc(self):
    @function.Defun(tf.float32, func_name="XSquarePlusOneFn")
    def XSquarePlusOne(x):
      return x * x + 1.0
    @function.Defun(tf.float32, tf.float32)
    def XSquarePlusOneGrad(x, dy):
      dx = functional_ops._symbolic_gradient(
          input=[x, dy], Tout=[tf.float32], f="XSquarePlusOneFn", name="dx")
      return dx
    g = tf.Graph()
    with g.as_default():
      call_f = XSquarePlusOne([2.0])
      call_g = XSquarePlusOneGrad([2.0], [0.1])
      with tf.Session() as sess:
        self.assertAllClose([5.0], sess.run(call_f))
        self.assertAllClose([0.4], sess.run(call_g))
  def testTanhSymGrad(self):
    @function.Defun(tf.float32)
    def Forward(x):
      return tf.reduce_sum(tf.tanh(x))
    g = tf.Graph()
    with g.as_default():
      x = tf.placeholder(tf.float32)
      y = Forward(x)
      dx = tf.gradients([y], [x])
    inp = np.array([-1, 1, 2, -2], dtype=np.float32)
    feed = {x: inp}
    cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
        optimizer_options=tf.OptimizerOptions(
            opt_level=tf.OptimizerOptions.L1, do_function_inlining=True)))
    with tf.Session(graph=g, config=cfg) as sess:
      out, = sess.run(dx, feed)
    self.assertAllClose(1 - np.square(np.tanh(inp)), out)
  def testCustomGradient(self):
    dtype = tf.float32
    @function.Defun(dtype, dtype, dtype)
    def XentLossGrad(logits, labels, dloss):
      dlogits = tf.reshape(dloss, [-1, 1]) * (tf.nn.softmax(logits) - labels)
      dlabels = tf.zeros_like(labels)
      # Takes exp(dlogits) to differentiate it from the "correct" gradient.
      return tf.exp(dlogits), dlabels
    @function.Defun(dtype, dtype, grad_func=XentLossGrad)
    def XentLoss(logits, labels):
      return tf.reduce_sum(labels * tf.log(tf.nn.softmax(logits)), 1)
    g = tf.Graph()
    with g.as_default():
      logits = tf.placeholder(dtype)
      labels = tf.placeholder(dtype)
      loss = XentLoss(logits, labels)
      dlogits = tf.gradients([loss], [logits])
    x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
    y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    for cfg in _OptimizerOptions():
      print("cfg = ", cfg)
      with tf.Session(graph=g, config=cfg) as sess:
        out, = sess.run(dlogits, {logits: x, labels: y})
      self.assertAllClose(out, np.exp(prob - y))
  def testCustomGradientError(self):
    dtype = tf.float32
    @function.Defun(dtype, dtype, dtype)
    def Grad(x, dy, dz):
      # Should have returned 1 result.
      return x, dy + dz
    @function.Defun(dtype, grad_func=Grad)
    def Forward(x):
      return x, x
    g = tf.Graph()
    with g.as_default():
      inp = tf.placeholder(dtype)
      out = tf.add_n(Forward(inp))
      dinp = tf.gradients(out, [inp])
    x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    with tf.Session(graph=g) as sess:
      with self.assertRaisesRegexp(
          tf.errors.InvalidArgumentError,
          "SymGrad expects to return 1.*but get 2.*instead"):
        _ = sess.run(dinp, {inp: x})
  def testSymGradShape(self):
    g = tf.Graph()
    with g.as_default():
      x = tf.placeholder(tf.float32, [25, 4])
      y = tf.placeholder(tf.float32, [200, 100])
      dz = tf.placeholder(tf.float32, [1])
      # We assume Foo is a function of (x, y) -> (z) Then, Foo's
      # gradient function is (x, y, dz) -> (dx, dy). dx's shape
      # should be the same as x's; and dy's shape should be the same
      # as y's.
      dx, dy = functional_ops._symbolic_gradient(
          input=[x, y, dz], Tout=[tf.float32] * 2, f="Foo")
      self.assertEquals(x.get_shape(), dx.get_shape())
      self.assertEquals(y.get_shape(), dy.get_shape())
  def testZNoDepOnY(self):
    @function.Defun(tf.float32, tf.float32)
    def Foo(x, y):  # pylint: disable=unused-argument
      return x * 2
    with tf.Graph().as_default():
      # z = Foo(x, y). z does not depend on y.
      x = tf.constant(1.0)
      y = tf.constant(2.0)
      z = Foo(x, y)
      dx, dy = tf.gradients([z], [x, y])
      with tf.Session() as sess:
        dx_val, dy_val = sess.run([dx, dy])
        self.assertEquals([2.0], dx_val)
        self.assertEquals([0.0], dy_val)
  def testDefineFunctionNoArgs(self):
    @function.Defun()
    def AConstant():
      return tf.constant([42])
    with tf.Graph().as_default():
      call = AConstant()
      self.assertEquals("AConstant", call.op.name)
      with tf.Session() as sess:
        self.assertAllEqual([42], sess.run(call))
  def testDefineFunctionNames(self):
    @function.Defun(tf.float32)
    def Foo(a):
      return a + 1
    with tf.Graph().as_default():
      call1 = Foo([1.0])
      self.assertEquals("Foo", call1.op.name)
      call2 = Foo([1.0])
      self.assertEquals("Foo_1", call2.op.name)
      # pylint: disable=unexpected-keyword-arg
      call3 = Foo([1.0], name="mine")
      self.assertEquals("mine", call3.op.name)
      with tf.name_scope("my"):
        call4 = Foo([1.0], name="precious")
        self.assertEquals("my/precious", call4.op.name)
  def testNoOp(self):
    @function.Defun(tf.float32)
    def Foo(x):
      y = tf.Print(x, [x], "Hello")
      with tf.control_dependencies([y]):
        z = tf.no_op()
      with tf.control_dependencies([z]):
        return x * 2
    with tf.Graph().as_default(), self.test_session():
      z = Foo(tf.constant(3.0))
      self.assertAllEqual(z.eval(), 6.0)
  def testAssert(self):
    @function.Defun(tf.float32)
    def Foo(x):
      check = gen_logging_ops._assert(tf.greater(x, 0), [x])
      with tf.control_dependencies([check]):
        return x * 2
    g = tf.Graph()
    with g.as_default(), self.test_session():
      self.assertAllEqual(Foo(tf.constant(3.0)).eval(), 6.0)
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   "assertion failed.*-3"):
        self.assertAllEqual(Foo(tf.constant(-3.0)).eval(), 6.0)
  def testVar(self):
    @function.Defun(tf.float32)
    def Foo(x):
      return x * x + 1
    g = tf.Graph()
    with g.as_default():
      v = tf.Variable(tf.constant(10.0))
      z = Foo(v)
    with self.test_session(graph=g):
      tf.initialize_all_variables().run()
      self.assertAllEqual(z.eval(), 101.)
  def testDefineErrors(self):
    with tf.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, "return at least one tensor"):
        @function.Defun()
        def NoResult():
          pass
        _ = NoResult.definition
      with self.assertRaisesRegexp(ValueError, "are not supported"):
        @function.Defun()
        def DefaultArg(unused_a=12):
          return tf.constant([1])
        _ = DefaultArg.definition
      with self.assertRaisesRegexp(ValueError, "are not supported"):
        @function.Defun()
        def KwArgs(**unused_kwargs):
          return tf.constant([1])
        _ = KwArgs.definition
      with self.assertRaisesRegexp(ValueError, "specified input types"):
        @function.Defun()
        def PlusMinusV1(a, b):
          return a + b, b - a
        _ = PlusMinusV1.definition
      with self.assertRaisesRegexp(ValueError, "specified input types"):
        @function.Defun(tf.float32)
        def PlusMinusV2(a, b):
          return a + b, b - a
        _ = PlusMinusV2.definition
      with self.assertRaisesRegexp(ValueError, "specified input types"):
        @function.Defun(tf.float32, tf.float32, tf.float32)
        def PlusMinusV3(a, b):
          return a + b, b - a
        _ = PlusMinusV3.definition
  def testCallErrors(self):
    @function.Defun()
    def Const():
      return tf.constant(1)
    @function.Defun(tf.int32)
    def PlusOne(a):
      return a + 1
    @function.Defun(tf.int32, tf.int32)
    def PlusMinus(a, b):
      return a + b, b - a
    with tf.Graph().as_default():
      _ = Const()
      # pylint: disable=too-many-function-args
      # pylint: disable=unexpected-keyword-arg
      # pylint: disable=no-value-for-parameter
      with self.assertRaisesRegexp(ValueError, "arguments: 0"):
        _ = Const(1)
      with self.assertRaisesRegexp(ValueError, "arguments: 0"):
        _ = Const(1, 2)
      with self.assertRaisesRegexp(ValueError, "arguments: 1"):
        _ = PlusOne()
      _ = PlusOne(1)
      with self.assertRaisesRegexp(ValueError, "arguments: 1"):
        _ = PlusOne(1, 2)
      with self.assertRaisesRegexp(ValueError, "arguments: 2"):
        _ = PlusMinus()
      with self.assertRaisesRegexp(ValueError, "arguments: 2"):
        _ = PlusMinus(1)
      _ = PlusMinus(1, 2)
      _ = PlusOne(1, name="p1")
      with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
        _ = PlusOne(1, device="/gpu:0")
  def testDupDefinition(self):
    @function.Defun(tf.float32)
    def Foo(x):
      return x + 1
    @function.Defun(tf.float32, func_name="Foo")
    def Bar(x):
      return x + 1
    @function.Defun(tf.float32, func_name="Foo")
    def Baz(x):
      return x + 2
    with tf.Graph().as_default():
      y = Foo(100.0)
      z = Bar(100.0)  # OK.
      with self.test_session():
        self.assertAllEqual(y.eval(), z.eval())
      with self.assertRaisesRegexp(ValueError, "already defined"):
        z = Baz(100.0)
  def testFunctionDecorator(self):
    @function.Defun(tf.float32)
    def Minus1(b):
      return b - 1.0
    with tf.Graph().as_default():
      call1 = Minus1([2.])
      self.assertTrue(isinstance(Minus1, function._DefinedFunction))
      self.assertEqual(Minus1.name, "Minus1")
      # pylint: disable=unexpected-keyword-arg
      call2 = Minus1(call1, name="next")
      # pylint: enable=unexpected-keyword-arg
      self.assertEquals("next", call2.op.name)
      with tf.Session() as sess:
        self.assertAllEqual([1], sess.run(call1))
        self.assertAllEqual([0], sess.run(call2))
  def testNestedFunction(self):
    @function.Defun(tf.float32)
    def Cube(x):
      return x * x * x
    @function.Defun(tf.float32, tf.float32)
    def CubeXPlusY(x, y):
      return Cube(x) + y
    with tf.Graph().as_default():
      z = CubeXPlusY(3.0, -2.0)
      with self.test_session():
        self.assertAllEqual(z.eval(), 25.0)
  def testNestedDefinedFunction(self):
    @function.Defun(tf.float32, tf.float32)
    def CubeXPlusY(x, y):
      @function.Defun(tf.float32)
      def Cube(x):
        return x * x * x
      return Cube(x) + y
    with tf.Graph().as_default():
      z = CubeXPlusY(3.0, -2.0)
      with self.test_session():
        self.assertAllEqual(z.eval(), 25.0)
  def testUnusedFunction(self):
    invoked = False
    # pylint: disable=unused-variable
    @function.Defun()
    def Unused():
      invoked = True
      return tf.constant(42.)
    self.assertFalse(invoked)
    g = tf.Graph()
    with g.as_default():
      @function.Defun()
      def Unused2():
        invoked = True
        return tf.constant(7.)
      tf.constant(3.)
    # pylint: enable=unused-variable
    self.assertFalse(invoked)
    gdef = g.as_graph_def()
    self.assertEquals(0, len(gdef.library.function))
  def testReduction(self):
    g = tf.Graph()
    # BN0 is computing batch normed matrix along rows.
    def BN0(x):
      mean = tf.reduce_mean(x, [0])
      var = tf.reduce_mean(tf.square(x - mean))  # biased var
      rstd = tf.rsqrt(var + 1e-8)
      return (x - mean) * rstd
    # Wraps BatchNorm in a tf function.
    @function.Defun(tf.float32)
    def BN1(x):
      return BN0(x)
    with g.as_default():
      x = tf.placeholder(tf.float32)
      y0 = BN0(x)  # A plain graph
      y1 = BN1(x)  # A tf function
      dx0, = tf.gradients([y0], [x])
      dx1, = tf.gradients([y1], [x])
    # Both should produce the same result and gradient.
    with self.test_session(graph=g) as sess:
      vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
      self.assertAllClose(vals[0], vals[1])
      self.assertAllClose(vals[2], vals[3])
  def testDeclareTypeMistake(self):
    foo = function.Declare("Foo", [tf.float32], [tf.float32])
    @function.Defun(tf.float32)
    def Foo(x):
      return x * x + 1
    g = tf.Graph()
    with g.as_default():
      y = foo(2.0)
      with self.test_session(graph=g):
        with self.assertRaisesRegexp(tf.errors.NotFoundError, "not registered"):
          _ = y.eval()
    g = tf.Graph()
    with g.as_default():
      Foo.add_to_graph(g)
      y = foo(2)
      with self.test_session(graph=g):
        with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                     "int32.*float"):
          _ = y.eval()
    g = tf.Graph()
    with g.as_default():
      Foo.add_to_graph(g)
      with self.assertRaisesRegexp(
          ValueError, "Expected number of arguments: 1, received: 2"):
        _ = foo(2.0, 2.0)
    g = tf.Graph()
    with g.as_default():
      Foo.add_to_graph(g)
      y = foo(2.0)
      with self.test_session(graph=g):
        self.assertAllEqual(y.eval(), 5.0)
class UnrollLSTMTest(tf.test.TestCase):
BATCH_SIZE = 16
LSTM_DIMS = 32
NUM_UNROLL = 20
def _Weights(self):
dims = self.LSTM_DIMS
return tf.random_uniform([2 * dims, 4 * dims], -1, 1, seed=123456)
def _Input(self):
return tf.random_uniform(
[self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS], seed=654321)
# Helper to construct a LSTM cell graph.
@classmethod
def LSTMCell(cls, x, mprev, cprev, weights):
xm = tf.concat(1, [x, mprev])
i_i, i_g, f_g, o_g = tf.split(1, 4, tf.matmul(xm, weights))
new_c = tf.sigmoid(f_g) * cprev + tf.sigmoid(i_g) * tf.tanh(i_i)
new_c = tf.clip_by_value(new_c, -50.0, 50.0)
new_m = tf.sigmoid(o_g) * tf.tanh(new_c)
return new_m, new_c
def _BuildForward(self, weights, inp, mode="cell"):
def Loop(cell, w, i):
x = tf.unpack(i, self.NUM_UNROLL)
m = tf.zeros_like(x[0])
c = tf.zeros_like(x[0])
for i in range(self.NUM_UNROLL):
m, c = cell(x[i], m, c, w)
return m
cell = UnrollLSTMTest.LSTMCell
if mode == "complete":
# Constructs the complete graph in python.
return Loop(cell, weights, inp)
cell = function.Defun(tf.float32, tf.float32, tf.float32, tf.float32)(cell)
if mode == "cell":
# Just represent the LSTM as a function.
return Loop(cell, weights, inp)
if mode == "loop":
# Wraps the whole loop as a function.
@function.Defun(tf.float32, tf.float32)
def LSTMLoop(w, i):
return Loop(cell, w, i)
return LSTMLoop(weights, inp)
if mode == "loop10":
# Wraps 10 lstm steps into one function, and the whole loop
# into another calling the formers.
# Groups 10 steps at a time.
@function.Defun(tf.float32, tf.float32, tf.float32, *([tf.float32] * 10))
def Loop10(w, m, c, *args):
for x in args:
m, c = cell(x, m, c, w)
return m, c
@function.Defun(tf.float32, tf.float32)
def LSTMLoop10(weights, inp):
x = tf.unpack(inp, self.NUM_UNROLL)
m = tf.zeros_like(x[0])
c = tf.zeros_like(x[0])
assert self.NUM_UNROLL % 10 == 0
for i in range(0, self.NUM_UNROLL, 10):
m, c = Loop10(weights, m, c, *x[i:i + 10])
return m
return LSTMLoop10(weights, inp)
def testUnrollLSTM(self):
# Run one step of the unrolled lstm graph.
def RunForward(mode, cfg=None):
print("mode = ", mode)
g = tf.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
gdef = g.as_graph_def()
finish = time.time()
print("time: ", finish - start, " txt size: ", len(str(gdef)),
"gdef bin size: ", len(gdef.SerializeToString()))
with g.as_default(), tf.Session(config=cfg) as sess:
return sess.run(m)
mv0 = RunForward("complete")
for cfg in _OptimizerOptions():
print("cfg = ", cfg)
mv1 = RunForward("cell", cfg)
mv2 = RunForward("loop", cfg)
mv3 = RunForward("loop10", cfg)
self.assertAllClose(mv0, mv1, rtol=1e-4)
self.assertAllClose(mv0, mv2, rtol=1e-4)
self.assertAllClose(mv0, mv3, rtol=1e-4)
def testUnrollLSTMGrad(self):
    """Checks dLoss/dWeights agrees across all build modes."""

    def build_and_run_grad(mode, cfg=None):
        # Build forward graph + loss + gradient w.r.t. the weights,
        # report build statistics, then evaluate the gradient.
        print("mode = ", mode)
        graph = tf.Graph()
        started = time.time()
        with graph.as_default():
            weights = self._Weights()
            inp = self._Input()
            state = self._BuildForward(weights, inp, mode)
            loss = tf.reduce_sum(tf.square(state))
            dw = tf.gradients([loss], [weights])
        gdef = graph.as_graph_def()
        stopped = time.time()
        print("time: ", stopped - started, " txt size: ", len(str(gdef)),
              "gdef bin size: ", len(gdef.SerializeToString()))
        with graph.as_default(), tf.Session(config=cfg) as sess:
            return sess.run(dw)

    # The fully-inlined python graph is the reference result.
    baseline = build_and_run_grad("complete")
    for cfg in _OptimizerOptions():
        print("cfg = ", cfg)
        for mode in ("cell", "loop", "loop10"):
            candidate = build_and_run_grad(mode, cfg)
            self.assertAllClose(baseline, candidate, rtol=1e-4)
class FunctionInlineControlTest(tf.test.TestCase):
    """Tests the per-function `noinline` attribute of Defun."""

    def testFoo(self):
        # The result must be identical whether or not the optimizer is
        # allowed to inline the function bodies.
        dtype = tf.float32
        cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
            optimizer_options=tf.OptimizerOptions(
                opt_level=tf.OptimizerOptions.L0,
                do_common_subexpression_elimination=True,
                do_function_inlining=True,
                do_constant_folding=True)))
        for noinline in [False, True]:
            # pylint: disable=unexpected-keyword-arg
            @function.Defun(dtype, noinline=noinline)
            def Cell(v):
                # If v is a vector [n, 1], x is a big square matrix.
                x = tf.tanh(v + tf.transpose(v, [1, 0]))
                return tf.reduce_sum(x, 1, keep_dims=True)

            @function.Defun(dtype)
            def Forward(x):
                # Apply the cell ten times.
                for _ in range(10):
                    # pylint: disable=cell-var-from-loop
                    x = Cell(x)
                return tf.reduce_sum(x, [0, 1])

            g = tf.Graph()
            with g.as_default():
                x = tf.placeholder(dtype)
                y = Forward(x)
                dx, = tf.gradients([y], [x])

            # Fixed seed so the golden values below are reproducible.
            np.random.seed(321)
            inp = np.random.uniform(-1, 1, [16, 1]).astype(np.float32)
            with tf.Session(graph=g, config=cfg) as sess:
                ans = sess.run([y, dx], {x: inp})
                print(ans[0], np.sum(ans[1]))
                # Golden values; loose tolerance since optimizer choices differ.
                self.assertAllClose(ans[0], 255.971, rtol=1e-3)
                self.assertAllClose(np.sum(ans[1]), 13.0408, rtol=1e-3)
@function.Defun(*[tf.float32] * 3)
def Linear(w, b, x):
    """Affine transform of `x` followed by a ReLU."""
    affine = tf.matmul(x, w) + b
    return tf.nn.relu(affine)
@function.Defun(*[tf.float32] * 5)
def Linear2(w1, b1, w2, b2, x):
    """Two stacked `Linear` layers applied to `x`."""
    hidden = Linear(w1, b1, x)
    return Linear(w2, b2, hidden)
class ModuleFunctionTest(tf.test.TestCase):
    """Tests that module-level Defuns can be used inside any graph."""

    def testBasic(self):
        with tf.Graph().as_default():
            # Five 1x1 constants with values 0..4.
            a, b, c, d, e = [tf.constant([[_]], dtype=tf.float32) for _ in range(5)]
            y = Linear(a, b, c)
            z = Linear2(a, b, c, d, e)
            with tf.Session() as sess:
                # relu(2*0 + 1) == 1
                self.assertAllEqual([[1]], sess.run(y))
                # relu(relu(2*0 + 1)*3 + 2) == 5
                self.assertAllEqual([[5]], sess.run(z))
if __name__ == "__main__":
    # Run all TensorFlow test cases defined in this module.
    tf.test.main()
| StarcoderdataPython |
3361661 | from django.apps import AppConfig
class NSFaultManagementConfig(AppConfig):
    """Django application configuration for the NSFaultManagement app."""
    name = 'NSFaultManagement'
| StarcoderdataPython |
1656856 | import pyblish.api
import openpype.api
import hou
from openpype.hosts.houdini.api import lib
class CollectRemotePublishSettings(pyblish.api.ContextPlugin):
    """Collect custom settings of the Remote Publish node.

    Reads all parameters from the fixed /out/REMOTE_PUBLISH ROP node (if
    present) and merges them into the publish context data.
    """

    order = pyblish.api.CollectorOrder
    families = ["*"]
    hosts = ["houdini"]
    targets = ["deadline"]
    label = "Remote Publish Submission Settings"
    actions = [openpype.api.RepairAction]

    def process(self, context):
        # The submission settings live on a fixed ROP node; nothing to do
        # when the scene does not contain it.
        node = hou.node("/out/REMOTE_PUBLISH")
        if not node:
            return

        attributes = lib.read(node)

        # Debug the settings we have collected
        for key, value in sorted(attributes.items()):
            self.log.debug("Collected %s: %s" % (key, value))

        context.data.update(attributes)
| StarcoderdataPython |
3313022 | <reponame>christophe12/RaspberryPython
#RGB of common colors:
# Aqua -> (0, 255, 255)
# Black -> (0, 0, 0)
# Blue -> (0, 0, 255)
# Fuchsia -> (255, 0, 255)
# Gray -> (128, 128, 128)
# Green -> (0, 128, 0)
# Lime -> (0, 255, 0)
# Maroon -> (128, 0, 0)
# Navy Blue -> (0, 0, 128)
# Olive -> (128, 128, 0)
# Purple -> (128, 0, 128)
# Red -> (255, 0, 0)
# Silver -> (192, 192, 192)
# Teal -> (0, 128, 128)
# White -> (255, 255, 255)
# Yellow -> (255, 255, 0)
#the convert_alpha() method -> lets you put transparent colors on a Surface object
# pygame.Color() -> lets you specify the parameters of the colors you will use.
1660081 | import requests
import sys
from os.path import basename, splitext
#from platformio import util
from datetime import date
Import('env')
# Python 3 ships `configparser`; fall back to the Python 2 module name.
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

# Read the PlatformIO project configuration (per-environment options).
config = configparser.ConfigParser()
config.read("platformio.ini")
#
#ota_config = {k: v for k, v in config.get("mqtt_ota")}
#version = "2019-04-17" # project_config.get("common", "release_version")
#
# Push new firmware to the OTA storage
#
def publish_firmware(source, target, env):
    """Upload the built firmware image to the OTA HTTP store.

    Invoked by PlatformIO as a custom UPLOADCMD.  ``source[0]`` is the
    path of the firmware binary; the version tag is today's date in ISO
    format.  Exits the build (env.Exit(1)) on any upload failure.
    """
    firmware_path = str(source[0])
    firmware_name = splitext(basename(firmware_path))[0]
    version = date.today().isoformat()
    print("Uploading {0} to OTA store. Version: {1}".format(
        firmware_name, version))
    url = "/".join([
        "http://core.voneicken.com:1880", "esp32-firmware",
        firmware_name,
        version
    ])
    print("URL: {0}".format(url))
    headers = {
        "Content-type": "application/octet-stream",
    }
    # `mqtt_device` is an optional per-environment option; guard the lookup
    # because ConfigParser.get raises when the option is absent (and the
    # Python 2 fallback has no `fallback=` keyword).
    section = "env:" + env['PIOENV']
    mqtt_device = (config.get(section, "mqtt_device")
                   if config.has_option(section, "mqtt_device") else None)
    if mqtt_device:
        headers["mqtt_device"] = mqtt_device
        print("OTA: command will be sent to {0}/ota".format(mqtt_device))
    r = None
    try:
        # Context manager so the firmware file handle is always closed.
        with open(firmware_path, "rb") as firmware:
            r = requests.put(url, data=firmware, headers=headers)
        r.raise_for_status()
    except requests.exceptions.RequestException as e:
        # NOTE: `r is not None` (not `if r`) — a requests.Response is falsy
        # for 4xx/5xx statuses, which would hide the status/text details.
        sys.stderr.write("Failed to submit package: %s\n" %
                         ("%s\n%s" % (r.status_code, r.text) if r is not None else str(e)))
        env.Exit(1)
    print("The firmware has been successfuly uploaded")
# Custom upload command and program name: register publish_firmware as the
# action PlatformIO runs for "upload", and name the program after the project.
print(env["PROJECT_DIR"])
env.Replace(
    PROGNAME=basename(env["PROJECT_DIR"]),
    UPLOADCMD=publish_firmware
)
| StarcoderdataPython |
4821980 | """
numtoword.py
Yet another number to words in Python
Copyright 2021 Wardhana <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def numtoword(num):
    """Convert *num* (int or float) to Indonesian words.

    Negative values are prefixed with "minus".  The fractional part of a
    float is read digit-by-digit after "koma" (leading zeros preserved,
    so 0.05 -> "nol koma nol lima").  Returns "angka terlalu besar" when
    the integer part exceeds the largest supported scale word.

    NOTE: very small/large floats whose str() uses scientific notation
    are not supported (same limitation as the original implementation).
    """
    digit = {
        0: "nol",
        1: "satu",
        2: "dua",
        3: "tiga",
        4: "empat",
        5: "lima",
        6: "enam",
        7: "tujuh",
        8: "delapan",
        9: "sembilan"
    }
    # Only official wording is listed.
    #
    # Reference:
    #
    # Pedoman Umum Pembentukan Istilah Edisi Ketiga, pp. 55-56
    # https://badanbahasa.kemdikbud.go.id/lamanbahasa/sites/default/files/Pedoman_Umum%20Pembentukan_Istilah_PBN_0.pdf
    #
    # Accessed: 6 December 2021
    tenpow = {
        1: "puluh",
        2: "ratus",
        3: "ribu",
        6: "juta",
        9: "miliar",
        12: "triliun",
        15: "kuadriliun",
        18: "kuintiliun",
        21: "sekstiliun",
        24: "septiliun",
        27: "oktiliun",
        30: "noniliun",  # Not listed in KBBI.
        33: "desiliun"
    }
    # alias order matters: "satu puluh" must become "sepuluh" before the
    # "sepuluh X" -> "X belas" rewrites can apply.
    # BUGFIX: "satu ribu" -> "seribu" is intentionally NOT in this table;
    # replacing it everywhere corrupted e.g. 21000 ("dua puluh satu ribu"
    # became "dua puluh seribu").  It is now applied only at the start of
    # the result, where the thousands group really is one.
    alias = {
        "satu puluh": "sepuluh",
        "sepuluh satu": "sebelas",
        "sepuluh dua": "dua belas",
        "sepuluh tiga": "tiga belas",
        "sepuluh empat": "empat belas",
        "sepuluh lima": "lima belas",
        "sepuluh enam": "enam belas",
        "sepuluh tujuh": "tujuh belas",
        "sepuluh delapan": "delapan belas",
        "sepuluh sembilan": "sembilan belas",
        "satu ratus": "seratus"
    }
    point = "koma"
    minus = "minus"
    tooLarge = "angka terlalu besar"

    ### integer to words ###
    def itowords(inum):
        s = str(inum)
        l = len(s)
        w = []
        t = 0  # power of ten of the scale word most recently emitted
        for i in range(l):
            n = int(s[i])
            if n != 0:
                w.append(digit[n])
                t = 0
            elif i == 0:  # inum itself is zero
                w.append(digit[n])
                break
            # the main algorithm: emit the scale word for this digit
            # position (puluh/ratus within a group, ribu/juta/... at group
            # boundaries) unless a larger one was just emitted.
            x = l - i - 1
            y = x % 3
            z = x if y == 0 else y
            if z > t:
                w.append(tenpow[z])
                t = z
        words = " ".join(w)
        for k, v in alias.items():
            words = words.replace(k, v)
        # "seribu" only when the leading thousands group is exactly one:
        # 1000 -> "seribu", but 21000 stays "dua puluh satu ribu".
        if words.startswith("satu ribu"):
            words = "seribu" + words[len("satu ribu"):]
        return words

    ### decimal to words ###
    def dtowords(dstr):
        # BUGFIX: operate on the raw fractional string instead of int(...)
        # so leading zeros are preserved (0.05 is not 0.5).
        return " ".join(digit[int(ch)] for ch in dstr)

    ### process number ###
    minus = (minus + " ") if num < 0 else ""
    numAry = str(abs(num)).split('.')
    # BUGFIX: count digits of the absolute value so the '-' sign does not
    # count as a digit in the too-large check.
    if len(str(abs(int(num)))) > max(tenpow.keys()) + 3:
        return tooLarge
    elif len(numAry) == 1:  # num is an integer
        return minus + itowords(abs(num))
    else:  # num is a float
        inum = int(abs(num))
        return minus + " ".join([itowords(inum), point, dtowords(numAry[1])])
# vim: ts=2 sts=2 sw=2 et
| StarcoderdataPython |
3203100 | <reponame>fadeevab/python-3-simple-server<gh_stars>0
#!/usr/bin/python3
from http import server
PORT = 8080
class CookieHandler(server.BaseHTTPRequestHandler):
    """Demo handler: sets a cross-subdomain cookie and echoes a greeting."""

    def do_GET(self):
        # Every request is answered with 200 OK.
        self.send_response(200)
        # Hand out the cookie only when the request was addressed to
        # localhost; the cookie is scoped to the sub.localhost domain.
        if 'Host' in self.headers and \
                self.headers['Host'].startswith("localhost"):
            self.send_header('Set-Cookie', 'auth=1234; Domain=sub.localhost')
        self.end_headers()
        # Extra greeting for requests claiming an Origin of fadeevab.com.
        if 'Origin' in self.headers and \
                self.headers['Origin'].startswith('fadeevab.com'):
            self.wfile.write(b'I believe this request is from fadeevab.com!\n')
        self.wfile.write(b'Hello, world!\n')
# Bind to the loopback interface only and serve until interrupted.
httpd = server.HTTPServer(('127.0.0.1', PORT), CookieHandler)
httpd.serve_forever()
| StarcoderdataPython |
1767054 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# Package metadata for the engineeringdiplomats.org web application.
setup(
    name="engineering_diplomats",
    version="1.0.0",
    description="engineeringdiplomats.org",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
)
| StarcoderdataPython |
1652000 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField, SubmitField
class ShotForm(FlaskForm):
    """Form for assigning an asset task and an assignee to a shot."""
    # Choices for both selects are populated at runtime by the view.
    asset_task = SelectField('Add Asset task:', id='asset-task')
    asset_users = SelectField('Add Assignee:', id='asset-task-user')
    submit = SubmitField('submit')
| StarcoderdataPython |
1698438 | <reponame>Raalsky/neptune-client
#
# Copyright (c) 2021, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from neptune.new.internal.artifacts.types import ArtifactMetadataSerializer
class TestArtifactMetadataSerializer(unittest.TestCase):
    """Round-trip tests for ArtifactMetadataSerializer."""

    def test_simple(self):
        metadata = {
            "location": "s3://bucket/path/to/file",
            "last_modification": "2021-08-09 09:41:53",
            "file_size": "18",
        }

        serialized = ArtifactMetadataSerializer.serialize(metadata)
        # Serialization emits one {key, value} pair per entry, sorted by key.
        self.assertListEqual(
            [
                {"key": "file_size", "value": "18"},
                {"key": "last_modification", "value": "2021-08-09 09:41:53"},
                {"key": "location", "value": "s3://bucket/path/to/file"},
            ],
            serialized,
        )

        # Deserializing the pairs reproduces the original dict exactly.
        deserialized = ArtifactMetadataSerializer.deserialize(serialized)
        self.assertDictEqual(metadata, deserialized)
| StarcoderdataPython |
3228531 | """
__init__.py
pytracer.geometry package
Created by Jiayao on Aug 13, 2017
"""
from __future__ import absolute_import
from typing import overload
from pytracer import *
# Classes
class Vector(np.ndarray):
    """
    Vector Class

    A wrapper subclassing numpy.ndarray which
    models a 3D vector.
    """
    @overload
    def __new__(cls, x: float=0., y: float=0., z: float=0, dtype=float):
        pass

    def __new__(cls, x: FLOAT=0., y: FLOAT=0., z: FLOAT=0, dtype=FLOAT):
        # Allocate the underlying 3-element array; components are filled
        # in __init__ (ndarray subclasses split construction this way).
        return np.empty(3, dtype=dtype).view(cls)

    @overload
    def __init__(self, x: float=0., y: float=0., z: float=0., dtype=float):
        pass

    def __init__(self, x: FLOAT=0., y: FLOAT=0., z: FLOAT=0., dtype=FLOAT):
        # NaN components are rejected up front.
        if np.isnan(x) or np.isnan(y) or np.isnan(z):
            raise ValueError
        super().__init__()
        self[0] = x
        self[1] = y
        self[2] = z

    @classmethod
    def from_arr(cls, n: 'np.ndarray'):  # Forward type hint (PEP-484)
        """Construct from any length-3 array-like."""
        assert np.shape(n)[0] == 3
        return cls(n[0], n[1], n[2])

    @property
    def x(self):
        return self[0]

    @x.setter
    def x(self, value):
        self[0] = value

    @property
    def y(self):
        return self[1]

    @y.setter
    def y(self, value):
        self[1] = value

    @property
    def z(self):
        return self[2]

    @z.setter
    def z(self, value):
        self[2] = value

    def __eq__(self, other):
        # Whole-array equality (overrides ndarray's elementwise ==).
        return np.array_equal(self, other)

    def __ne__(self, other):
        return not np.array_equal(self, other)

    def abs_dot(self, other) -> FLOAT:
        """Absolute value of the dot product with `other`."""
        return np.fabs(np.dot(self, other))

    def cross(self, other) -> 'Vector':
        """Cross product; always returns a Vector."""
        return Vector(self.y * other.z - self.z * other.y,
                      self.z * other.x - self.x * other.z,
                      self.x * other.y - self.y * other.x)

    def sq_length(self) -> FLOAT:
        """Squared Euclidean length."""
        return self.x * self.x + self.y * self.y + self.z * self.z

    def length(self) -> FLOAT:
        """Euclidean length."""
        return np.sqrt(self.sq_length())

    def normalize(self):
        """inplace normalization; a zero vector is left unchanged"""
        length = self.length()
        if not length == 0:
            self /= length
        return self
class Point(np.ndarray):
    """
    Point class

    A wrapper subclassing numpy.ndarray which
    models a 3D point. Note the subtle difference
    between point and vector arithmetic.

    The default inheritance behavior is consistent
    with the notion of Point and Vector arithmetic:
    the returning type is the same as the type
    of the first operand, thus we may, e.g., offset
    a point by a vector.
    """
    @overload
    def __new__(cls, x: float=0., y: float=0., z: float=0, dtype=float):
        pass

    def __new__(cls, x: FLOAT=0., y: FLOAT=0., z: FLOAT=0, dtype=FLOAT):
        # Allocate the underlying 3-element array; components are filled
        # in __init__.
        return np.empty(3, dtype=dtype).view(cls)

    @overload
    def __init__(self, x: float=0., y: float=0., z: float=0., dtype=float):
        pass

    def __init__(self, x: FLOAT=0., y: FLOAT=0., z: FLOAT=0., dtype=FLOAT):
        # NaN components are rejected up front.
        if np.isnan(x) or np.isnan(y) or np.isnan(z):
            raise ValueError
        super().__init__()
        self.x = x
        self.y = y
        self.z = z

    @classmethod
    def from_arr(cls, n: 'np.ndarray'):  # Forward type hint (PEP-484)
        """Construct from any length-3 array-like."""
        assert np.shape(n)[0] == 3
        return cls(n[0], n[1], n[2])

    @property
    def x(self):
        return self[0]

    @x.setter
    def x(self, value):
        self[0] = value

    @property
    def y(self):
        return self[1]

    @y.setter
    def y(self, value):
        self[1] = value

    @property
    def z(self):
        return self[2]

    @z.setter
    def z(self, value):
        self[2] = value

    def __eq__(self, other):
        return np.array_equal(self, other)

    def __ne__(self, other):
        return not np.array_equal(self, other)

    def __sub__(self, other):
        # Point - Point yields the displacement Vector;
        # Point - Vector yields a translated Point.
        if isinstance(other, Point):  # no other methods found
            return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
        elif isinstance(other, Vector):
            return Point(self.x - other.x, self.y - other.y, self.z - other.z)
        else:
            raise TypeError("unsupported __sub__ between '{}' and '{}'".format(self.__class__, type(other)))

    def __isub__(self, other):
        # In-place subtraction is ill-defined for points.
        raise TypeError("undefined inplace substraction between Point")

    # addition, however, is defined, as can be used for weighing points

    def sq_length(self) -> FLOAT:
        """Squared distance from the origin."""
        return self.x * self.x + self.y * self.y + self.z * self.z

    def length(self) -> FLOAT:
        """Distance from the origin."""
        return np.sqrt(self.sq_length())

    def sq_dist(self, other) -> FLOAT:
        """Squared distance to another point."""
        return (self.x - other.x) * (self.x - other.x) + \
               (self.y - other.y) * (self.y - other.y) + \
               (self.z - other.z) * (self.z - other.z)

    def dist(self, other) -> FLOAT:
        """Euclidean distance to another point."""
        return np.sqrt((self.x - other.x) * (self.x - other.x) +
                       (self.y - other.y) * (self.y - other.y) +
                       (self.z - other.z) * (self.z - other.z))
class Normal(np.ndarray):
    """
    Normal vector class

    A wrapper subclassing numpy.ndarray which
    models a 3D surface normal. Mirrors `Vector`
    but is kept as a distinct type so transforms
    can treat normals differently.
    """
    @overload
    def __new__(cls, x: float=0., y: float=0., z: float=0, dtype=float):
        pass

    def __new__(cls, x: FLOAT=0., y: FLOAT=0., z: FLOAT=0, dtype=FLOAT):
        # Allocate the underlying 3-element array; components are filled
        # in __init__.
        return np.empty(3, dtype=dtype).view(cls)

    @overload
    def __init__(self, x: float=0., y: float=0., z: float=0., dtype=float):
        pass

    def __init__(self, x: FLOAT=0., y: FLOAT=0., z: FLOAT=0., dtype=FLOAT):
        super().__init__()
        # NaN components are rejected.
        if np.isnan(x) or np.isnan(y) or np.isnan(z):
            raise ValueError
        self.x = x
        self.y = y
        self.z = z

    @classmethod
    def from_arr(cls, n: 'np.ndarray'):  # Forward type hint (PEP-484)
        """Construct from any length-3 array-like."""
        assert np.shape(n)[0] == 3
        return cls(n[0], n[1], n[2])

    @property
    def x(self):
        return self[0]

    @x.setter
    def x(self, value):
        self[0] = value

    @property
    def y(self):
        return self[1]

    @y.setter
    def y(self, value):
        self[1] = value

    @property
    def z(self):
        return self[2]

    @z.setter
    def z(self, value):
        self[2] = value

    def __eq__(self, other):
        return np.array_equal(self, other)

    def __ne__(self, other):
        return not np.array_equal(self, other)

    def abs_dot(self, other):
        """Absolute value of the dot product with `other`."""
        return np.fabs(np.dot(self, other))

    def cross(self, other) -> Vector:
        """Cross product; the result is a Vector, not a Normal."""
        return Vector(self.y * other.z - self.z * other.y,
                      self.z * other.x - self.x * other.z,
                      self.x * other.y - self.y * other.x)

    def sq_length(self) -> FLOAT:
        """Squared Euclidean length."""
        return self.x * self.x + self.y * self.y + self.z * self.z

    def length(self) -> FLOAT:
        """Euclidean length."""
        return np.sqrt(self.sq_length())

    def normalize(self):
        """inplace normalization; a zero normal is left unchanged"""
        length = self.length()
        if not length == 0:
            self /= length
        return self
class Ray(object):
    """
    Ray class

    Models a semi-infinite ray as a parametric line with
    starting `Point`, direction `Vector`, visual range from
    `mint` to `maxt`, the bouncing `depth` and animated `time`.
    """
    def __init__(self, o: 'Point'=Point(0., 0., 0.), d: 'Vector'=Vector(0., 0., 0.),
                 mint: FLOAT = 0., maxt: FLOAT = np.inf,
                 depth: INT = 0, time: FLOAT = 0.):
        # The mutable defaults are shared instances, but their components
        # are copied below, so the defaults are never mutated here.
        self.o = Point(o.x, o.y, o.z)
        self.d = Vector(d.x, d.y, d.z)
        self.mint = mint
        self.maxt = maxt
        self.depth = depth
        self.time = time  # for motion blur or animation purposes

    def __repr__(self):
        return "{}\nOrigin: {}\nDirection: {}\nmint: {} \
maxt: {}\ndepth: {}\ntime: {}".format(self.__class__,
            self.o, self.d, self.mint, self.maxt, self.depth, self.time)

    @classmethod
    def from_parent(cls, o: 'Point', d: 'Vector', r: 'Ray',
                    mint: FLOAT = 0., maxt: FLOAT = np.inf):
        """
        initialize from a parent ray: depth is incremented,
        time is inherited
        """
        return cls(o, d, mint, maxt, r.depth + 1, r.time)

    @classmethod
    def from_ray(cls, r: 'Ray'):
        """
        initialize from a ray, analogous to a copy constructor
        """
        return cls(r.o, r.d, r.mint, r.maxt, r.depth, r.time)

    def __call__(self, t) -> 'Point':
        """
        point at parameter t, i.e. o + d * t
        """
        return (self.o + self.d * t).view(Point)
class RayDifferential(Ray):
    """
    RayDifferential Class

    Subclasses `Ray`, adding two auxiliary offset rays
    (in x and y on the image plane) for texture mapping.
    """
    def __init__(self, o: 'Point'=Point(0, 0, 0), d: 'Vector'=Vector(0, 0, 0),
                 mint: FLOAT = 0., maxt: FLOAT = np.inf,
                 depth: INT = 0, time: FLOAT = 0.):
        super().__init__(o, d, mint, maxt, depth, time)
        # Differentials start out absent; a camera fills them in.
        self.has_differentials = False
        self.rxOrigin = Point(0., 0., 0.)
        self.ryOrigin = Point(0., 0., 0.)
        self.rxDirection = Vector(0., 0., 0.)
        self.ryDirection = Vector(0., 0., 0.)

    @classmethod
    def from_parent(cls, o: 'Point', d: 'Vector', r: 'RayDifferential',
                    mint: FLOAT=0., maxt: FLOAT =np.inf):
        """
        initialize from a parent ray; the child carries no differentials
        """
        return cls(o, d, mint, maxt, r.depth + 1, r.time)

    @classmethod
    def from_ray(cls, r: 'Ray'):
        """
        initialize from a ray, analogous to a copy constructor
        (differentials are not copied)
        """
        return cls(r.o, r.d, r.mint, r.maxt, r.depth, r.time)

    @classmethod
    def from_rd(cls, r: 'RayDifferential'):
        """
        initialize from a `RayDifferential`, analogous to a copy constructor
        """
        self = cls(r.o, r.d, r.mint, r.maxt, r.depth, r.time)
        self.has_differentials = r.has_differentials
        self.rxOrigin = r.rxOrigin.copy()
        self.ryOrigin = r.ryOrigin.copy()
        self.rxDirection = r.rxDirection.copy()
        self.ryDirection = r.ryDirection.copy()
        return self

    def scale_differential(self, s: FLOAT):
        """
        Scale the differential offsets about the main ray by factor s
        (used to account for actual sample spacing).
        """
        self.rxOrigin = self.o + (self.rxOrigin - self.o) * s
        self.ryOrigin = self.o + (self.ryOrigin - self.o) * s
        self.rxDirection = self.d + (self.rxDirection - self.d) * s
        self.ryDirection = self.d + (self.ryDirection - self.d) * s
        return self
class BBox:
    """
    BBox Class

    Models a 3D axis-aligned bounding box represented by a pair of
    opposite vertices: `pMin` (componentwise minimum) and `pMax`
    (componentwise maximum).
    """
    def __init__(self, p1=None, p2=None):
        # Two points: their componentwise hull.  No points: a degenerate
        # box (pMin=+inf, pMax=-inf) so union() with anything yields that
        # thing.  One point: the opposite corner stays at infinity.
        # NOTE(review): pbrt-style BBox(p) sets both corners to p; the
        # single-point behavior here differs — confirm callers expect it.
        if p1 is not None and p2 is not None:
            self.pMin = Point(min(p1.x, p2.x),
                              min(p1.y, p2.y),
                              min(p1.z, p2.z))
            self.pMax = Point(max(p1.x, p2.x),
                              max(p1.y, p2.y),
                              max(p1.z, p2.z))
        # default: degenerated BBox
        elif p1 is None and p2 is None:
            self.pMin = Point(np.inf, np.inf, np.inf)
            self.pMax = Point(-np.inf, -np.inf, -np.inf)
        elif p2 is None:
            self.pMin = p1.copy()
            self.pMax = Point(np.inf, np.inf, np.inf)
        else:
            self.pMin = Point(-np.inf, -np.inf, -np.inf)
            self.pMax = p2.copy()

    @classmethod
    def from_bbox(cls, box: 'BBox'):
        """Copy constructor."""
        return cls(box.pMin, box.pMax)

    def __repr__(self):
        return "{}\npMin:{}\npMax:{}".format(self.__class__,
                                             self.pMin,
                                             self.pMax)

    def __eq__(self, other):
        return np.array_equal(self.pMin, other.pMin) and \
               np.array_equal(self.pMax, other.pMax)

    def __ne__(self, other):
        # BUGFIX: previously compared self.pMin against other.pMax (and
        # repeated the same comparison twice), making __ne__ inconsistent
        # with __eq__.  Now it is the exact negation of __eq__.
        return not np.array_equal(self.pMin, other.pMin) or \
               not np.array_equal(self.pMax, other.pMax)

    def __getitem__(self, key):
        """box[0] -> pMin, box[1] -> pMax"""
        if key == 0:
            return self.pMin
        elif key == 1:
            return self.pMax
        else:
            raise KeyError

    @staticmethod
    def Union(b1, b2) -> 'BBox':
        """
        Return the union of a `BBox`
        and a `Point` or a union
        of two `Box`es.
        """
        ret = BBox()
        if isinstance(b2, Point):
            ret.pMin.x = min(b1.pMin.x, b2.x)
            ret.pMin.y = min(b1.pMin.y, b2.y)
            ret.pMin.z = min(b1.pMin.z, b2.z)
            ret.pMax.x = max(b1.pMax.x, b2.x)
            ret.pMax.y = max(b1.pMax.y, b2.y)
            ret.pMax.z = max(b1.pMax.z, b2.z)
        elif isinstance(b2, BBox):
            ret.pMin.x = min(b1.pMin.x, b2.pMin.x)
            ret.pMin.y = min(b1.pMin.y, b2.pMin.y)
            ret.pMin.z = min(b1.pMin.z, b2.pMin.z)
            ret.pMax.x = max(b1.pMax.x, b2.pMax.x)
            ret.pMax.y = max(b1.pMax.y, b2.pMax.y)
            ret.pMax.z = max(b1.pMax.z, b2.pMax.z)
        else:
            raise TypeError('unsupported union operation between\
					{} and {}'.format(type(b1), type(b2)))
        return ret

    def union(self, other) -> 'BBox':
        """
        In-place union with a `Point` or another `BBox`;
        returns self.
        """
        if isinstance(other, Point):
            self.pMin.x = min(self.pMin.x, other.x)
            self.pMin.y = min(self.pMin.y, other.y)
            self.pMin.z = min(self.pMin.z, other.z)
            self.pMax.x = max(self.pMax.x, other.x)
            self.pMax.y = max(self.pMax.y, other.y)
            self.pMax.z = max(self.pMax.z, other.z)
        elif isinstance(other, BBox):
            self.pMin.x = min(self.pMin.x, other.pMin.x)
            self.pMin.y = min(self.pMin.y, other.pMin.y)
            self.pMin.z = min(self.pMin.z, other.pMin.z)
            self.pMax.x = max(self.pMax.x, other.pMax.x)
            self.pMax.y = max(self.pMax.y, other.pMax.y)
            self.pMax.z = max(self.pMax.z, other.pMax.z)
        else:
            raise TypeError('unsupported union operation between\
					{} and {}'.format(self.__class__, type(other)))
        return self

    def overlaps(self, other: 'BBox') -> bool:
        """
        Determines whether two `BBox`es overlap (boundaries inclusive).
        """
        return (self.pMax.x >= other.pMin.x) and (self.pMin.x <= other.pMax.x) and \
               (self.pMax.y >= other.pMin.y) and (self.pMin.y <= other.pMax.y) and \
               (self.pMax.z >= other.pMin.z) and (self.pMin.z <= other.pMax.z)

    def inside(self, pnt: 'Point') -> bool:
        """
        Determines whether a given `Point`
        is inside the box (boundaries inclusive).
        """
        return (self.pMax.x >= pnt.x) and (self.pMin.x <= pnt.x) and \
               (self.pMax.y >= pnt.y) and (self.pMin.y <= pnt.y) and \
               (self.pMax.z >= pnt.z) and (self.pMin.z <= pnt.z)

    def expand(self, delta: FLOAT) -> 'BBox':
        """
        Pads the box by a constant amount on every side; returns self.
        """
        self.pMin.x -= delta
        self.pMin.y -= delta
        self.pMin.z -= delta
        self.pMax.x += delta
        self.pMax.y += delta
        self.pMax.z += delta
        return self

    def surface_area(self) -> FLOAT:
        """
        Computes the total surface area of the six faces.
        """
        d = (self.pMax - self.pMin).view(Vector)
        return 2. * (d.x * d.y + d.x * d.z + d.y * d.z)

    def volume(self) -> FLOAT:
        """
        Computes the volume.
        """
        d = (self.pMax - self.pMin).view(Vector)
        return d.x * d.y * d.z

    def maximum_extent(self):
        """
        Returns the index (0=x, 1=y, 2=z) of the longest axis.
        """
        delta = (self.pMax - self.pMin).view(Vector)
        if delta.x > delta.y and delta.x > delta.z:
            return 0
        elif delta.y > delta.z:
            return 1
        else:
            return 2

    def lerp(self, tx: FLOAT, ty: FLOAT, tz: FLOAT) -> 'Point':
        """
        lerp

        3D linear interpolation between the two opposite vertices.
        """
        return Point(util.lerp(tx, self.pMin.x, self.pMax.x),
                     util.lerp(ty, self.pMin.y, self.pMax.y),
                     util.lerp(tz, self.pMin.z, self.pMax.z))

    def offset(self, pnt: 'Point') -> 'Vector':
        """
        offset

        Position of a point relative to the corners,
        as fractions in [0, 1] when the point is inside.
        """
        return Vector((pnt.x - self.pMin.x) / (self.pMax.x - self.pMin.x),
                      (pnt.y - self.pMin.y) / (self.pMax.y - self.pMin.y),
                      (pnt.z - self.pMin.z) / (self.pMax.z - self.pMin.z))

    def bounding_sphere(self) -> ('Point', FLOAT):
        """
        bounding_sphere

        Get the center and radius of the bounding sphere.
        A degenerate box yields radius 0.
        """
        ctr = (.5 * (self.pMin + self.pMax)).view(Point)
        rad = 0.
        if self.inside(ctr):
            rad = ctr.dist(self.pMax)
        return ctr, rad

    def intersect_p(self, r: 'Ray') -> [bool, FLOAT, FLOAT]:
        """
        intersect_p()

        Check whether a ray intersects the BBox by comparing the
        parametric slab intervals along each dimension; returns
        [hit, t_enter, t_exit].
        """
        t0, t1 = r.mint, r.maxt
        # automatically convert /0. to np.inf
        t_near = np.true_divide((self.pMin - r.o), r.d)
        t_far = np.true_divide((self.pMax - r.o), r.d)
        # (removed: leftover dead debug block that tested isinstance of
        # the parametric values and assigned unused locals)
        for i in range(3):
            if t_near[i] > t_far[i]:
                t_near[i], t_far[i] = t_far[i], t_near[i]
            t0, t1 = np.fmax(t0, t_near[i]), np.fmin(t1, t_far[i])
            if t0 > t1:
                return [False, 0., 0.]
        return [True, t0, t1]
from pytracer.geometry.diffgeom import DifferentialGeometry
from pytracer.geometry.utility import * | StarcoderdataPython |
1698964 | """Write tax data to the console in Tax Exchange Format (TXF)."""
import datetime
from typing import Optional
def _txf_write(*obj: str) -> None:
# Write objects to the console with the recommended TXF line terminator.
print(*obj, end='\r\n')
return
def _txf_normalize_amount(amount: str) -> str:
# Remove any '$' or ',' or '-' characters from the amount.
amount = amount.translate({36: None, 44: None, 45: None})
return '0' + amount if amount.startswith('.') else amount
def _txf_expense(amount: str) -> str:
# Ensure that non-zero expense amounts are negative.
amount = _txf_normalize_amount(amount)
return amount if amount == '0.00' else '-' + amount
def _txf_income(amount: str) -> str:
# Ensure that non-zero income amounts are positive.
return _txf_normalize_amount(amount)
def _txf_write_record_format_1(
amount: str, ref_num: int,
copy: int = 1, line: int = 1, detail: Optional[str] = None) -> None:
# Write a Record Format 1 TXF record.
_txf_write('TS' if detail is None else 'TD')
_txf_write(f'N{ref_num}')
_txf_write(f'C{copy}')
_txf_write(f'L{line}')
_txf_write('$' + amount)
if detail is not None:
_txf_write('X' + detail)
_txf_write('^')
return
def _txf_write_record_format_3(
amount: str, description: str, ref_num: int,
copy: int = 1, line: int = 1, detail: Optional[str] = None) -> None:
# Write a Record Format 3 TXF record.
_txf_write('TS' if detail is None else 'TD')
_txf_write(f'N{ref_num}')
_txf_write(f'C{copy}')
_txf_write(f'L{line}')
_txf_write('$' + amount)
_txf_write('P' + description)
if detail is not None:
_txf_write('X' + detail)
_txf_write('^')
return
def _txf_write_record_format_6(
date: str, amount: str, state: str, ref_num: int, copy: int = 1,
line: int = 1, detail: Optional[str] = None) -> None:
# Write a Record Format 6 TXF record.
_txf_write('TS' if detail is None else 'TD')
_txf_write(f'N{ref_num}')
_txf_write(f'C{copy}')
_txf_write(f'L{line}')
_txf_write('D' + date)
_txf_write('$' + amount)
_txf_write('P' + state)
if detail is not None:
_txf_write('X' + detail)
_txf_write('^')
return
def write_header(program: str) -> None:
"""Write a TXF header."""
_txf_write('V042')
_txf_write('A' + program)
_txf_write('D' + datetime.date.today().strftime('%m/%d/%Y'))
_txf_write('^')
return
def write_1099int(payer: str, box_1: str = None, box_3: str = None, box_4: str = None) -> None:
"""Write TXF records for a Form 1099-INT."""
if box_1 is not None:
_txf_write_record_format_3(_txf_income(box_1), payer, 287)
if box_3 is not None:
_txf_write_record_format_3(_txf_income(box_3), payer, 288)
if box_4 is not None:
_txf_write_record_format_3(_txf_expense(box_4), payer, 616)
return
def write_cash_donation(
date: str, payee: str, amount: str, account: str, check_number: str, memo: str,
category: str) -> None:
"""Write a TXF detail record for a cash donation."""
# Ensure that a category is present so that TurboTax will parse the detail line correctly.
if category.lstrip() == '':
category = 'Cash donation'
_txf_write_record_format_1(
_txf_expense(amount), 280,
detail=f'{date:10.10} {account:30.30} {check_number:6.6} {payee:40.40}'\
f'{memo:40.40} {category:.15}')
return
def write_cash_donations_summary(amount: str) -> None:
"""Write a TXF summary record for cash donations."""
_txf_write_record_format_1(_txf_expense(amount), 280)
return
def write_federal_est_tax_payment(
date: str, amount: str, account: str, check_number: str, payee: str, memo: str,
category: str) -> None:
"""Write a TXF detail record for a federal quarterly estimated tax payment."""
# Ensure that a category is present so that TurboTax will parse the detail line correctly.
if category.lstrip() == '':
category = 'Fed qtr est tax'
_txf_write_record_format_6(
date, _txf_expense(amount), 'XX', 521,
detail=f'{date:10.10} {account:30.30} {check_number:6.6} {payee:40.40}'\
f'{memo:40.40} {category:.15}')
return
def write_federal_est_tax_summary(amount: str) -> None:
"""Write a TXF summary record for federal quarterly estimated tax payments."""
_txf_write_record_format_6('', _txf_expense(amount), 'XX', 521)
return
def write_state_est_tax_payment(
date: str, amount: str, state: str, account: str, check_number: str, payee: str,
memo: str, category: str) -> None:
"""Write a TXF detail record for a state quarterly estimated tax payment."""
# Ensure that a category is present so that TurboTax will parse the detail line correctly.
if category.lstrip() == '':
category = 'Sta qtr est tax'
_txf_write_record_format_6(
date, _txf_expense(amount), state, 522,
detail=f'{date:10.10} {account:30.30} {check_number:6.6} {payee:40.40}'\
f'{memo:40.40} {category:.15}')
return
def write_state_est_tax_summary(amount: str, state: str) -> None:
"""Write a TXF summary record for state quarterly estimated tax payments."""
_txf_write_record_format_6('', _txf_expense(amount), state, 522)
return
| StarcoderdataPython |
3394273 | import os
import pytest
import tempfile
import shutil
import sys
from programs.utils import ship_files2spark
if 'SPARK_HOME' not in os.environ:
os.environ['SPARK_HOME'] = '/usr/lib/spark'
@pytest.fixture(scope="module")
def spark():
tempdir = tempfile.mkdtemp()
# Add the directory with pyspark and py4j (in shape of zip file)
sys.path.append(os.path.join(os.environ['SPARK_HOME'], 'python'))
sys.path.append(os.path.join(os.environ['SPARK_HOME'], 'python', 'lib', 'py4j-src.zip'))
# Create local Spark session (so that we don't have to put local files to HDFS)
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('2018 e2e programs module test')\
.config("spark.submit.deployMode", "client")\
.config("spark.authenticate.secret", "111")\
.config("spark.master", "local[*]")\
.getOrCreate()
ship_files2spark(spark, tempdir)
yield spark
spark.stop()
shutil.rmtree(tempdir)
| StarcoderdataPython |
4822307 | import pytest
from dlms_cosem import cosem, enumerations
from dlms_cosem.protocol import xdlms
class TestSetRequestNormal:
    """Encoding/decoding tests for the DLMS SetRequestNormal APDU."""

    def test_transform_bytes(self):
        # Round-trip: encoding the request reproduces the reference bytes,
        # and decoding those bytes reproduces the request object.
        data = b"\xc1\x01\xc1\x00\x08\x00\x00\x01\x00\x00\xff\x02\x00\t\x0c\x07\xe5\x01\x18\xff\x0e09P\xff\xc4\x00"
        request = xdlms.SetRequestNormal(
            cosem_attribute=cosem.CosemAttribute(
                interface=enumerations.CosemInterface.CLOCK,
                instance=cosem.Obis(a=0, b=0, c=1, d=0, e=0, f=255),
                attribute=2,
            ),
            data=b"\t\x0c\x07\xe5\x01\x18\xff\x0e09P\xff\xc4\x00",
            access_selection=None,
            invoke_id_and_priority=xdlms.InvokeIdAndPriority(
                invoke_id=1, confirmed=True, high_priority=True
            ),
        )

        assert data == request.to_bytes()
        assert request == xdlms.SetRequestNormal.from_bytes(data)

    def test_wrong_tag_raises_value_error(self):
        # First byte 0xC2 is not the SetRequest tag (0xC1).
        data = b"\xc2\x01\xc1\x00\x08\x00\x00\x01\x00\x00\xff\x02\x00\t\x0c\x07\xe5\x01\x18\xff\x0e09P\xff\xc4\x00"
        with pytest.raises(ValueError):
            xdlms.SetRequestNormal.from_bytes(data)

    def test_wrong_type_raises_value_error(self):
        # Second byte 0x02 is not the "normal" request type (0x01).
        data = b"\xc1\x02\xc1\x00\x08\x00\x00\x01\x00\x00\xff\x02\x00\t\x0c\x07\xe5\x01\x18\xff\x0e09P\xff\xc4\x00"
        with pytest.raises(ValueError):
            xdlms.SetRequestNormal.from_bytes(data)
class TestSetRequestFactory:
    """The factory dispatches on the request-type byte (second byte of the APDU);
    only type 0x01 (set-request-normal) is implemented."""

    def test_set_request_normal(self):
        # Type 0x01 -> parsed into a SetRequestNormal instance.
        data = b"\xc1\x01\xc1\x00\x08\x00\x00\x01\x00\x00\xff\x02\x00\t\x0c\x07\xe5\x01\x18\xff\x0e09P\xff\xc4\x00"
        request = xdlms.SetRequestFactory.from_bytes(data)
        assert isinstance(request, xdlms.SetRequestNormal)

    def test_wrong_tag_raises_value_error(self):
        # Tag byte 0xc2 is not a set-request APDU at all.
        data = b"\xc2\x01\xc1\x00\x08\x00\x00\x01\x00\x00\xff\x02\x00\t\x0c\x07\xe5\x01\x18\xff\x0e09P\xff\xc4\x00"
        with pytest.raises(ValueError):
            xdlms.SetRequestFactory.from_bytes(data)

    def test_request_with_first_block_raises_not_implemented_error(self):
        # Type 0x02 (with first datablock) is recognised but unimplemented.
        data = b"\xc1\x02\xc1\x00\x08\x00\x00\x01\x00\x00\xff\x02\x00\t\x0c\x07\xe5\x01\x18\xff\x0e09P\xff\xc4\x00"
        with pytest.raises(NotImplementedError):
            xdlms.SetRequestFactory.from_bytes(data)

    def test_set_request_with_block_raises_not_implemented_error(self):
        # Type 0x03 (with datablock) is recognised but unimplemented.
        data = b"\xc1\x03\xc1\x00\x08\x00\x00\x01\x00\x00\xff\x02\x00\t\x0c\x07\xe5\x01\x18\xff\x0e09P\xff\xc4\x00"
        with pytest.raises(NotImplementedError):
            xdlms.SetRequestFactory.from_bytes(data)

    def test_set_with_list_raises_not_implemented_error(self):
        # Type 0x04 (with list) is recognised but unimplemented.
        data = b"\xc1\x04\xc1\x00\x08\x00\x00\x01\x00\x00\xff\x02\x00\t\x0c\x07\xe5\x01\x18\xff\x0e09P\xff\xc4\x00"
        with pytest.raises(NotImplementedError):
            xdlms.SetRequestFactory.from_bytes(data)

    def test_set_request_with_list_first_block_raises_not_implemented_block(self):
        # Type 0x05 (with list and first datablock) is recognised but unimplemented.
        data = b"\xc1\x05\xc1\x00\x08\x00\x00\x01\x00\x00\xff\x02\x00\t\x0c\x07\xe5\x01\x18\xff\x0e09P\xff\xc4\x00"
        with pytest.raises(NotImplementedError):
            xdlms.SetRequestFactory.from_bytes(data)
class TestSetResponseNormal:
    """Round-trip and error-handling tests for ``xdlms.SetResponseNormal``."""

    def test_transform_bytes(self):
        # APDU: tag 0xc5, response type 0x01, invoke-id/priority 0xc1, result 0x00.
        data = b"\xc5\x01\xc1\x00"
        response = xdlms.SetResponseNormal(
            result=enumerations.DataAccessResult.SUCCESS,
            invoke_id_and_priority=xdlms.InvokeIdAndPriority(
                invoke_id=1, confirmed=True, high_priority=True
            ),
        )
        assert data == response.to_bytes()
        assert response == xdlms.SetResponseNormal.from_bytes(data)

    def test_wrong_tag_raises_value_error(self):
        # Tag byte 0xc6 is not a set-response APDU.
        data = b"\xc6\x01\xc1\x00"
        with pytest.raises(ValueError):
            # BUG FIX: this was copy-pasted as SetRequestNormal, so the
            # response class's tag validation was never exercised.
            xdlms.SetResponseNormal.from_bytes(data)

    def test_wrong_type_raises_value_error(self):
        # Response type 0x02 is not the "normal" variant.
        data = b"\xc5\x02\xc1\x00"
        with pytest.raises(ValueError):
            # BUG FIX: this was copy-pasted as SetRequestNormal, which rejected
            # the data for the wrong reason (its own tag check) -- the class
            # under test is SetResponseNormal.
            xdlms.SetResponseNormal.from_bytes(data)
class TestSetResponseFactory:
    """The factory dispatches on the response-type byte (second byte of the
    APDU); only type 0x01 (set-response-normal) is implemented."""

    def test_set_response_normal(self):
        # Type 0x01 -> parsed into a SetResponseNormal instance.
        data = b"\xc5\x01\xc1\x00"
        request = xdlms.SetResponseFactory.from_bytes(data)
        assert isinstance(request, xdlms.SetResponseNormal)

    def test_wrong_tag_raises_value_error(self):
        # Tag byte 0xc6 is not a set-response APDU at all.
        data = b"\xc6\x01\xc1\x00"
        with pytest.raises(ValueError):
            xdlms.SetResponseFactory.from_bytes(data)

    def test_set_response_with_block_raises_not_implemented_error(self):
        # Type 0x02: recognised but unimplemented.
        data = b"\xc5\x02\xc1\x00"
        with pytest.raises(NotImplementedError):
            xdlms.SetResponseFactory.from_bytes(data)

    def test_set_response_last_block_raises_not_implemented_error(self):
        # Type 0x03: recognised but unimplemented.
        data = b"\xc5\x03\xc1\x00"
        with pytest.raises(NotImplementedError):
            xdlms.SetResponseFactory.from_bytes(data)

    def test_set_response_last_block_with_list_raises_not_implemented_error(self):
        # Type 0x04: recognised but unimplemented.
        data = b"\xc5\x04\xc1\x00"
        with pytest.raises(NotImplementedError):
            xdlms.SetResponseFactory.from_bytes(data)

    def test_set_response_with_list_raises_not_implemented_error(self):
        # Type 0x05: recognised but unimplemented.
        data = b"\xc5\x05\xc1\x00"
        with pytest.raises(NotImplementedError):
            xdlms.SetResponseFactory.from_bytes(data)
| StarcoderdataPython |
3298350 | import discord
from discord.ext import commands
class Meh:
    """Tells a user that you said meh"""

    def __init__(self, bot):
        # Keep a reference to the bot so the command can call its coroutines.
        self.bot = bot

    @commands.command(pass_context=True)
    async def meh(self, ctx, user : discord.Member):
        """Tags a person and tells them meh"""
        # NOTE(review): `pass_context`, `bot.delete_message` and `bot.say` are
        # discord.py 0.x APIs; they were removed in the 1.0 rewrite.
        # Delete the invoking message so only the bot's mention remains.
        message = ctx.message
        await self.bot.delete_message(message)
        author = ctx.message.author
        # Mention both the target and the original author.
        await self.bot.say("Hey, " + user.mention + "!! " + author.mention + " wanted to tell you 'Meh'")
def setup(bot):
    """Extension entry point: attach the Meh cog to the given bot."""
    bot.add_cog(Meh(bot))
| StarcoderdataPython |
3203333 | <gh_stars>0
# Generated by Django 3.1.4 on 2022-02-22 09:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the workspaces app: Workspace and WorkspaceUser.

    Auto-generated by Django (3.1.4); do not edit operations by hand -- create
    a follow-up migration instead.
    """

    initial = True

    dependencies = [
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL is.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Workspace',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('workspace_id', models.CharField(max_length=16, null=True)),
                ('public_key', models.CharField(editable=False, max_length=64, null=True)),
                ('status', models.CharField(choices=[('active', 'Active'), ('archived', 'Archived'), ('deleted', 'Deleted')], default='active', max_length=25)),
                ('created_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            # Membership/role link between a user and a workspace.
            name='WorkspaceUser',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('active', 'Active'), ('archived', 'Archived'), ('deleted', 'Deleted')], default='active', max_length=25)),
                ('role', models.CharField(choices=[('admin', 'Admin'), ('user', 'User')], default='user', max_length=25)),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('workspace', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='workspaces.workspace')),
            ],
        ),
    ]
| StarcoderdataPython |
1770007 | <filename>setup.py
from setuptools import setup
# Dependencies.
# Read the runtime dependencies from requirements.txt.
# BUG FIX: the raw lines used to be stripped but never filtered, so blank
# lines (and any comment lines) in requirements.txt ended up as empty
# requirement strings inside ``install_requires``.
with open('requirements.txt') as f:
    raw_requirements = f.readlines()
install_requires = [
    line.strip()
    for line in raw_requirements
    if line.strip() and not line.strip().startswith('#')
]

setup(name='contextily',
      version='0.99.0',
      description='Context geo-tiles in Python',
      url='https://github.com/darribas/contextily',
      author='<NAME>',
      author_email='<EMAIL>',
      license='3-Clause BSD',
      packages=['contextily'],
      package_data={'': ['requirements.txt']},
      install_requires=install_requires,
      zip_safe=False)
| StarcoderdataPython |
3289095 | import argparse
import os
from uuid import uuid1
import optuna
from dotenv import load_dotenv
from optuna.integration.pytorch_lightning import PyTorchLightningPruningCallback
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping, GPUStatsMonitor, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from repalette.constants import (
DEFAULT_PRETRAIN_BETA_1,
DEFAULT_PRETRAIN_BETA_2,
MODEL_CHECKPOINTS_DIR,
RDS_OPTUNA_DATABASE,
S3_LIGHTNING_LOGS_DIR,
S3_MODEL_CHECKPOINTS_RELATIVE_DIR,
)
from repalette.lightning.callbacks import LogPairRecoloringToTensorboard
from repalette.lightning.datamodules import PreTrainDataModule
from repalette.lightning.systems import PreTrainSystem
from repalette.utils.aws import upload_to_s3
if __name__ == "__main__":
    # load .env variables (AWS/S3 credentials, database URLs, etc.)
    load_dotenv()

    # hyperparameters
    hparams_parser = argparse.ArgumentParser()

    # trainer
    hparams_parser.add_argument("--max-epochs", type=int, default=None)
    hparams_parser.add_argument("--gpus", type=int, default=-1)
    hparams_parser.add_argument("--precision", type=int, default=16, choices=[16, 32])
    hparams_parser.add_argument("--accumulate-grad-batches", type=int, default=1)
    hparams_parser.add_argument("--gradient-clip-val", type=float, default=0.0)

    # callbacks
    hparams_parser.add_argument("--patience", type=int, default=20)
    hparams_parser.add_argument("--save-top-k", type=int, default=1)
    hparams_parser.add_argument("--pruning", type=bool, default=True)

    # pretrain task
    hparams_parser.add_argument("--beta-1", type=float, default=DEFAULT_PRETRAIN_BETA_1)
    hparams_parser.add_argument("--beta-2", type=float, default=DEFAULT_PRETRAIN_BETA_2)
    hparams_parser.add_argument("--optimizer", type=str, default="adam", choices=["adam", "adamw"])
    hparams_parser.add_argument("--scheduler-patience", type=int, default=10)
    hparams_parser.add_argument("--batch-size", type=int, default=8)
    hparams_parser.add_argument("--multiplier", type=int, default=16)

    # datamodule
    hparams_parser.add_argument("--num-workers", type=int, default=7)
    hparams_parser.add_argument("--shuffle", type=bool, default=True)
    hparams_parser.add_argument("--size", type=float, default=1.0)
    hparams_parser.add_argument("--pin-memory", type=bool, default=True)
    hparams_parser.add_argument("--train-batch-from-same-image", type=bool, default=True)
    hparams_parser.add_argument("--val-batch-from-same-image", type=bool, default=True)
    hparams_parser.add_argument("--test-batch-from-same-image", type=bool, default=True)

    # misc
    hparams_parser.add_argument("--name", type=str, default="pretrain", help="experiment name")
    hparams_parser.add_argument(
        "--version",
        type=str,
        default=None,
        help="unique! run version - used to generate checkpoint S3 path",
    )
    hparams_parser.add_argument("--logger", type=str, default="tensorboard", choices=["tensorboard"])
    hparams_parser.add_argument(
        "--n_trials",
        type=int,
        default=1,
        help="Number of optuna trials. Leave 1 if run from Cosmos",
    )

    hparams = hparams_parser.parse_args()

    def objective(trial):
        """Optuna objective: train a PreTrainSystem with sampled hyperparameters
        and return the test loss of its best checkpoint (uploaded to S3)."""
        # BUG FIX: the original mutated ``hparams.version`` on the first trial,
        # so every subsequent trial of this study reused the same version and
        # overwrote the previous trial's logger directory and S3 checkpoint.
        # Generate a fresh version per trial when none was given explicitly.
        run_version = hparams.version if hparams.version is not None else str(uuid1())

        # main LightningModule
        pretrain_system = PreTrainSystem(
            learning_rate=trial.suggest_loguniform("learning_rate", 1e-5, 1e-2),
            beta_1=hparams.beta_1,
            beta_2=hparams.beta_2,
            weight_decay=trial.suggest_uniform("weight_decay", 1e-5, 1e-2),
            optimizer=hparams.optimizer,
            batch_size=hparams.batch_size,
            multiplier=hparams.multiplier,
            scheduler_patience=hparams.scheduler_patience,
        )

        # Keep the best (lowest validation loss) checkpoints on local disk.
        pretrain_checkpoints = ModelCheckpoint(
            dirpath=MODEL_CHECKPOINTS_DIR,
            monitor="Val/loss_epoch",
            verbose=True,
            mode="min",
            save_top_k=hparams.save_top_k,
        )

        pretrain_early_stopping = EarlyStopping(
            monitor="Val/loss_epoch",
            min_delta=0.00,
            patience=hparams.patience,
            verbose=False,
            mode="min",
        )

        pretrain_gpu_stats_monitor = GPUStatsMonitor(temperature=True)
        log_recoloring_to_tensorboard = LogPairRecoloringToTensorboard()
        # Lets Optuna stop unpromising trials early based on validation loss.
        optuna_pruning = PyTorchLightningPruningCallback(monitor="Val/loss_epoch", trial=trial)

        logger = TensorBoardLogger(
            S3_LIGHTNING_LOGS_DIR,
            name=hparams.name,
            version=run_version,
            log_graph=True,
            default_hp_metric=False,
        )

        trainer = Trainer.from_argparse_args(
            hparams,
            logger=logger,
            checkpoint_callback=pretrain_checkpoints,
            callbacks=[
                pretrain_early_stopping,
                log_recoloring_to_tensorboard,
                pretrain_gpu_stats_monitor,
                optuna_pruning,
            ],
            profiler="simple",
        )

        datamodule = PreTrainDataModule(
            batch_size=pretrain_system.hparams.batch_size,
            multiplier=pretrain_system.hparams.multiplier,
            shuffle=hparams.shuffle,
            num_workers=hparams.num_workers,
            size=hparams.size,
            pin_memory=hparams.pin_memory,
            train_batch_from_same_image=hparams.train_batch_from_same_image,
            val_batch_from_same_image=hparams.val_batch_from_same_image,
            test_batch_from_same_image=hparams.test_batch_from_same_image,
        )

        # trainer.tune(pretrain_system, datamodule=datamodule)
        trainer.fit(pretrain_system, datamodule=datamodule)

        # get best checkpoint and evaluate it on the test split
        best_model_path = pretrain_checkpoints.best_model_path
        pretrain_system = PreTrainSystem.load_from_checkpoint(best_model_path)
        test_result = trainer.test(pretrain_system, datamodule=datamodule)
        # NOTE(review): this stores the metric *value* under a key called
        # "test_metric_name" -- presumably a misnomer; confirm downstream use.
        pretrain_system.hparams.test_metric_name = test_result[0]["Test/loss_epoch"]
        logger.log_hyperparams(pretrain_system.hparams)
        logger.finalize(status="success")

        # upload best model to S3 under <name>/<version>.<ext>
        S3_best_model_path = os.path.join(
            S3_MODEL_CHECKPOINTS_RELATIVE_DIR,
            hparams.name,
            ".".join([run_version, best_model_path.split(".")[-1]]),
        )
        upload_to_s3(best_model_path, S3_best_model_path)

        return test_result[0]["Test/loss_epoch"]

    pruner = optuna.pruners.MedianPruner() if hparams.pruning else optuna.pruners.NopPruner()
    study = optuna.create_study(direction="minimize", storage=RDS_OPTUNA_DATABASE, pruner=pruner)
    study.optimize(objective, n_trials=hparams.n_trials)
| StarcoderdataPython |
3214174 | #! /usr/bin/env python
from setuptools import setup
import re
from os import path
# Parse __version__ out of the package source without importing it (importing
# would require the install dependencies to already be present).
with open('tweetfinder/__init__.py', 'r') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)

# Use the README as the long description shown on PyPI.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md')) as f:
    long_description = f.read()

setup(name='tweetfinder',
      version=version,
      description='Find tweets embedded and mentioned in news articles online.',
      long_description=long_description,
      long_description_content_type='text/markdown',
      author='<NAME>',
      author_email='<EMAIL>',
      packages={'tweetfinder'},
      package_data={'': ['LICENSE']},
      include_package_data=True,
      install_requires=[
          "requests>=2.26.0",
          "readability-lxml>=0.8.1",
          "beautifulsoup4>=4.10.0",
          "pycld2>=0.41",
      ],
      project_urls={
          "Source": "https://github.com/dataculturegroup/Tweet-Finder",
          "Docs": "https://tweet-finder.readthedocs.io/",
      },
      license='Apache',
      zip_safe=False
      )
| StarcoderdataPython |
99390 | <gh_stars>0
from django.contrib import admin
from modeltranslation.admin import TranslationAdmin
from .models import InCategory, OutCategory, Income, Expense
class InCategoryAdmin(TranslationAdmin):
    '''Enables modeltranslation's translated-field editing for InCategory in the admin panel.'''
    pass
class OutCategoryAdmin(TranslationAdmin):
    '''Enables modeltranslation's translated-field editing for OutCategory in the admin panel.'''
    pass
admin.site.register(InCategory, InCategoryAdmin)
admin.site.register(OutCategory, OutCategoryAdmin)
# Temporary: Income and Expense use the default ModelAdmin until dedicated
# admin classes are written.
admin.site.register(Income)
admin.site.register(Expense)
| StarcoderdataPython |
3301466 | import pandas as pd
import numpy as np
import re, os
from pathlib import Path
from tqdm import tqdm
import pyarrow as pa
import pyarrow.parquet as pq
from pathlib import Path
# In bash run ```$ cat *.txt>>combined.txt``` to concatenate all movie rating files into one
rating_file = Path('../../data/training_set/combined.txt')

# A movie-header line looks like "123:"; the pattern is anchored (fullmatch)
# so an ordinary "user,rating,date" line can never be mistaken for a header.
# BUG FIX: the original used an unanchored re.search('\d+:', ...) on every line.
movie_header = re.compile(r'(\d+):')

print('Reading data file....\n')
print('Extracting data...\n')
rating_data = []
movie = None
# BUG FIX: stream the file line by line instead of loading the whole
# (multi-gigabyte) Netflix training set into memory with readlines().
with open(rating_file, 'r', encoding='utf-8', errors='ignore') as f:
    for line in tqdm(f):
        line = line.strip()
        if not line:
            # Skip blank lines -- they used to produce short tuples that broke
            # the 4-column DataFrame construction below.
            continue
        match = movie_header.fullmatch(line)
        if match is not None:
            # New movie section: remember its id (the digits before ':').
            movie = match.group(1)
            continue
        # Rating line: "user_id,rating,date" under the current movie.
        rating_data.append((movie, *line.split(',')))

rating_df = pd.DataFrame(rating_data, columns=['movie_id', 'user_id', 'rating', 'date'])
table = pa.Table.from_pandas(rating_df)
pq.write_table(table, 'rating.parquet')
4810362 | <reponame>vadi2/codeql<gh_stars>1000+
import dataclasses
import dis
import logging
from dis import Instruction
from types import FrameType
from typing import Any, List
from cg_trace.settings import DEBUG, FAIL_ON_UNKNOWN_BYTECODE
from cg_trace.utils import better_compare_for_dataclass
LOGGER = logging.getLogger(__name__)
# See https://docs.python.org/3/library/dis.html#python-bytecode-instructions for
# details on the bytecode instructions
# TODO: read https://opensource.com/article/18/4/introduction-python-bytecode
class BytecodeExpr:
    """An expression reconstructed from Python bytecode.

    Base class for all the frozen dataclasses below; each subclass models one
    syntactic shape (constant, name, attribute access, call, ...).
    """
@better_compare_for_dataclass
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class BytecodeConst(BytecodeExpr):
    """FOR LOAD_CONST"""

    # The constant's runtime value, as stored in the code object's co_consts.
    value: Any

    def __str__(self):
        # repr() so e.g. strings render quoted, as they would in source code.
        return repr(self.value)
@better_compare_for_dataclass
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class BytecodeVariableName(BytecodeExpr):
    """A bare name reference (built for LOAD_GLOBAL/LOAD_FAST/LOAD_NAME/LOAD_DEREF)."""

    name: str

    def __str__(self):
        return self.name
@better_compare_for_dataclass
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class BytecodeAttribute(BytecodeExpr):
    """An attribute or method access, rendered as ``object.attr_name``
    (built for LOAD_ATTR/LOAD_METHOD)."""

    attr_name: str
    # Expression that produced the object whose attribute is accessed.
    object: BytecodeExpr

    def __str__(self):
        return f"{self.object}.{self.attr_name}"
@better_compare_for_dataclass
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class BytecodeSubscript(BytecodeExpr):
    """An indexing expression, rendered as ``object[key]``.

    NOTE(review): the BINARY_SUBSCR branch that builds this is currently
    commented out in expr_from_instruction.
    """

    key: BytecodeExpr
    object: BytecodeExpr

    def __str__(self):
        return f"{self.object}[{self.key}]"
@better_compare_for_dataclass
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class BytecodeTuple(BytecodeExpr):
    """A tuple display, rendered with Python's literal syntax
    (``(a, b)``, ``(a,)``, ``()``)."""

    elements: List[BytecodeExpr]

    def __str__(self):
        # BUG FIX: the original indexed elements[0] unconditionally for
        # len != >1, raising IndexError on an empty tuple (BUILD_TUPLE 0).
        if not self.elements:
            return "()"
        elements_formatted = (
            ", ".join(str(e) for e in self.elements)
            if len(self.elements) > 1
            # Single element keeps the trailing comma, as tuple syntax requires.
            else f"{self.elements[0]},"
        )
        return f"({elements_formatted})"
@better_compare_for_dataclass
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class BytecodeList(BytecodeExpr):
    """A list display, rendered as ``[a, b]``."""

    elements: List[BytecodeExpr]

    def __str__(self):
        # BUG FIX: the original indexed elements[0] unconditionally for
        # len != >1, raising IndexError on an empty list (BUILD_LIST 0).
        if not self.elements:
            return "[]"
        elements_formatted = (
            ", ".join(str(e) for e in self.elements)
            if len(self.elements) > 1
            # Single element keeps the original trailing-comma rendering.
            else f"{self.elements[0]},"
        )
        return f"[{elements_formatted}]"
@better_compare_for_dataclass
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class BytecodeCall(BytecodeExpr):
    """A call expression; only the callee is tracked, so it renders as ``f()``
    with the arguments elided."""

    function: BytecodeExpr

    def __str__(self):
        return f"{self.function}()"
@better_compare_for_dataclass
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class BytecodeUnknown(BytecodeExpr):
    """Placeholder for an instruction the reconstructor does not understand;
    renders as ``<OPNAME>``."""

    opname: str

    def __str__(self):
        return f"<{self.opname}>"
@better_compare_for_dataclass
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class BytecodeMakeFunction(BytecodeExpr):
    """For MAKE_FUNCTION opcode"""

    # Expression for the function's qualified name (a BytecodeConst when built
    # by the -- currently commented-out -- MAKE_FUNCTION branch).
    qualified_name: BytecodeExpr

    def __str__(self):
        return f"<MAKE_FUNCTION>(qualified_name={self.qualified_name})>"
@better_compare_for_dataclass
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class SomethingInvolvingScaryBytecodeJump(BytecodeExpr):
    """Returned when a jump instruction is met while walking backwards through
    the bytecode; reconstruction across jumps is not attempted."""

    # Opname of the jump instruction that aborted the walk.
    opname: str

    def __str__(self):
        return "<SomethingInvolvingScaryBytecodeJump>"
def expr_that_added_elem_to_stack(
    instructions: List[Instruction], start_index: int, stack_pos: int
):
    """Backwards traverse instructions

    Backwards traverse the instructions starting at `start_index` until we find the
    instruction that added the element at stack position `stack_pos` (where 0 means top
    of stack). For example, if the instructions are:

    ```
    0: LOAD_GLOBAL 0 (func)
    1: LOAD_CONST 1 (42)
    2: CALL_FUNCTION 1
    ```

    We can look for the function that is called by invoking this function with
    `start_index = 1` and `stack_pos = 1`. It will see that `LOAD_CONST` added the top
    element to the stack, and find that `LOAD_GLOBAL` was the instruction to add element
    in stack position 1 to the stack -- so `expr_from_instruction(instructions, 0)` is
    returned.

    It is assumed that if `stack_pos == 0` then the instruction you are looking for is
    the one at `instructions[start_index]`. This might not hold, in case of using `NOP`
    instructions.

    If any jump instruction is found, `SomethingInvolvingScaryBytecodeJump` is returned
    immediately. (since correctly process the bytecode when faced with jumps is not as
    straight forward).
    """
    if DEBUG:
        LOGGER.debug(
            f"find_inst_that_added_elem_to_stack start_index={start_index} stack_pos={stack_pos}"
        )
    assert stack_pos >= 0
    # Walk from start_index towards the beginning of the code object.
    for inst in reversed(instructions[: start_index + 1]):
        # Return immediately if faced with a jump
        if inst.opcode in dis.hasjabs or inst.opcode in dis.hasjrel:
            return SomethingInvolvingScaryBytecodeJump(inst.opname)
        if stack_pos == 0:
            # This instruction produced the element we were tracking.
            if DEBUG:
                LOGGER.debug(f"Found it: {inst}")
            # NOTE(review): list.index finds the *first* equal Instruction;
            # identical instructions earlier in the list would alias -- confirm
            # that cannot happen for the offsets handled here.
            found_index = instructions.index(inst)
            break
        # Moving backwards over `inst` undoes its net stack effect, shifting
        # the position of the element we are tracking.
        old = stack_pos
        stack_pos -= dis.stack_effect(inst.opcode, inst.arg)
        new = stack_pos
        if DEBUG:
            LOGGER.debug(f"Skipping ({old} -> {new}) {inst}")
    else:
        # Fell off the start of the instruction list without finding it.
        raise Exception("inst_index_for_stack_diff failed")

    return expr_from_instruction(instructions, found_index)
def expr_from_instruction(instructions: List[Instruction], index: int) -> BytecodeExpr:
    """Reconstruct the expression produced by ``instructions[index]``.

    Dispatches on the opname; unsupported loads fall through to
    ``BytecodeUnknown`` (or raise, when FAIL_ON_UNKNOWN_BYTECODE is set).
    The commented-out branches are deliberately disabled features, kept for
    reference.
    """
    inst = instructions[index]
    if DEBUG:
        LOGGER.debug(f"expr_from_instruction: {inst} index={index}")

    # Plain name loads map directly to a variable reference.
    if inst.opname in ["LOAD_GLOBAL", "LOAD_FAST", "LOAD_NAME", "LOAD_DEREF"]:
        return BytecodeVariableName(inst.argval)
    # elif inst.opname in ["LOAD_CONST"]:
    #     return BytecodeConst(inst.argval)
    # https://docs.python.org/3/library/dis.html#opcode-LOAD_METHOD
    # https://docs.python.org/3/library/dis.html#opcode-LOAD_ATTR
    elif inst.opname in ["LOAD_METHOD", "LOAD_ATTR"]:
        attr_name = inst.argval
        # The accessed object is whatever put the current top-of-stack there.
        obj_expr = expr_that_added_elem_to_stack(instructions, index - 1, 0)
        return BytecodeAttribute(attr_name=attr_name, object=obj_expr)
    # elif inst.opname in ["BINARY_SUBSCR"]:
    #     key_expr = expr_that_added_elem_to_stack(instructions, index - 1, 0)
    #     obj_expr = expr_that_added_elem_to_stack(instructions, index - 1, 1)
    #     return BytecodeSubscript(key=key_expr, object=obj_expr)
    # elif inst.opname in ["BUILD_TUPLE", "BUILD_LIST"]:
    #     elements = []
    #     for i in range(inst.arg):
    #         element_expr = expr_that_added_elem_to_stack(instructions, index - 1, i)
    #         elements.append(element_expr)
    #     elements.reverse()
    #     klass = {"BUILD_TUPLE": BytecodeTuple, "BUILD_LIST": BytecodeList}[inst.opname]
    #     return klass(elements=elements)
    # https://docs.python.org/3/library/dis.html#opcode-CALL_FUNCTION
    elif inst.opname in [
        "CALL_FUNCTION",
        "CALL_METHOD",
        "CALL_FUNCTION_KW",
        "CALL_FUNCTION_EX",
    ]:
        assert index > 0
        assert isinstance(inst.arg, int)
        # Work out how many stack slots sit between the top of the stack and
        # the callee, so the backward walk below lands on the callee itself.
        if inst.opname in ["CALL_FUNCTION", "CALL_METHOD"]:
            num_stack_elems = inst.arg
        elif inst.opname == "CALL_FUNCTION_KW":
            # inst.arg positional/keyword values plus the tuple of kw names.
            num_stack_elems = inst.arg + 1
        elif inst.opname == "CALL_FUNCTION_EX":
            # top of stack _can_ be keyword argument dictionary (indicated by lowest bit
            # set), always followed by the positional arguments (also if there are not
            # any).
            num_stack_elems = (1 if inst.arg & 1 == 1 else 0) + 1
        func_expr = expr_that_added_elem_to_stack(
            instructions, index - 1, num_stack_elems
        )
        return BytecodeCall(function=func_expr)
    # elif inst.opname in ["MAKE_FUNCTION"]:
    #     name_expr = expr_that_added_elem_to_stack(instructions, index - 1, 0)
    #     assert isinstance(name_expr, BytecodeConst)
    #     return BytecodeMakeFunction(qualified_name=name_expr)

    # TODO: handle with statements (https://docs.python.org/3/library/dis.html#opcode-SETUP_WITH)
    WITH_OPNAMES = ["SETUP_WITH", "WITH_CLEANUP_START", "WITH_CLEANUP_FINISH"]

    # Special cases ignored for now:
    #
    # - LOAD_BUILD_CLASS: Called when constructing a class.
    # - IMPORT_NAME: Observed to result in a call to filename='<frozen
    #   importlib._bootstrap>', linenum=389, funcname='parent'
    if FAIL_ON_UNKNOWN_BYTECODE:
        if inst.opname not in ["LOAD_BUILD_CLASS", "IMPORT_NAME"] + WITH_OPNAMES:
            LOGGER.warning(
                f"Don't know how to handle this type of instruction: {inst.opname}"
            )
            # NOTE(review): raising bare BaseException bypasses `except
            # Exception` handlers -- presumably deliberate to abort tracing
            # outright; confirm.
            raise BaseException()

    return BytecodeUnknown(inst.opname)
def expr_from_frame(frame: FrameType) -> BytecodeExpr:
    """Reconstruct the expression for the instruction the frame last executed
    (``frame.f_lasti``)."""
    bytecode = dis.Bytecode(frame.f_code, current_offset=frame.f_lasti)
    if DEBUG:
        LOGGER.debug(
            f"{frame.f_code.co_filename}:{frame.f_lineno}: bytecode: \n{bytecode.dis()}"
        )
    instructions = list(bytecode)
    # Locate the instruction whose offset matches the frame's last offset.
    offsets = [instruction.offset for instruction in instructions]
    return expr_from_instruction(instructions, offsets.index(frame.f_lasti))
| StarcoderdataPython |
# 07 Web Scraping
# Not every website has an API to work with.
# In situations like that, the only way to get the data we want is to parse the HTML behind a webpage, get rid of all the HTML tags, and extract the actual data.
# This technique is called Web Scraping.
# In this example we are going to write a program that extracts the list of the newest questions from https://stackoverflow.com/questions/
# This is called a Web Crawler or a Web Spider.
# For this we are going to use the "beautifulsoup4" package
import requests
from bs4 import BeautifulSoup
response = requests.get("https://stackoverflow.com/questions")  # Use ".get()" from the "requests" module to download the web page.
content_html = response.text  # The ".text" attribute returns the HTML content of the response.
soup = BeautifulSoup(content_html, "html.parser")  # Parse the HTML with Python's built-in parser.
# Select by CSS class: a "." followed by the class name, here ".question-summary".  This returns a list of matching elements.
questions = soup.select(".question-summary")
for question in questions:
    # ".select_one()" works here because each question has exactly one title link, so we don't need a list.
    print(question.select_one(".question-hyperlink").getText())
    print(question.select_one(".vote-count-post").getText())
4801372 | import uuid
from django.db import models
from django.conf import settings
from specimens.models import Specimen
class SpecimenLabel(models.Model):
    """Specimen label (標本ラベル): a user-named set of specimens plus flags
    controlling which printable label types may be generated for it."""

    class Meta:
        db_table = 'specimen_labels'
        ordering = ['-created_at']

    id = models.UUIDField(default=uuid.uuid4, primary_key=True)
    # Original note said "use the update datetime for sorting", but this field
    # is the creation timestamp (auto_now_add) -- NOTE(review): confirm intent.
    created_at = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='specimens_labels',
        null=True,
        on_delete=models.CASCADE
    )
    # Label name set by the user.
    name = models.CharField(verbose_name='ラベル名',
                            default='NO_NAME',
                            max_length=30, blank=True)
    # Specimens included in this label set.
    label_specimens = models.ManyToManyField(Specimen,
                                             related_name='label_specimens',
                                             verbose_name='標本データ',
                                             blank=True)
    # Per-type toggles: data / collection / determination / note labels.
    data_label_flag = models.BooleanField(verbose_name='データラベル作成の可否',
                                          default=True)
    coll_label_flag = models.BooleanField(verbose_name='コレクションラベル作成の可否',
                                          default=True)
    det_label_flag = models.BooleanField(verbose_name='同定ラベル作成の可否',
                                         default=True)
    note_label_flag = models.BooleanField(verbose_name='備考ラベル作成の可否',
                                          default=True)

    @property
    def pdf_filename(self):
        """Filename of the generated PDF: "<uuid>.pdf"."""
        return str(self.id) + '.pdf'
3328773 | import tracemalloc
def knapsack_space_optimized_dp(value_set, weight_set, total_weight):
    """Space optimized version of `knapsack_dp`: only two DP rows are kept.

    Parameters
    ----------
    value_set : [int]
        Value of each item.
    weight_set : [int]
        Weight of each item (parallel to ``value_set``).
    total_weight : int
        Capacity of the sack.

    Returns
    -------
    int
        Maximum total value achievable within ``total_weight``.
    """
    # Creating only two rows.
    dp_table = [[0 for _ in range(total_weight + 1)] for _ in range(2)]
    number_of_elements = len(value_set)
    for row_num in range(1, number_of_elements + 1):
        # REFACTOR: the original duplicated the whole inner update for the
        # odd/even cases; compute the two row indices once instead.  Odd rows
        # write row 0 and even rows write row 1, exactly as before.
        curr = 1 - (row_num % 2)
        prev = 1 - curr
        # Loop-invariant: the item's weight does not depend on the capacity.
        element_weight = weight_set[row_num - 1]
        for current_weight in range(1, total_weight + 1):
            if current_weight < element_weight:
                # Item does not fit: carry over the previous row's best value.
                dp_table[curr][current_weight] = dp_table[prev][current_weight]
            else:
                dp_table[curr][current_weight] = max(
                    dp_table[prev][current_weight],
                    value_set[row_num - 1] + dp_table[prev][current_weight - element_weight],
                )
    # Row values are non-decreasing per item, so the max of the two rows is the
    # last row written (and 0 when value_set is empty).
    return max(dp_table[1][total_weight], dp_table[0][total_weight])
def knapsack_dp(value_set, weight_set, total_weight):
    """Bottom-up 0/1 knapsack over the full (n+1) x (capacity+1) table.

    Prints the chosen items via ``display_knapsack_solution`` and returns the
    optimal total value.

    Note
    ----
    This solution works only for integers; the branch and bound method is
    needed to make it work for floats.

    Parameters
    ----------
    value_set : [int]
    weight_set : [int]
    total_weight : int

    Returns
    -------
    int
        Maximum sum weight possible.
    """
    item_count = len(value_set)
    assert item_count == len(weight_set)

    table = [[0] * (total_weight + 1) for _ in range(item_count + 1)]
    for item, (value, weight) in enumerate(zip(value_set, weight_set), start=1):
        for capacity in range(1, total_weight + 1):
            if weight > capacity:
                # Item cannot fit: inherit the best without it.
                table[item][capacity] = table[item - 1][capacity]
            else:
                # Best of skipping the item vs taking it.
                table[item][capacity] = max(
                    table[item - 1][capacity],
                    value + table[item - 1][capacity - weight],
                )

    display_knapsack_solution(
        dp_table=table,
        value_set=value_set,
        weight_set=weight_set,
        sack_capacity=total_weight,
    )
    return table[item_count][total_weight]
def display_knapsack_solution(dp_table, value_set, weight_set, sack_capacity):
    """Non recursive solution.

    We check the solution, compare it to the previous row, if the same value,
    we know the current `row` isn't included in the solution.  Prints the list
    of chosen item values; returns None.
    """
    # Optimal value sits in the bottom-right cell of the DP table.
    res = dp_table[len(value_set)][sack_capacity]
    current_sack_capacity = sack_capacity
    solution_set = []
    # Walk rows bottom-up.  NOTE(review): the loop starts at len(value_set)+1,
    # whose first comparison (row n against itself) is always equal and
    # skipped -- effectively the walk begins at row n as intended.
    for row_num in range(len(value_set) + 1, 0, -1):
        if res == dp_table[row_num - 1][current_sack_capacity]:
            # Same value as the row above: this row's item was not taken.
            continue
        # Value changed: the item was taken -- record it and shrink capacity.
        solution_set.append(value_set[row_num - 1])
        res = dp_table[row_num - 1][current_sack_capacity - weight_set[row_num - 1]]
        current_sack_capacity = current_sack_capacity - weight_set[row_num - 1]
    print(f"Solution set: {solution_set}")
def extended_knapsack_dp(value_set, weight_set, total_allowed_weight, max_number_of_elements):
    """Knapsack with a limited number of elements allowed.

    ``dp_table[k][i][w]`` is the best value using at most ``k`` items chosen
    from the first ``i``, within total weight ``w``; choosing an item links to
    dimension ``k - 1``.

    Parameters
    ----------
    value_set : [int]
    weight_set : [int]
    total_allowed_weight : int
    max_number_of_elements : int

    Returns
    -------
    int
        Maximum sum weight possible.
    """
    number_of_elements = len(value_set)
    # Bound the user-supplied limits; this is not an unbounded knapsack problem.
    max_number_of_elements = min(max_number_of_elements, number_of_elements)
    total_allowed_weight = min(total_allowed_weight, sum(weight_set))

    # 3D DP table: a 2D knapsack table per allowed item count.
    dp_table = [
        [
            [0 for _ in range(total_allowed_weight + 1)]
            for _ in range(number_of_elements + 1)
        ]
        for _ in range(max_number_of_elements + 1)
    ]
    for dimension in range(1, max_number_of_elements + 1):
        for row in range(1, number_of_elements + 1):
            # Loop-invariant for the innermost loop.
            element_weight = weight_set[row - 1]
            for current_weight in range(1, total_allowed_weight + 1):
                if current_weight < element_weight:
                    dp_table[dimension][row][current_weight] = dp_table[dimension][row - 1][current_weight]
                else:
                    dp_table[dimension][row][current_weight] = max(
                        # Current element is not selected.
                        dp_table[dimension][row - 1][current_weight],
                        # Element selected: uses up one of the k slots.
                        dp_table[dimension - 1][row - 1][current_weight - element_weight] + value_set[row - 1],
                    )
    # BUG FIX: the original returned dp_table[...][row][current_weight] using
    # leftover loop variables, which raised NameError whenever any loop did
    # not run (empty value_set, zero capacity, or a zero item limit).
    return dp_table[max_number_of_elements][number_of_elements][total_allowed_weight]
def unbounded_knapsack(sack_weight_capacity, value_set, weight_set):
    """This is a knapsack with elements that can be repeated.

    So we iterate through every element each time we solve for a capacity.

    Parameters
    ----------
    sack_weight_capacity : int
    value_set : [int]
    weight_set : [int]

    Raises
    ------
    AssertionError
        If lengths of `value_set` and `weight_set` aren't equal.

    Returns
    -------
    int : The maximum value that can go into the sack under the required weight limit.
    """
    # FIX: professional assertion message (the original was abusive).
    assert len(value_set) == len(weight_set), \
        "value_set and weight_set must have the same length"
    # BUG FIX: the table was seeded with the float 0., so the function returned
    # e.g. 138.0 instead of the int 138 documented above.
    dp_table = [0 for _ in range(sack_weight_capacity + 1)]
    for current_weight in range(1, sack_weight_capacity + 1):
        for element_num in range(len(value_set)):
            # Reuse of items is allowed: take from the *same* table, not a
            # previous row.
            if weight_set[element_num] <= current_weight:
                dp_table[current_weight] = max(
                    dp_table[current_weight],
                    dp_table[current_weight - weight_set[element_num]] + value_set[element_num],
                )
    return dp_table[sack_weight_capacity]
if __name__ == "__main__":
    # Smoke tests comparing the variants, with tracemalloc used to compare
    # their peak memory usage.
    # Test case1: plain knapsack, hence total val is 100 + 120 + 2
    val = [60, 100, 120, 2]
    wt = [10, 20, 29, 1]
    W = 50
    tracemalloc.start()
    assert knapsack_dp(value_set=val, weight_set=wt, total_weight=W) == 222
    first_size, first_peak = tracemalloc.get_traced_memory()
    print(f"size_dp: {first_size}, dp_peak_memory: {first_peak}")
    # Test case2: space optimized knapsack, hence total val is 100 + 120 + 2
    val = [60, 100, 120, 2]
    wt = [10, 20, 29, 1]
    W = 50
    # Tracing stays active from above; only the counters are reset here.
    tracemalloc.clear_traces()
    assert knapsack_space_optimized_dp(value_set=val, weight_set=wt, total_weight=W) == 222
    second_size, second_peak = tracemalloc.get_traced_memory()
    print(f"size_dp_optimized: {second_size}, dp_peak_memory: {second_peak}")
    # we can choose only two elements, hence 120 + 100
    val = [60, 100, 120, 2]
    wt = [10, 20, 29, 1]
    W = 50
    tracemalloc.clear_traces()
    # NOTE(review): start() while already tracing is redundant (harmless).
    tracemalloc.start()
    ans = extended_knapsack_dp(value_set=val, weight_set=wt, total_allowed_weight=W, max_number_of_elements=2)
    assert ans == 220, f"{ans}, not 220"
    third_size, third_peak = tracemalloc.get_traced_memory()
    print(f"extended_knapsack: {third_size}, dp_peak_memory: {third_peak}")
    # Ridiculous unconstrained case, but elements cannot repeat.
    val = [60, 100, 120, 2]
    wt = [10, 20, 29, 1]
    W = 50
    ans = extended_knapsack_dp(value_set=val, weight_set=wt, total_allowed_weight=70000, max_number_of_elements=1000)
    # NOTE(review): the failure message says "not 220" but the expected value
    # here is 282 (copy-paste of the previous assert's message).
    assert ans == 282, f"{ans}, not 220"
    assert (unbounded_knapsack(sack_weight_capacity=29, value_set=val, weight_set=wt)) == 138
| StarcoderdataPython |
157502 | <reponame>SamJakob/PythonTCPSocketsExample
import sys
from multiprocessing import Process
from typing import Optional
from socket import socket, MSG_PEEK, AF_INET, SOCK_STREAM
# Import Queue from our utilities package.
# This should probably only be necessary on macOS, but is probably worth keeping for
# cross-compatability.
from utils import Queue
# Import our protocol specification.
# This is shared between the client and the server to ensure the values stay consistent.
import scuffed_protocol
class ConsoleInputDelegate(Process):
    """
    Background process that forwards console input to the main process.

    Every line the user types is pushed onto the queue supplied at
    construction time, so the main process can consume input whenever it is
    ready — and can cheaply check whether any input is pending at all.

    The process is created as a daemon so it never keeps the program alive
    on its own.
    """

    def __init__(self, queue):
        """Configure the daemon process and remember the destination queue."""
        super().__init__(daemon=True)
        # Destination for every line read from the console.
        self.queue = queue

    def run(self) -> None:
        """Continuously read lines from stdin and enqueue them."""
        # Child processes do not inherit a usable stdin by default; reopen
        # file descriptor 0 explicitly so input() works here.
        sys.stdin = open(0)
        while True:
            self.queue.put(input())
class MyClient:
    """
    Interactive TCP chat client.

    Connects to the server on the port defined by ``scuffed_protocol``,
    forwards lines typed on the console and prints every message the server
    sends back.

    Wire format (shared with the server): each message is a 2-byte
    big-endian length prefix followed by that many UTF-8 encoded bytes.
    """

    def __init__(self):
        """Initialize the (not yet connected) socket and console-input plumbing."""
        # The client socket; None means "not connected / connection closed".
        self.socket: Optional[socket] = None
        # Lines the user typed that have not been processed yet.
        self.console_input_queue = Queue()
        # Daemon process that feeds console lines into the queue above.
        self.console_input_delegate = ConsoleInputDelegate(queue=self.console_input_queue)

    def receive_message(self) -> str:
        """
        Receive one length-prefixed message from the server.

        Reads a 2-byte big-endian length, then that many bytes, and decodes
        them as UTF-8.

        :return: The received message.
        :raises ConnectionError: if the server closes the connection mid-read.
        """

        def read_bytes(num_bytes: int, read_function):
            """
            Read exactly ``num_bytes`` bytes via ``read_function`` (usually
            the socket's ``recv``), looping until all of them have arrived.
            """
            data = bytes()
            waiting_for_bytes = num_bytes
            while waiting_for_bytes > 0:
                received_bytes: bytes = read_function(waiting_for_bytes)
                # Fix: an empty read means the peer performed an orderly
                # shutdown. Previously this case looped forever because the
                # counter never decreased; surface it as a connection error.
                if not received_bytes:
                    raise ConnectionError("Connection closed by the server while reading.")
                data += received_bytes
                waiting_for_bytes -= len(received_bytes)
            return data

        # NOTE(review): we assume the stream is positioned at the start of a
        # length prefix; a malformed peer would desynchronise the framing and
        # the protocol has no resynchronisation mechanism.
        str_length = int.from_bytes(read_bytes(2, self.socket.recv), byteorder='big')
        # Receive that many bytes and UTF-8 decode them into a string.
        return read_bytes(str_length, self.socket.recv).decode('utf-8')

    def send_message(self, message: str) -> None:
        """
        Send ``message`` to the server as a length-prefixed UTF-8 frame.

        :param message: The message to send.
        :raises ValueError: if the UTF-8 encoding of the message exceeds
            65535 bytes (the maximum a 2-byte length prefix can describe).
        """
        # Fix: measure the *encoded* length — len(message) counts characters,
        # which understates the byte count for non-ASCII text and would
        # desynchronise the framing on the receiving side.
        encoded = message.encode(encoding='utf-8')
        if len(encoded) > 65535:
            raise ValueError("The specified message is too long. It must be at most 65535 bytes.")
        # The 2-byte prefix uses big-endian ("network") byte order, matching
        # the Java server side. sendall (rather than send) guarantees the
        # whole frame goes out even if the kernel accepts it in pieces.
        self.socket.sendall(
            len(encoded).to_bytes(2, byteorder='big') +
            encoded
        )

    def start(self) -> None:
        """
        Connect to the server and run the interactive loop: forward console
        lines to the server and print anything the server sends back.
        """
        try:
            # Initialize our socket and connect to the server.
            self.socket = socket(AF_INET, SOCK_STREAM)
            self.socket.connect(('localhost', scuffed_protocol.PORT))

            # Non-blocking mode lets the loop poll the socket for readable
            # bytes without stalling console handling.
            self.socket.setblocking(False)

            # Prompt without a trailing newline; flush because stdout may be
            # buffered until a newline is printed.
            sys.stdout.write("> ")
            sys.stdout.flush()

            # Start the background process that queues console input.
            self.console_input_delegate.start()

            # 'self.socket is None' is our signal that the connection is
            # finished; TCP gives us no cheap "is it still open?" test.
            # TODO: How could you include a feature in your protocol that
            #       checks if the server's connection has dropped unexpectedly?
            while self.socket is not None:
                try:
                    # TODO: when should a user be able to send messages?
                    # Should every outgoing message expect a response before
                    # another message can be sent?
                    if self.console_input_queue.qsize() > 0:
                        message = self.console_input_queue.get()
                        self.send_message(message)

                        # 'exit' ends the session by convention.
                        if message == "exit":
                            self.socket.close()
                            self.socket = None
                            break

                    # Poll for incoming data: peeking one byte on the
                    # non-blocking socket raises BlockingIOError when nothing
                    # is available yet.
                    try:
                        self.socket.recv(1, MSG_PEEK)
                        message = self.receive_message()
                        print(message)
                        # Re-print the prompt to show we are ready again.
                        sys.stdout.write('> ')
                        sys.stdout.flush()
                    except BlockingIOError:
                        # No data available right now — nothing to do.
                        pass
                except ConnectionError:
                    print("A communication error occurred with the server.")
                    # Fix: previously the loop kept spinning on a dead
                    # socket; drop the connection so the loop terminates.
                    if self.socket is not None:
                        self.socket.close()
                        self.socket = None

            print("\nConnection closed.")
        except ConnectionError as connectionError:
            # Print to the standard error stream (System.err in Java terms).
            print("Failed to connect to the server. Is it running?", file=sys.stderr)
            print(connectionError, file=sys.stderr)
# Script entry point (Python's equivalent of a main method): only start the
# client when this file is executed directly, not when it is imported.
if __name__ == '__main__':
    MyClient().start()
| StarcoderdataPython |
3357536 | <gh_stars>1000+
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The ALBERT configurations."""
import six
from official.nlp.bert import configs
class AlbertConfig(configs.BertConfig):
  """Configuration for `ALBERT`."""

  def __init__(self, num_hidden_groups=1, inner_group_num=1, **kwargs):
    """Constructs AlbertConfig.

    Args:
      num_hidden_groups: Number of groups for the hidden layers; parameters
        within one group are shared. Must currently be 1, which is what all
        released ALBERT checkpoints use. Other values may be supported later.
      inner_group_num: Number of inner repetitions of attention and ffn.
        Must currently be 1 as well.
      **kwargs: Remaining arguments, identical to those of `BertConfig`.
    """
    super().__init__(**kwargs)
    # Released ALBERT models always set both knobs to 1; AlbertEncoder does
    # not implement other values yet, so reject them early.
    if num_hidden_groups != 1 or inner_group_num != 1:
      raise ValueError("We only support 'inner_group_num' and "
                       "'num_hidden_groups' as 1.")

  @classmethod
  def from_dict(cls, json_object):
    """Constructs a `AlbertConfig` from a Python dictionary of parameters."""
    config = AlbertConfig(vocab_size=None)
    # Copy every entry of the dictionary onto the instance as an attribute.
    config.__dict__.update(json_object)
    return config
| StarcoderdataPython |
1727249 | <reponame>luizschmall/tce_siconfi_inconsistencies
import pandas
import string
import math
import csv
import os
from unicodedata import normalize
def remover_acentos(txt):
    """Strip accents: decompose to NFKD, then drop every non-ASCII code point."""
    decomposed = normalize('NFKD', txt)
    return decomposed.encode('ASCII', 'ignore').decode('ASCII')
def containsNumber(line):
    """
    Report whether ``line`` contains any digit and extract a monetary value.

    The input is coerced with ``str``. Every whitespace-separated token that
    contains a digit is normalised by removing '.' and ',' separators and
    re-inserting a decimal point before the last two digits (amounts are
    assumed to be expressed with two decimal places, e.g. '1.234,56').
    When several tokens qualify, the value of the *last* one wins.

    :param line: arbitrary cell content (any type; converted via ``str``).
    :return: tuple ``(has_digit, value)``; ``value`` is 0 when no token
        parses as a number.
    """
    res = False
    numero = 0
    if any(i.isdigit() for i in str(line)):
        res = True
        line = str(line).split(" ")
        for l in line:
            if any(k.isdigit() for k in l):
                l = l.replace(".", "")
                l = l.replace(",", "")
                l = l[:-2] + "." + l[-2:]
                try:
                    numero = float(l)
                except ValueError:
                    # Fix: was a bare 'except:'. Only a malformed token
                    # (digits mixed with letters, e.g. '12a') can fail here.
                    numero = 0
    return res, numero
def buscaKeyParts(diretorio, file, key):
    """
    Scan one extracted CSV for a budget row whose text contains ``key`` and
    pull up to six numeric values from that row.

    Matching is case-insensitive and accent-insensitive (via
    ``remover_acentos``). Numeric cells are collected starting at the cell
    that matched the key, walking the row's columns left to right.

    :param diretorio: directory containing the CSV (with trailing separator).
    :param file: CSV file name inside ``diretorio``.
    :param key: text fragment to look for.
    :return: list of six numbers; all zeros when the key is not found.
    """
    # NOTE(review): assumes the extracted tables never have more than 10
    # columns — confirm against the PDF-extraction step that produced them.
    df = pandas.read_csv(diretorio + file, names=list(range(0, 10)))
    # Boolean mask of cells whose normalised text contains the key.
    mask = df.applymap(lambda x: key.upper() in remover_acentos(str(x).upper()))
    # print(mask)
    # Keep only the rows where at least one cell matched.
    df1 = df[mask.any(axis=1)]
    print(df1)
    i = 0  # next slot of 'resultado' to fill (at most 6)
    j = 0  # becomes 1 once the matching cell has been seen in this row
    resultado = [0, 0, 0, 0, 0, 0]
    if df1.empty == False:
        # Walk the first matching row column by column; from the matching
        # cell onward, collect every cell that parses as a number.
        for (columnName, columnData) in df1.iteritems():
            if key.upper() in remover_acentos(str(columnData.values[0]).upper()):
                j = 1
                print('Colunm Name : ', columnName)
                print('Column Contents : ', columnData.values)
            # Skip empty strings and float NaN cells before trying to parse.
            if j == 1 and columnData.values[0] and (
                    isinstance(columnData.values[0], float) and math.isnan(columnData.values[0])) == False:
                containnumber1, containnumber2 = containsNumber(columnData.values[0])
                print('contain number : ', containnumber1, containnumber2)
                if containnumber1 == True and i < 6:
                    resultado[i] = containnumber2
                    i += 1
    return resultado
# One tuple per budget aggregate that the report extracts, in output order:
#   (row label / search key used on the first file of a prefix,
#    search key used on follow-up files of the same prefix,
#    row label written on follow-up files).
# NOTE(review): the '<NAME>' retry key looks like corrupted/placeholder text
# (it can never match a real cell) — confirm the intended key, presumably
# 'DESPESAS CORRENTES'.
_ENTRIES = [
    ('DESPESAS CORRENTES', '<NAME>', 'DESPESAS CORRENTES'),
    ('PESSOAL E ENCARGOS SOCIAIS', 'PESSOAL E ENCARGOS SOCIAIS', 'PESSOAL E ENCARGOS SOCIAIS'),
    ('JUROS E ENCARGOS DA DIVIDA', 'JUROS E ENCARGOS DA DIVIDA', 'JUROS E ENCARGOS DA DIVIDA'),
    ('OUTRAS DESPESAS CORRENTES', 'OUTRAS DESPESAS CORRENTES', 'OUTRAS DESPESAS CORRENTES'),
    ('DESPESAS DE CAPITAL', 'DESPESAS DE CAPITAL', 'DESPESAS DE CAPITAL'),
    ('INVESTIMENTOS', 'INVESTIMENTOS', 'INVESTIMENTOS'),
    ('INVERSOES FINANCEIRAS', 'INVERSOES FINANCEIRAS', 'INVERSOES FINANCEIRAS'),
    ('AMORTIZACAO DA DIVIDA', 'AMORTIZACAO DA DIVIDA', 'AMORTIZACAO DA DIVIDA'),
    ('RESERVA DE CONTINGENCIA', 'RESERVA DE CONTINGENCIA', 'RESERVA DE CONTINGENCIA'),
    ('RESERVA DO RPPS', 'RESERVA DO RPPS', 'RESERVA DO RPPS'),
    ('SUBTOTAL DAS DESPESAS', 'SUBTOTAL DAS DESPESAS', 'SUBTOTAL DAS DESPESAS'),
    ('AMORTIZACAO DA DIVIDA - REFINANCIAMENTO',
     'REFINANCIAMENTO DA DIVIDA - REFINANCIAMENTO',
     'REFINANCIAMENTO DA DIVIDA - REFINANCIAMENTO'),
    ('SUBTOTAL COM REFINANCIAMENTO (XV)', 'SUBTOTAL COM REFINANCIAMENTO (XV)',
     'SUBTOTAL COM REFINANCIAMENTO (XV)'),
]


def _unresolved(values):
    """True while a key has not been located (both leading amounts are 0)."""
    return values[0] == 0 and values[1] == 0


def _flush_unresolved(diretorio, prefix, results):
    """Append zero rows for every key still unresolved for ``prefix``."""
    with open(diretorio + prefix + "_tratado.csv", mode='a+') as balorc_file:
        balorc_writer = csv.writer(balorc_file, delimiter=';', quoting=csv.QUOTE_NONNUMERIC)
        for label, _, _ in _ENTRIES:
            values = results[label]
            if _unresolved(values):
                balorc_writer.writerow([label] + list(values))


def main():
    """
    Build one '<prefix>_tratado.csv' per balance sheet.

    Walks every '*tables*.csv' extraction in the source directory (files are
    grouped by the prefix before the first '.'): the first file of a prefix
    creates the output with a header and one row per key found; follow-up
    files of the same prefix are used to retry the keys that are still
    unresolved. When a prefix is finished, the keys that were never found
    are appended with their zero values.
    """
    diretorio = "C:\\Users\\schmall\\Documents\\FGV\\Tese\\Balan\u00e7os_PI\\BALORC\\ORIG\\RESULT2_despesas\\"
    # Same filtering as before: csv files whose name contains 'tables',
    # in os.listdir order.
    csv_files = [f for f in os.listdir(diretorio) if f.endswith('.csv') and 'tables' in f]

    current_prefix = ""
    # ' ' placeholders (not 0) so nothing is flushed before the first search.
    results = {label: [" ", " ", " ", " ", " ", " "] for label, _, _ in _ENTRIES}

    for file in csv_files:
        print(file)
        prefix = file.split(".")[0]
        if prefix != current_prefix:
            # Fix: only flush once a real prefix has been processed — the old
            # code created a stray, empty '_tratado.csv' on the first file.
            if current_prefix:
                _flush_unresolved(diretorio, current_prefix, results)
            current_prefix = prefix
            with open(diretorio + prefix + "_tratado.csv", mode='w+') as balorc_file:
                balorc_writer = csv.writer(balorc_file, delimiter=';', quoting=csv.QUOTE_NONNUMERIC)
                balorc_writer.writerow(["Key", "1", "2", "3", "4", "5", "6"])
                for label, _, _ in _ENTRIES:
                    values = buscaKeyParts(diretorio, file, label)
                    print(label, values)
                    results[label] = values
                    if not _unresolved(values):
                        balorc_writer.writerow([label] + list(values))
        else:
            # Follow-up file of the same prefix: retry only unresolved keys.
            with open(diretorio + prefix + "_tratado.csv", mode='a+') as balorc_file:
                balorc_writer = csv.writer(balorc_file, delimiter=';', quoting=csv.QUOTE_NONNUMERIC)
                for label, retry_key, retry_label in _ENTRIES:
                    if _unresolved(results[label]):
                        values = buscaKeyParts(diretorio, file, retry_key)
                        print(label, values)
                        results[label] = values
                        if not _unresolved(values):
                            balorc_writer.writerow([retry_label] + list(values))

    # Fix: the old code never flushed the unresolved keys of the *last*
    # prefix (flushing only happened when the prefix changed).
    if current_prefix:
        _flush_unresolved(diretorio, current_prefix, results)
# Run the extraction only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
4834744 | <gh_stars>0
from django.conf.urls import url
from .views import pitList, detail, lock
# URL routes for the 'pit' app: list view at the app root, plus the detail
# and lock endpoints, each named so templates can reverse() them.
urlpatterns = [
    url(r'^$', pitList, name='pit_list'),
    url(r'^detail$', detail, name='pit_detail'),
    url(r'^lock$', lock, name='pit_lock')
]
| StarcoderdataPython |
130725 | #!/usr/bin/env python3
# Enlighten (Enphase) endpoints used by the scraper.
BASE_URL = 'https://enlighten.enphaseenergy.com'
STARTING_URL = BASE_URL + '/login'        # page that hosts the login form
LOGIN_URL = BASE_URL + '/login/login'     # form action credentials are POSTed to
LOGIN_SUCCESS_URL = BASE_URL + '/systems'  # successful logins redirect here
# Directory for cookies and scraped state ('~' is expanded at runtime).
STATE_DIR = '~/.enphase_scraper'
import os
import errno
import http.cookiejar
import urllib
import urllib.request
import urllib.error
from lxml.html import fromstring
from lxml.html import submit_form
from getpass import getpass
import argparse
import re
import json
def report_error(msg):
    """Report a script-level error to the user by printing the message."""
    print(msg)
# Command-line interface: a single option selecting where cookies and
# scraped state are stored.
parser = argparse.ArgumentParser()
parser.add_argument('--statedir', dest='statedir', default=os.path.expanduser(STATE_DIR),
                    metavar='DIR',
                    help='directory to store state (default: %(default)s)')
args = parser.parse_args()

# Normalise the directory so later string concatenation always works.
# Fix: endswith() also copes with an empty --statedir value, where the old
# "args.statedir[-1]" check raised IndexError.
if not args.statedir.endswith('/'):
    args.statedir += '/'
print(args.statedir)
# Create the state directory if it does not already exist. exist_ok=True
# tolerates an existing directory but still raises if the path exists and is
# not a directory (the old manual EEXIST check silently accepted that too,
# only to fail later when writing files into it).
os.makedirs(args.statedir, exist_ok=True)
# Prompt interactively for the Enlighten credentials (password not echoed).
username = input('Username: ')
password = getpass('Password: ')

# Persist cookies on disk so the authenticated session can be reused later.
cookies = http.cookiejar.MozillaCookieJar(os.path.expanduser(args.statedir) + 'cookiejar.txt')
opener = urllib.request.build_opener(
    urllib.request.HTTPCookieProcessor(cookies),
)
# Present a browser-like User-Agent (an old Firefox string); presumably the
# site treats unknown agents differently — TODO confirm this is still needed.
opener.addheaders = [
    ('User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11'),
]
# Install the opener globally so plain urllib.request.urlopen uses it too.
urllib.request.install_opener(opener)

# Fetch the login page and parse it so the login form can be located.
page = opener.open(STARTING_URL)
data = page.read()
doc = fromstring(data)
# Walk the forms on the login page until we find the one that posts to
# /login/login and has both credential fields, then fill it in and submit.
for form in doc.xpath('//form[@action="/login/login"]'):
    if 'user[email]' not in form.fields.keys():
        continue
    if 'user[password]' not in form.fields.keys():
        continue
    print('Found the login form.')

    # Fill in the credentials and resolve relative links/actions first.
    form.fields['user[email]'] = username
    form.fields['user[password]'] = password
    doc.make_links_absolute(STARTING_URL)

    # @@@TODO: honour form.method instead of unconditionally POSTing.
    formdata = urllib.parse.urlencode(form.fields)
    page2 = urllib.request.urlopen(form.action, bytes(formdata, 'utf-8'))
    data2 = page2.read()  # drain the response body
    print(page2.geturl())

    # The login redirects to /systems/<id> on success; anything else means
    # the credentials were rejected.
    if not page2.geturl().startswith(LOGIN_SUCCESS_URL):
        print(page2.geturl())
        report_error('Login failed')
        # Fix: exit with a non-zero status on failure (the old bare exit()
        # reported success to the shell).
        sys.exit(1)
    print('Login succeeded!')

    # Extract the numeric system ID from the URL. Fix: re.escape keeps the
    # dots in the hostname from matching arbitrary characters, and the raw
    # string avoids the invalid-escape warning for '\d'.
    m = re.search('^' + re.escape(LOGIN_SUCCESS_URL) + r'/(\d+)(/.*)?$', page2.geturl())
    system_id = m.group(1) if m else None

    # Persist the system ID for later runs of the scraper.
    state_obj = {'system_id': system_id}
    with open(os.path.expanduser(args.statedir) + 'state.json', 'w') as statefile:
        statefile.write(json.dumps(state_obj, indent=4, separators=(',', ': ')))

    # Save the session cookies (including session-only ones).
    cookies.save(ignore_discard=True)
    break
| StarcoderdataPython |
1725217 | from pathlib import Path
import json
from .utils import write_jsonfile, check_accessmode
class MetaData:
    """Dictionary-like access to disk based metadata.

    The metadata is stored as a single JSON file at `path`.  Every read goes
    to disk, so the view is always consistent with the file contents.
    If there is no metadata, the metadata file does not exist, rather than
    being empty. This saves a block of disk space (potentially 4kb).
    """

    def __init__(self, path, accessmode='r', callatfilecreationordeletion=None):
        # `callatfilecreationordeletion` is invoked when the backing file is
        # created or deleted.  NOTE(review): pop/popitem/update call it
        # unconditionally, so the default of None raises TypeError there --
        # confirm callers always supply a callable.
        path = Path(path)
        self._path = path
        self._accessmode = check_accessmode(accessmode)
        self._callatfilecreationordeletion = callatfilecreationordeletion

    @property
    def path(self):
        # Path of the backing JSON file.
        return self._path

    @property
    def accessmode(self):
        """
        Set data access mode of metadata.

        Parameters
        ----------
        accessmode: {'r', 'r+'}, default 'r'
            File access mode of the data. `r` means read-only, `r+`
            means read-write.
        """
        return self._accessmode

    @accessmode.setter
    def accessmode(self, value):
        self._accessmode = check_accessmode(value)

    def __getitem__(self, item):
        return self._read()[item]

    def __setitem__(self, key, value):
        # Delegates to update() so the write-protection check applies.
        self.update({key: value})

    def __delitem__(self, key):
        self.pop(key)

    def __len__(self):
        return len(self.keys())

    def __repr__(self):
        return str(self._read())

    def __contains__(self, item):
        return item in self.keys()

    __str__ = __repr__

    def _read(self):
        """Load the metadata dict from disk; a missing file means empty."""
        if not self._path.exists():
            return {}
        with open(self._path, 'r') as fp:
            return json.load(fp)

    def get(self, *args):
        """metadata.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
        """
        return self._read().get(*args)

    def items(self):
        """a set-like object providing a view on D's items"""
        return self._read().items()

    def keys(self):
        """D.keys() -> a set-like object providing a view on D's keys"""
        return self._read().keys()

    # FIXME remove overlap with popitem
    def pop(self, *args):
        """D.pop(k[,d]) -> v, remove specified key and return the corresponding
        value. If key is not found, d is returned if given, otherwise KeyError
        is raised
        """
        if self._accessmode == 'r':
            raise OSError("metadata not writeable; change 'accessmode' to "
                          "'r+'")
        metadata = self._read()
        val = metadata.pop(*args)
        if metadata:
            write_jsonfile(self.path, data=metadata, sort_keys=True,
                           ensure_ascii=True, overwrite=True)
        else:
            # Removing the last key deletes the file entirely (see class doc).
            self._path.unlink()
            self._callatfilecreationordeletion()
        return val

    def popitem(self):
        """D.pop() -> k, v, returns and removes an arbitrary element (key,
        value) pair from the dictionary.
        """
        if self._accessmode == 'r':
            raise OSError("metadata not writeable; change 'accessmode' to "
                          "'r+'")
        metadata = self._read()
        key, val = metadata.popitem()
        if metadata:
            write_jsonfile(self.path, data=metadata, sort_keys=True,
                           ensure_ascii=True, overwrite=True)
        else:
            # Removing the last pair deletes the file entirely.
            self._path.unlink()
            self._callatfilecreationordeletion()
        return key, val

    def values(self):
        return self._read().values()

    def update(self, *arg, **kwargs):
        """Updates metadata.

        Metadata are written to disk.

        Parameters
        ----------
        arg: a dictionary with metadata keys and values, optional
        kwargs: keyword arguments, corresponding to keys and values, optional

        Returns
        -------
        None

        Examples
        --------
        >>> import darr as da
        >>> d = da.create_array('test.da', shape=(12,), accessmode='r+')
        >>> d.metadata.update({'starttime': '2017-08-31T17:00:00'})
        >>> print(d.metadata)
        {'starttime': '2017-08-31T17:00:00'}
        >>> d.metadata['samplingrate'] = 22050
        >>> print(d.metadata)
        {'samplingrate': 22050, 'starttime': '2017-08-31T17:00:00'}
        """
        if self._accessmode == 'r':
            raise OSError("metadata not writeable; change 'accessmode' to "
                          "'r+'")
        metadata = self._read()
        metadata.update(*arg, **kwargs)
        write_jsonfile(self.path, data=metadata, sort_keys=True,
                       ensure_ascii=True, overwrite=True)
        # The write above may have created the file; notify the callback.
        if metadata:
            self._callatfilecreationordeletion()
| StarcoderdataPython |
166699 | <filename>model/relation_transformer.py<gh_stars>0
# copy from: https://github.com/yahoo/object_relation_transformer/blob/master/models/RelationTransformerModel.py
##########################################################
# Copyright 2019 Oath Inc.
# Licensed under the terms of the MIT license.
# Please see LICENSE file in the project root for terms.
##########################################################
# This file contains Att2in2, AdaAtt, AdaAttMO, TopDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the img feature embedding and word embedding is the same as what in adaatt.
# TopDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
import numpy as np
from .utils import BoxRelationalEmbedding
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
class EncoderDecoder(nn.Module):
    """Standard encoder-decoder skeleton.

    Wires together an encoder, a decoder, the source/target embedding
    modules and an output generator; all the heavy lifting happens inside
    the sub-modules supplied to the constructor.
    """

    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

    def forward(self, src, boxes, tgt, src_mask, tgt_mask):
        """Encode the masked source (with box geometry), then decode the target."""
        memory = self.encode(src, boxes, src_mask)
        return self.decode(memory, src_mask, tgt, tgt_mask)

    def encode(self, src, boxes, src_mask):
        embedded = self.src_embed(src)
        return self.encoder(embedded, boxes, src_mask)

    def decode(self, memory, src_mask, tgt, tgt_mask):
        embedded = self.tgt_embed(tgt)
        return self.decoder(embedded, memory, src_mask, tgt_mask)
class Generator(nn.Module):
    """Final output head: project hidden states to log-probabilities over the vocab."""

    def __init__(self, d_model, vocab):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        logits = self.proj(x)
        return F.log_softmax(logits, dim=-1)
def clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of *module*."""
    copies = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(copies)
class Encoder(nn.Module):
    """Stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, box, mask):
        """Run the input (plus box geometry and mask) through every layer in turn."""
        out = x
        for encoder_layer in self.layers:
            out = encoder_layer(out, box, mask)
        return self.norm(out)
class LayerNorm(nn.Module):
    """Layer normalisation with a learnable per-feature gain and bias."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # gain
        self.b_2 = nn.Parameter(torch.zeros(features))  # bias
        self.eps = eps  # numerical floor so we never divide by zero

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalised = (x - mu) / (sigma + self.eps)
        return self.a_2 * normalised + self.b_2
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: x + dropout(sublayer(norm(x))).

    Note that the norm is applied first (pre-norm), as opposed to the
    post-norm formulation of the original Transformer paper.
    """

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply *sublayer* to the normalised input and add the residual."""
        transformed = sublayer(self.norm(x))
        return x + self.dropout(transformed)
class EncoderLayer(nn.Module):
    """One encoder block: box-aware self-attention, then a position-wise feed-forward net."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        # Two residual sub-layers: one around attention, one around the FFN.
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, box, mask):
        """Self-attention (with geometry) followed by the feed-forward sub-layer."""
        attended = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, box, mask))
        return self.sublayer[1](attended, self.feed_forward)
class Decoder(nn.Module):
    """Stack of N identical decoder layers (with masking) and a closing LayerNorm."""

    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, memory, src_mask, tgt_mask):
        out = x
        for decoder_layer in self.layers:
            out = decoder_layer(out, memory, src_mask, tgt_mask)
        return self.norm(out)
class DecoderLayer(nn.Module):
    """One decoder block: masked self-attention, source attention, then feed-forward."""

    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        # Three residual sub-layers: self-attn, cross-attn, and the FFN.
        self.sublayer = clones(SublayerConnection(size, dropout), 3)

    def forward(self, x, memory, src_mask, tgt_mask):
        """Attend over the target prefix, then the encoder memory, then apply the FFN."""
        m = memory
        out = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, tgt_mask))
        out = self.sublayer[1](out, lambda t: self.src_attn(t, m, m, src_mask))
        return self.sublayer[2](out, self.feed_forward)
def subsequent_mask(size):
    """Boolean (1, size, size) mask that is True where position j <= i.

    Used by the decoder so each position can only attend to itself and
    earlier positions (no peeking at future tokens).
    """
    upper_triangle = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
    return torch.from_numpy(upper_triangle) == 0
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Returns a tuple of (attended values, attention weights).
    """
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # Large negative fill so masked positions get ~zero probability.
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return torch.matmul(weights, value), weights
class MultiHeadedAttention(nn.Module):
    """Classic multi-head attention (appearance only, no box geometry)."""

    def __init__(self, h, d_model, dropout=0.1):
        "Take in model size and number of heads."
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k
        self.d_k = d_model // h
        self.h = h
        # Four projections: W_q, W_k, W_v, and the final output layer.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None  # attention weights from the most recent forward pass
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        "Implements Figure 2"
        if mask is not None:
            # Same mask applied to all h heads.
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)

        # 1) Do all the linear projections in batch from d_model => h x d_k
        query, key, value = \
            [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
             for l, x in zip(self.linears, (query, key, value))]

        # 2) Apply attention on all the projected vectors in batch.
        x, self.attn = attention(query, key, value, mask=mask,
                                 dropout=self.dropout)

        # 3) "Concat" using a view and apply a final linear.
        x = x.transpose(1, 2).contiguous() \
            .view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](x)
def box_attention(query, key, value, box_relation_embds_matrix, mask=None, dropout=None):
    '''
    Compute 'Scaled Dot Product Attention as in paper Relation Networks for Object Detection'.
    Follow the implementation in https://github.com/heefe92/Relation_Networks-pytorch/blob/master/model.py#L1026-L1055

    Combines the appearance score (scaled dot product) with a geometric
    weight per pair: w_mn = softmax(log(w_g) + w_a), so pairs with
    near-zero geometric weight are suppressed regardless of appearance.
    '''
    N = value.size()[:2]  # (batch, num_heads)
    dim_k = key.size(-1)
    dim_g = box_relation_embds_matrix.size()[-1]

    w_q = query
    w_k = key.transpose(-2, -1)
    w_v = value

    # attention weights
    scaled_dot = torch.matmul(w_q, w_k)
    scaled_dot = scaled_dot / np.sqrt(dim_k)  # (bs, h, O, O)
    if mask is not None:
        # Repeat the mask across the head dimension (dim 1, N[1] == h) and
        # fill masked positions with a large negative before the softmax.
        scaled_dot = scaled_dot.masked_fill(~mask.repeat(
            1, N[1], 1, 1), -1e9)  # mask(8,1,8) scaled_dot(8,4, 8,8)

    # w_g = box_relation_embds_matrix.view(N,N)
    w_g = box_relation_embds_matrix
    w_a = scaled_dot
    # w_a = scaled_dot.view(N,N)

    # multiplying log of geometric weights by feature weights
    # (the clamp keeps log() finite when a geometric weight is zero)
    w_mn = torch.log(torch.clamp(w_g, min=1e-6)) + w_a
    w_mn = torch.nn.Softmax(dim=-1)(w_mn)
    if dropout is not None:
        w_mn = dropout(w_mn)

    # att_weights*value.
    output = torch.matmul(w_mn, w_v)

    # w_mn is attention weights.
    return output, w_mn
class BoxMultiHeadedAttention(nn.Module):
    '''
    Self-attention layer with relative position weights.
    Following the paper "Relation Networks for Object Detection" in https://arxiv.org/pdf/1711.11575.pdf
    '''

    def __init__(self, h, d_model, trignometric_embedding=True, legacy_extra_skip=False, dropout=0.1):
        "Take in model size and number of heads."
        super(BoxMultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        self.trignometric_embedding = trignometric_embedding
        self.legacy_extra_skip = legacy_extra_skip

        # We assume d_v always equals d_k
        self.h = h
        self.d_k = d_model // h
        # Geometry feature width: 64 with the sinusoidal embedding, otherwise
        # the 4 raw box deltas.
        if self.trignometric_embedding:
            self.dim_g = 64
        else:
            self.dim_g = 4
        geo_feature_dim = self.dim_g

        # matrices W_q, W_k, W_v, and one last projection layer
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        # One scalar projection of the geometry embedding per head (W_G).
        self.WGs = clones(nn.Linear(geo_feature_dim, 1, bias=True), self.h)

        self.box_attn = None  # attention weights from the last forward pass
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input_query, input_key, input_value, input_box, mask=None):
        "Implements Figure 2 of Relation Network for Object Detection"
        if mask is not None:
            # Same mask applied to all h heads.
            mask = mask.unsqueeze(1)
        nbatches = input_query.size(0)

        # tensor with entries R_mn given by a hardcoded embedding of the relative position between bbox_m and bbox_n
        relative_geometry_embeddings = BoxRelationalEmbedding(
            input_box, trignometric_embedding=self.trignometric_embedding)  # (bs, 36, 36, 64)
        flatten_relative_geometry_embeddings = relative_geometry_embeddings.view(
            -1, self.dim_g)  # (bs*36*36, 64)

        # 1) Do all the linear projections in batch from d_model => h x d_k
        query, key, value = \
            [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
             for l, x in zip(self.linears, (input_query, input_key, input_value))]  # (bs, h, O, 180/h)
        box_size_per_head = list(relative_geometry_embeddings.shape[:3])
        box_size_per_head.insert(1, 1)  # (bs, 1, 36, 36)
        # Project each pair's geometry embedding to one scalar per head and
        # stack the per-head maps along dim 1.
        relative_geometry_weights_per_head = [
            l(flatten_relative_geometry_embeddings).view(box_size_per_head) for l in self.WGs]
        relative_geometry_weights = torch.cat(
            (relative_geometry_weights_per_head), 1)  # (bs, h, 36, 36)
        # ReLU clamps negative geometry scores to zero, as in the paper.
        relative_geometry_weights = F.relu(relative_geometry_weights)

        # 2) Apply attention on all the projected vectors in batch.
        x, self.box_attn = box_attention(query, key, value, relative_geometry_weights, mask=mask,
                                         dropout=self.dropout)
        # x(8,4,8,45), box_attn(8,4,8,8)
        # 3) "Concat" using a view and apply a final linear.
        x = x.transpose(1, 2).contiguous() \
            .view(nbatches, -1, self.h * self.d_k)
        # x(bs, O, 180)

        # An extra internal skip connection is added. This is only
        # kept here for compatibility with some legacy models. In
        # general, there is no advantage in using it, as there is
        # already an outer skip connection surrounding this layer.
        if self.legacy_extra_skip:
            x = input_value + x

        return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise MLP: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
class Embeddings(nn.Module):
    """Token embedding lookup scaled by sqrt(d_model), as in the Transformer paper."""

    def __init__(self, d_model, vocab):
        super(Embeddings, self).__init__()
        self.lut = nn.Embedding(vocab, d_model)
        self.d_model = d_model

    def forward(self, x):
        scale = math.sqrt(self.d_model)
        return self.lut(x) * scale
class PositionalEncoding(nn.Module):
    """Adds the fixed sinusoidal position encodings of the Transformer, then dropout."""

    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the (1, max_len, d_model) table once; frequencies are
        # computed in log space for numerical stability.
        position = torch.arange(0, max_len).unsqueeze(1).float()
        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
                             -(math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)  # even dims: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dims: cosine
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        with_positions = x + self.pe[:, :x.size(1)]
        return self.dropout(with_positions)
def get_rel_encoder_layer(d_model, nhead, d_ff=2048, dropout=0.1, trignometric_embedding=True, legacy_extra_skip=False):
    """Build one relation-aware encoder layer (box attention + position-wise FFN)."""
    bbox_attn = BoxMultiHeadedAttention(
        nhead, d_model, trignometric_embedding, legacy_extra_skip)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    # Deep-copy the sub-modules so the layer owns independent parameters.
    return EncoderLayer(d_model, copy.deepcopy(bbox_attn), copy.deepcopy(ff), dropout)
def sort_pack_padded_sequence(input, lengths):
    """Sort the batch by decreasing length, pack it, and return the inverse permutation.

    The inverse index can be passed to pad_unsort_packed_sequence() to
    restore the original batch order afterwards.
    """
    sorted_lengths, indices = torch.sort(lengths, descending=True)
    packed = pack_padded_sequence(
        input[indices], sorted_lengths, batch_first=True)
    inv_ix = indices.clone()
    inv_ix[indices] = torch.arange(0, len(indices)).type_as(inv_ix)
    return packed, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
    """Pad a packed sequence back out and restore the original batch order."""
    padded, _ = pad_packed_sequence(input, batch_first=True)
    return padded[inv_ix]
def pack_wrapper(module, att_feats, att_masks):
    """Apply *module* to only the valid (unmasked) timesteps of att_feats.

    When att_masks is given, rows are packed (sorted by their number of
    valid steps), the module is applied to the packed data, and the result
    is unpacked back into the original batch order.  Otherwise the module
    is applied to the features directly.
    """
    if att_masks is not None:
        packed, inv_ix = sort_pack_padded_sequence(
            att_feats, att_masks.data.long().sum(1))
        return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
    else:
        return module(att_feats)
# NOTE: a byte-for-byte duplicate definition of pack_wrapper was removed
# here; it merely re-bound the name to an identical copy of the function
# defined immediately above and served no purpose.
| StarcoderdataPython |
119976 | <gh_stars>1-10
from lale.sklearn_compat import clone_op
from lale.operators import Operator, make_operator
import logging
import inspect
import importlib
logger = logging.getLogger(__name__)
def wrap_imported_operators():
    """Wrap estimator classes imported by the *caller* into Lale operators.

    Inspects the calling frame's global symbol table and, for every plain
    (non-Operator) class found there, tries in order:

    1. a curated Lale wrapper from ``lale.lib.<top-level module>``,
    2. an autogenerated wrapper from ``lale.lib.autogen``,
    3. as a last resort, wrapping the raw class itself if it looks like an
       estimator (has ``fit`` plus ``predict`` or ``transform``).

    The caller's symbol is rebound in place, so later references in that
    module use the wrapped operator.
    """
    calling_frame = inspect.stack()[1][0]
    symtab = calling_frame.f_globals
    for name, impl in symtab.items():
        if inspect.isclass(impl) and not issubclass(impl, Operator):
            module = impl.__module__.split('.')[0]
            klass = impl.__name__
            try:
                m = importlib.import_module('lale.lib.' + module)
                symtab[name] = clone_op(getattr(m, klass), name)
                logger.info(f'Lale:Wrapped known operator:{name}')
            except (ModuleNotFoundError, AttributeError):
                try:
                    m = importlib.import_module('lale.lib.autogen')
                    symtab[name] = clone_op(getattr(m, klass), name)
                    logger.info(f'Lale:Wrapped autogen operator:{name}')
                except (ModuleNotFoundError, AttributeError):
                    if hasattr(impl, 'fit') and (
                            hasattr(impl, 'predict') or hasattr(impl, 'transform')):
                        # Fixed typo in the log message ("unkwnown" -> "unknown").
                        logger.info(f'Lale:Wrapped unknown operator:{name}')
                        symtab[name] = make_operator(impl=impl, name=name)
| StarcoderdataPython |
1757814 | <reponame>amithapa/learn_py_asyncio
import asyncio
import random
async def myCoroutine(id):
    """Simulate work: sleep a random 1-5 seconds, then report completion."""
    delay = random.randint(1, 5)
    await asyncio.sleep(delay)
    print(f"Coroutine: {id}, has successfully completed after {delay} seconds.")
async def main():
    """Schedule ten coroutines concurrently and wait for all of them."""
    tasks = [asyncio.ensure_future(myCoroutine(i)) for i in range(10)]
    await asyncio.gather(*tasks)
# Drive the demo: run main() to completion on the default event loop and
# always close the loop afterwards, even if a coroutine raises.
# NOTE(review): asyncio.get_event_loop() is deprecated for this use on
# Python 3.10+; asyncio.run(main()) is the modern equivalent.
loop = asyncio.get_event_loop()
try:
    loop.run_until_complete(main())
finally:
    loop.close()
| StarcoderdataPython |
65582 | <filename>jiamtrader/app/algo_trading/algos/twap_algo.py
from jiamtrader.trader.constant import Offset, Direction
from jiamtrader.trader.object import TradeData
from jiamtrader.trader.engine import BaseEngine
from jiamtrader.app.algo_trading import AlgoTemplate
class TwapAlgo(AlgoTemplate):
    """Time Weighted Average Price execution algorithm.

    Splits a parent order of ``volume`` into equal child orders sent every
    ``interval`` seconds over a total window of ``time`` seconds.  A child
    order is only sent while the top of book is at or better than the limit
    ``price``; unfilled child orders are cancelled before the next slice.
    """

    # UI label; the Chinese text reads "TWAP time-weighted average".
    display_name = "TWAP 时间加权平均"

    default_setting = {
        "vt_symbol": "",
        "direction": [Direction.LONG.value, Direction.SHORT.value],
        "price": 0.0,
        "volume": 0.0,
        "time": 600,
        "interval": 60,
        "offset": [
            Offset.NONE.value,
            Offset.OPEN.value,
            Offset.CLOSE.value,
            Offset.CLOSETODAY.value,
            Offset.CLOSEYESTERDAY.value
        ]
    }

    # State fields published to the UI via put_variables_event().
    variables = [
        "traded",
        "order_volume",
        "timer_count",
        "total_count"
    ]

    def __init__(
        self,
        algo_engine: BaseEngine,
        algo_name: str,
        setting: dict
    ):
        """Read the user settings and derive the per-slice order size."""
        super().__init__(algo_engine, algo_name, setting)

        # Parameters
        self.vt_symbol = setting["vt_symbol"]
        self.direction = Direction(setting["direction"])
        self.price = setting["price"]
        self.volume = setting["volume"]
        self.time = setting["time"]
        self.interval = setting["interval"]
        self.offset = Offset(setting["offset"])

        # Variables
        # Equal slice per interval: time / interval is the number of slices.
        # NOTE(review): assumes interval > 0 and time >= interval -- confirm
        # the caller/UI enforces this, otherwise this divides by zero.
        self.order_volume = self.volume / (self.time / self.interval)
        self.timer_count = 0    # seconds since the last slice was sent
        self.total_count = 0    # seconds since the algo started
        self.traded = 0         # cumulative filled volume

        self.subscribe(self.vt_symbol)
        self.put_parameters_event()
        self.put_variables_event()

    def on_trade(self, trade: TradeData):
        """Accumulate fills; stop the algo once the full volume is traded."""
        self.traded += trade.volume
        if self.traded >= self.volume:
            # Log text: "traded volume: ..., total volume: ..."
            self.write_log(f"已交易数量:{self.traded},总数量:{self.volume}")
            self.stop()
        else:
            self.put_variables_event()

    def on_timer(self):
        """Timer callback (presumably once per second); sends one slice per interval."""
        self.timer_count += 1
        self.total_count += 1
        self.put_variables_event()

        if self.total_count >= self.time:
            # Log text: "execution window elapsed, stopping algorithm"
            self.write_log("执行时间已结束,停止算法")
            self.stop()
            return

        if self.timer_count < self.interval:
            return
        self.timer_count = 0

        tick = self.get_tick(self.vt_symbol)
        if not tick:
            return

        # Cancel any resting child orders before placing the next slice.
        self.cancel_all()

        left_volume = self.volume - self.traded
        order_volume = min(self.order_volume, left_volume)

        # Only send when the opposite side of the book is at or better than
        # the limit price (ask <= limit for buys, bid >= limit for sells).
        if self.direction == Direction.LONG:
            if tick.ask_price_1 <= self.price:
                self.buy(self.vt_symbol, self.price,
                         order_volume, offset=self.offset)
        else:
            if tick.bid_price_1 >= self.price:
                self.sell(self.vt_symbol, self.price,
                          order_volume, offset=self.offset)
| StarcoderdataPython |
118600 | <filename>moar_dots/wipe.py<gh_stars>0
import logging
import os
import random
import yaml
from time import sleep
from .config import easter
from .constants import EASTER_FILE, ERROR_FILE
class Wipe:
    """
    An very strongly themed error handling class for moar-dots.
    Also has an easter egg.

    Constructing a Wipe runs the whole flow immediately: pick a themed
    error message based on the current "aggro" level, print it, report
    the real exception, then persist the updated easter-egg state.
    """

    def __init__(self, error, *args):
        # `error` is the underlying exception; extra positional args are
        # human-readable context lines echoed after the themed banner.
        self.log = logging.getLogger(__name__)
        self.log.debug("Initializing Wipe object...")
        self.error = error
        self.text = []
        for arg in args:
            self.text.append(arg)
        self.errors = self._get_aggro()
        self._onyxia_wipe()

    def _get_aggro(self):
        """
        Retrieve all error messages from the error YAML file.

        Each YAML document is one error entry with at least an 'aggro'
        level and a 'text' list of banner lines.
        """
        self.log.debug("Retrieving all error messages...")
        errors = []
        with open(ERROR_FILE, "r") as file:
            errors = list(yaml.load_all(file, Loader=yaml.FullLoader))
        return errors

    def _roll_for_error(self, aggro):
        """
        Returns a random error based on the aggro level passed in.
        If there is an exact hit on aggro level, it will use that error instead.

        NOTE(review): the early `break` assumes self.errors is sorted by
        ascending 'aggro' in the YAML file -- confirm.
        """
        self.log.debug("Rolling for an error under aggro level %d...", aggro)
        error_choices = []
        for error in self.errors:
            if error["aggro"] < aggro:
                error_choices.append(error)
            elif error["aggro"] == aggro:
                self.log.debug("Got an exact hit! Returning newly unlocked error!")
                return error
            else:
                break
        self.log.debug(f"Retrieved {len(error_choices)} error(s), choosing one...")
        # Re-seed from system entropy before picking.
        random.seed()
        return random.choice(error_choices)

    def _onyxia_wipe(self):
        """
        Handles error reporting for issues with moar-dots.
        Also prints out funny output and manages the easter egg.
        """
        self.log.debug("Starting Onyxia Wipe...")
        error_msg = self._roll_for_error(easter["aggro"])
        self.log.debug(f"Error retrieved: {error_msg['error']}")

        # Themed banner, one line every two seconds for dramatic effect.
        self.log.info("=" * 70)
        for line in error_msg["text"]:
            self.log.info(line)
            sleep(2)
        self.log.info("=" * 70)

        self.log.error("(Seriously though, moar-dots had a sad.)")
        self.log.error(f"\nException: {self.error}")
        self.log.error("\nHere's some info about what happened:")
        for line in self.text:
            print(f"  {line}")

        # Each wipe raises the aggro level, unlocking new error messages
        # on future runs; optional per-error DKP penalties accumulate too.
        self.log.debug("Updating easter egg data and quitting...")
        easter["aggro"] += 1
        if "dkpminus" in error_msg:
            easter["dkpminus"] += error_msg["dkpminus"]
            self.log.error(
                f"\nOh, and by the way, that was a {error_msg['dkpminus']} DKP minus."
            )
        with open(EASTER_FILE, "w+") as file:
            file.write(yaml.dump(easter))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.