id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
6675662 | <reponame>amoskowitz14/causalML<filename>projects/reinforcement learning/causal_reinforcement_learning/src/envs/pomdp.py
import types
from gym_pyro import PyroPOMDP
from . import renders
def make_pomdp(path, *args, **kwargs):
    """Build a PyroPOMDP environment from a POMDP definition file.

    If `renders.get_render` knows a custom renderer for the file's basename,
    that renderer is bound to the environment instance as its ``render``
    method; otherwise the environment's default renderer is kept.

    :param path: path to the POMDP file
    :param *args: positional arguments forwarded to PyroPOMDP
    :param **kwargs: keyword arguments forwarded to PyroPOMDP
    :return: the configured PyroPOMDP instance
    """
    with open(path) as pomdp_file:
        env = PyroPOMDP(  # pylint: disable=missing-kwoa
            pomdp_file.read(), *args, **kwargs
        )
    file_basename = path.split('/')[-1]
    try:
        custom_render = renders.get_render(file_basename)
    except KeyError:
        # No custom renderer registered for this POMDP file.
        pass
    else:
        env.render = types.MethodType(custom_render, env)
    return env
| StarcoderdataPython |
6448159 | import datetime
import dateutil
import urllib
import functools
import time
import requests
import simplejson
from datapackage_pipelines_measure.config import settings
import logging
log = logging.getLogger(__name__)
DEFAULT_REPORT_START_DATE = '2014-01-01'
def request_data_from_discourse(domain, endpoint, **kwargs):
    """GET a JSON payload from a Discourse instance.

    Builds ``https://<domain><endpoint>?api_key=...&<kwargs>`` and returns the
    decoded JSON body.  HTTP 429 (Too Many Requests) is retried after a 30s
    pause; any other non-200 status raises ValueError.

    :param domain: Discourse host name
    :param endpoint: path portion of the URL (e.g. "/site.json")
    :param kwargs: extra query-string parameters
    :raises ValueError: on non-200/429 responses or non-JSON bodies
    """
    api_token = settings['DISCOURSE_API_TOKEN']
    qs_dict = {'api_key': api_token}
    qs_dict.update(kwargs)
    qs = urllib.parse.urlencode(qs_dict)
    url = urllib.parse.urlunparse(
        ('https', domain, endpoint, None, qs, None)
    )
    # Iterative retry replaces the previous self-recursion on 429, which grew
    # the call stack without bound under sustained rate limiting.
    while True:
        response = requests.get(url)
        if response.status_code != 429:
            break
        # Too Many Requests: back off and retry the same request.
        time.sleep(30)
    if response.status_code != 200:
        raise ValueError(
            'Error raised for domain:{}, '
            'Status code:{}. '
            'Error message: {}'.format(domain,
                                       response.status_code,
                                       response.content))
    try:
        json_response = response.json()
    except simplejson.scanner.JSONDecodeError:
        log.error('Expected JSON in response from: {}'.format(url))
        raise ValueError('Expected JSON in response from: {}'.format(url))
    return json_response
def request_report_from_discourse(domain, report, start_date,
                                  category_id='all', end_date=None):
    '''Fetch an admin report from Discourse and return it as a dict mapping
    each report date to its count.'''
    # Fall back to today / the configured start date when bounds are omitted.
    effective_end = end_date
    if effective_end is None:
        effective_end = datetime.date.today().strftime("%Y-%m-%d")
    effective_start = start_date
    if effective_start is None:
        effective_start = DEFAULT_REPORT_START_DATE
    endpoint = "/admin/reports/{}.json".format(report)
    payload = request_data_from_discourse(
        domain, endpoint,
        start_date=effective_start,
        end_date=effective_end,
        category_id=category_id)
    points = payload['report']['data']
    report_by_date = {}
    for point in points:
        report_by_date[dateutil.parser.parse(point['x']).date()] = point['y']
    return report_by_date
@functools.lru_cache(maxsize=64)
def get_category_info_from_discourse(domain):
    '''Return the top-level categories of a Discourse site (id, name, slug),
    each carrying a list of its subcategories where present.  Results are
    cached per domain.'''
    site_data = request_data_from_discourse(domain, "/site.json")
    categories = site_data['categories']
    # First pass: collect categories without a parent as the top level.
    top_level = []
    for cat in categories:
        if 'parent_category_id' in cat:
            continue
        top_level.append({'id': cat['id'], 'name': cat['name'],
                          'slug': cat['slug'], 'subcategories': []})
    # Second pass: attach every child to its parent entry.
    for cat in categories:
        if 'parent_category_id' not in cat:
            continue
        parent = next(pc for pc in top_level
                      if pc['id'] == cat['parent_category_id'])
        parent['subcategories'].append({'id': cat['id'], 'slug': cat['slug'],
                                        'name': cat['name'],
                                        'subcategories': None})
    return top_level
| StarcoderdataPython |
5000100 | from IGParameter import *
from PIL import Image
import copy
class IGParameterImage(IGParameter):
    """Parameter subclass holding a single image under the "image" slot."""

    _KEY = "image"

    def __init__(self):
        # Register under the "Image" parameter name; no image until assigned.
        super().__init__("Image")
        self._value[self._KEY] = None

    @property
    def image(self):
        """The stored image object, or None when unset."""
        return self._value[self._KEY]

    @image.setter
    def image(self, image):
        self._value[self._KEY] = image
| StarcoderdataPython |
9739743 | <gh_stars>0
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import time
from flask_restful import Resource, reqparse
from fedlearner_webconsole.job.models import Job
from fedlearner_webconsole.job.es import es
from fedlearner_webconsole.exceptions import NotFoundException
from fedlearner_webconsole.k8s_client import get_client
class JobApi(Resource):
    """Read-only REST resource exposing a single job by primary key."""

    def get(self, job_id):
        """Return the serialized job, or raise NotFoundException for an
        unknown id."""
        matching_job = Job.query.filter_by(id=job_id).first()
        if matching_job is None:
            raise NotFoundException()
        return {'data': matching_job.to_dict()}
# TODO: manual start jobs
class PodLogApi(Resource):
    """REST resource returning the most recent log lines of a pod, as
    collected by filebeat and queried from Elasticsearch."""

    def get(self, pod_name):
        arg_parser = reqparse.RequestParser()
        arg_parser.add_argument('start_time', type=int, location='args',
                                required=True,
                                help='start_time is required and must be timestamp')
        arg_parser.add_argument('max_lines', type=int, location='args',
                                required=True,
                                help='max_lines is required')
        params = arg_parser.parse_args()
        # Query from start_time up to "now" (ms) and keep the newest lines.
        end_time_ms = int(time.time() * 1000)
        lines = es.query_log('filebeat-*', '', pod_name,
                             params['start_time'], end_time_ms)
        return {'data': lines[-params['max_lines']:]}
class JobLogApi(Resource):
    """REST resource returning the most recent operator log lines for a job,
    queried from Elasticsearch."""

    def get(self, job_name):
        parser = reqparse.RequestParser()
        # Fix: the help text previously said 'project_id is required ...'
        # although the argument is start_time (copy/paste slip; PodLogApi
        # above has the correct wording).
        parser.add_argument('start_time', type=int, location='args',
                            required=True,
                            help='start_time is required and must be timestamp')
        parser.add_argument('max_lines', type=int, location='args',
                            required=True,
                            help='max_lines is required')
        data = parser.parse_args()
        start_time = data['start_time']
        max_lines = data['max_lines']
        # Query from start_time up to "now" (ms) and keep the newest lines.
        return {'data': es.query_log('filebeat-*', job_name,
                                     'fedlearner-operator',
                                     start_time,
                                     int(time.time() * 1000))[-max_lines:]}
class PodContainerApi(Resource):
    # REST resource giving the frontend what it needs to open a webshell
    # into the 'tensorflow' container of one of the job's pods.
    def get(self, job_id, pod_name):
        """Return the webshell session container id and the k8s base URL.

        Raises NotFoundException when job_id does not exist.
        """
        job = Job.query.filter_by(id=job_id).first()
        if job is None:
            raise NotFoundException()
        k8s = get_client()
        base = k8s.get_base_url()
        # Session is opened in the job's own namespace, always targeting the
        # container named 'tensorflow'.
        container_id = k8s.get_webshell_session(job.project.get_namespace(),
                                                pod_name,
                                                'tensorflow')
        return {'data': {'id': container_id, 'base': base}}
def initialize_job_apis(api):
    """Register every job-related resource and its route on *api*."""
    routes = (
        (JobApi, '/jobs/<int:job_id>'),
        (PodLogApi, '/pods/<string:pod_name>/log'),
        (JobLogApi, '/jobs/<string:job_name>/log'),
        (PodContainerApi, '/jobs/<int:job_id>/pods/<string:pod_name>/container'),
    )
    for resource, route in routes:
        api.add_resource(resource, route)
| StarcoderdataPython |
295105 | <reponame>kolyasalubov/Lv-677.PythonCore
def count_positives_sum_negatives(arr):
    """Return ``[count of positive values, sum of negative values]``.

    Zeros are neither counted nor summed.  For an empty (or falsy) input the
    result is an empty list, per the kata contract.

    Fixes over the original: the variable named ``negative_array_count``
    actually accumulated a *sum*, and a dead branch added ``i`` (always 0)
    to an unused accumulator for zero elements.
    """
    if not arr:
        return []
    positives = sum(1 for value in arr if value > 0)
    negatives_sum = sum(value for value in arr if value < 0)
    return [positives, negatives_sum]
8168192 | import logging
from icrawl_plugin import IHostCrawler
from utils.config_utils import crawl_config_files
logger = logging.getLogger('crawlutils')
class ConfigHostCrawler(IHostCrawler):
    """Host-side crawler plugin that emits the 'config' feature by scanning
    the filesystem for configuration files."""

    # Defaults as immutable tuples: list defaults in the signature were the
    # classic shared-mutable-default-argument pitfall.
    DEFAULT_EXCLUDE_DIRS = (
        '/dev', '/proc', '/mnt', '/tmp', '/var/cache',
        '/usr/share/man', '/usr/share/doc', '/usr/share/mime')
    DEFAULT_KNOWN_CONFIG_FILES = (
        '/etc/passwd', '/etc/group', '/etc/hosts', '/etc/hostname',
        '/etc/mtab', '/etc/fstab', '/etc/aliases',
        '/etc/ssh/ssh_config', '/etc/ssh/sshd_config', '/etc/sudoers')

    def get_feature(self):
        """Name of the feature this crawler produces."""
        return 'config'

    def crawl(
            self,
            root_dir='/',
            exclude_dirs=None,
            known_config_files=None,
            discover_config_files=False,
            **kwargs):
        """Crawl configuration files under *root_dir*.

        :param root_dir: filesystem root to scan
        :param exclude_dirs: directories skipped during the scan
            (defaults to DEFAULT_EXCLUDE_DIRS)
        :param known_config_files: well-known config paths always reported
            (defaults to DEFAULT_KNOWN_CONFIG_FILES)
        :param discover_config_files: also heuristically discover configs
        :param kwargs: ignored; kept for plugin-interface compatibility
        """
        if exclude_dirs is None:
            exclude_dirs = list(self.DEFAULT_EXCLUDE_DIRS)
        if known_config_files is None:
            known_config_files = list(self.DEFAULT_KNOWN_CONFIG_FILES)
        return crawl_config_files(
            root_dir=root_dir,
            exclude_dirs=exclude_dirs,
            known_config_files=known_config_files,
            discover_config_files=discover_config_files)
| StarcoderdataPython |
1982777 | import os
import sys
import h5py
import numpy as np
import shutil
job_repeat_attempts = 5
def check_file(filename):
    """Return True iff *filename* exists and holds exactly one 'merges' dataset.

    A file with unexpected contents is deleted and reported as absent, so the
    caller's retry loop regenerates it.
    """
    if not os.path.exists(filename):
        return False
    # verify the file has the expected data
    import h5py  # local import kept from the original (lazy dependency)
    f = h5py.File(filename, 'r')
    try:
        fkeys = set(f.keys())
    finally:
        # Fix: close the handle even when keys() raises; the original leaked
        # the open file on that path.
        f.close()
    if fkeys != set(['merges']):
        os.unlink(filename)
        return False
    return True
if __name__ == '__main__':
    # Python 2 script: merge the 'merges'/'labels' datasets of all input HDF5
    # files (argv[1:-1]) into one 'merges' dataset in argv[-1].  The whole job
    # is retried up to job_repeat_attempts times until check_file() accepts
    # the output.  Output is written to '<output>_partial' then moved into
    # place so a crash never leaves a half-written final file.
    output_path = sys.argv[-1]
    repeat_attempt_i = 0
    while repeat_attempt_i < job_repeat_attempts and not check_file(output_path):
        repeat_attempt_i += 1
        try:
            outf = h5py.File(output_path + '_partial', 'w')
            # Accumulator of (old_label, new_label) pairs.
            outmerges = np.zeros((0, 2), dtype=np.uint64)
            for filename in sys.argv[1:-1]:
                try:
                    print filename
                    f = h5py.File(filename, 'r')
                    assert ('merges' in f) or ('labels' in f)
                    if 'merges' in f:
                        outmerges = np.vstack((outmerges, f['merges'][...].astype(np.uint64)))
                    if 'labels' in f:
                        # write an identity map for the labels
                        labels = np.unique(f['labels'][...])
                        labels = labels[labels > 0]  # 0 is background, not a label
                        labels = labels.reshape((-1, 1))
                        outmerges = np.vstack((outmerges, np.hstack((labels, labels)).astype(np.uint64)))
                except Exception, e:
                    print e, filename
                    raise
            if outmerges.shape[0] > 0:
                outf.create_dataset('merges', outmerges.shape, outmerges.dtype)[...] = outmerges
            outf.close()
            # Atomic-ish publish: only rename once fully written.
            shutil.move(output_path + '_partial', output_path)
        except IOError as e:
            print "I/O error({0}): {1}".format(e.errno, e.strerror)
            outf.close()
        except KeyboardInterrupt:
            pass
        except:
            print "Unexpected error:", sys.exc_info()[0]
            # Re-raise only once the retry budget is exhausted.
            if repeat_attempt_i == job_repeat_attempts:
                raise
    assert check_file(output_path), "Output file could not be verified after {0} attempts, exiting.".format(job_repeat_attempts)
| StarcoderdataPython |
3582283 | <gh_stars>1-10
"""
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: <NAME>, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from numba import jit, prange
# Perform delay and sum operation with numba
# Input: rf_data_in of shape (n_samples x n_elements)
# delays_idx of shape (n_modes x n_elements x n_points)
# apod_weights of shape (n_points x n_elements)
@jit(nopython = True, parallel = True, nogil = True)
def delay_and_sum_numba(rf_data_in, delays_idx, apod_weights = None):
    """Numba-compiled delay-and-sum beamforming.

    For every (mode, point) pair, sums the RF samples selected by
    ``delays_idx`` across all elements, optionally weighted by
    ``apod_weights``.  Delay indices beyond the last sample are skipped
    (contribute zero).

    :param rf_data_in: (n_samples x n_elements) complex RF data
    :param delays_idx: (n_modes x n_elements x n_points) integer sample indices
    :param apod_weights: optional (n_points x n_elements) apodization weights
    :return: (n_modes x n_points) complex64 beamformed output
    """
    n_elements = rf_data_in.shape[1]
    n_modes = delays_idx.shape[0]
    n_points = delays_idx.shape[2]
    # Allocate array
    das_out = np.zeros((n_modes, n_points), dtype=np.complex64)
    # Iterate over modes, points, elements; prange parallelizes over points.
    for i in range(n_modes):
        for j in prange(n_points):
            for k in range(n_elements):
                # Only indices within the data (<= n_samples-1) contribute.
                if (delays_idx[i, k, j] <= rf_data_in.shape[0] - 1):
                    if apod_weights is None:
                        das_out[i, j] += rf_data_in[delays_idx[i, k, j], k]
                    else:
                        das_out[i, j] += rf_data_in[delays_idx[i, k, j], k] * apod_weights[j, k]
    return das_out
# Perform delay and sum operation with numpy
# Input: rf_data_in of shape (n_samples x n_elements)
#        delays_idx of shape (n_modes x n_elements x n_points)
def delay_and_sum_numpy(rf_data_in, delays_idx, apod_weights=None):
    """Vectorized delay-and-sum beamforming, equivalent to the numba version.

    :param rf_data_in: (n_samples x n_elements) complex RF data
    :param delays_idx: (n_modes x n_elements x n_points) integer sample indices
    :param apod_weights: optional (n_points x n_elements) apodization weights
    :return: (n_modes x n_points) beamformed output

    Fixes over the original:
    * delays equal to n_samples-1 (the last valid sample) were incorrectly
      treated as out of range (`>= n_samples - 1`), disagreeing with the
      numba implementation's `<= n_samples - 1` inclusion;
    * the caller's delays_idx array was mutated in place.
    """
    n_elements = rf_data_in.shape[1]
    n_modes = delays_idx.shape[0]
    n_points = delays_idx.shape[2]
    # Append one zero sample so index -1 selects a zero contribution.
    rf_data_shape = rf_data_in.shape
    rf_data = np.zeros((rf_data_shape[0] + 1, rf_data_shape[1]), dtype=np.complex64)
    rf_data[:rf_data_shape[0], :rf_data_shape[1]] = rf_data_in
    # Map out-of-range delays to -1 (the zero pad row) WITHOUT mutating the
    # caller's array.  Valid indices are 0 .. n_samples-1 inclusive.
    delays_idx = np.where(delays_idx >= rf_data_shape[0], -1, delays_idx)
    # Fancy-index arrays of size (n_modes x n_points x n_elements); the last
    # two delay dimensions are transposed to fit the rf_data layout.
    fancy_idx_channels = np.tile(np.arange(0, n_elements), (n_modes, n_points, 1))
    fancy_idx_samples = np.transpose(delays_idx, axes=[0, 2, 1])
    # Select the delayed samples, optionally apodize, and sum over elements.
    if apod_weights is None:
        das_out = np.sum(rf_data[fancy_idx_samples, fancy_idx_channels], axis=-1)
    else:
        das_out = np.sum(np.multiply(rf_data[fancy_idx_samples, fancy_idx_channels], apod_weights), axis=-1)
    # Output shape: (n_modes x n_points)
    return das_out
| StarcoderdataPython |
3234044 | <filename>CorbanDallas1982/Home_work_6/HW_6_3.py<gh_stars>0
word = input("Enter your word:")


def f():
    """Return a dict mapping each character of the global *word* to its
    number of occurrences (first-occurrence key order)."""
    global word
    return {ch: word.count(ch) for ch in word}


print(f())
3543111 | from rest_framework import serializers
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from .models import Supply, Standard, Connector, SupplyConnectorRelation
class ConnectorField(serializers.ModelField):
    """Serializer field exposing a Supply's connectors.

    Uses source='*' so the whole model instance (not one attribute) is passed
    to to_representation, which delegates to the instance's get_connectors().
    """
    def __init__(self, *args, **kwargs):
        # source='*' makes DRF hand the complete instance to this field.
        kwargs['source'] = '*'
        super().__init__(*args, **kwargs)

    def to_representation(self, value):
        # `value` is the model instance (because of source='*').
        return value.get_connectors()

    def to_internal_value(self, data):
        # Store the raw payload under the field name; actual persistence is
        # handled by the serializer's save() in this module.
        return {self.field_name: data}
class SupplySerializer(serializers.ModelSerializer):
    """Serializer for Supply, with a custom 'connectors' field whose
    persistence is handled manually in save()."""
    connectors = ConnectorField(model_field=Supply)

    class Meta:
        model = Supply
        fields = ("uuid", "type", "number", "category", "name", "manufacturer", "model", "serial_number", "length", "owner", "bought_at", "parent", "standards", "connectors", "connected_supplies", "position", "is_power_cable", "is_signal_cable", "is_active_cable", "note")
        extra_kwargs = {
            'uuid': {'read_only': True},
            'type': {'read_only': True},
            'number': {'read_only': True},
        }

    def get_connectors(self, instance):
        # Delegates to the model's own connector listing.
        return instance.get_connectors()

    def save(self, **kwargs):
        """Persist connector relations alongside the regular model fields.

        Removes connector relations that are absent from the submitted
        payload, applies the submitted connectors via the model's
        set_connectors(), then lets ModelSerializer.save() handle the rest.
        NOTE(review): assumes self.instance is set (update flow) — confirm
        this serializer is never used for creation.
        """
        connectors = self.initial_data["connectors"]
        # Relations whose pk is no longer present in the payload get deleted.
        deleted_connector_relations = self.instance.connector_relations.exclude(id__in=[connector["pk"] for connector in connectors if connector.get("pk")])
        deleted_connector_relations.delete()
        self.instance.set_connectors(connectors)
        # Already persisted above; keep it out of the generic field update.
        self.validated_data.pop("connectors")
        super(SupplySerializer, self).save(**kwargs)
class RecursiveField(serializers.Serializer):
    """Field that re-applies the enclosing serializer's class to a nested
    value, enabling self-referential trees (e.g. Standard.children)."""
    def to_representation(self, value):
        # self.parent.parent is the serializer that declared this field
        # (through the many=True ListSerializer wrapper).
        # NOTE(review): assumes many=True usage; confirm for scalar fields.
        serializer = self.parent.parent.__class__(value, context=self.context)
        return serializer.data
class StandardSerializer(serializers.ModelSerializer):
    """Serializer for Standard, recursively embedding child standards."""
    children = RecursiveField(many=True, read_only=True)

    class Meta:
        model = Standard
        fields = ("pk", "name", "parent", "children")
        extra_kwargs = {
            'pk': {'read_only': True},
        }
class ConnectorSerializer(serializers.ModelSerializer):
    """Plain model serializer for Connector."""
    class Meta:
        model = Connector
        fields = ("pk", "name", "standard")
        extra_kwargs = {
            'pk': {'read_only': True},
        }
class UserSerializer(serializers.ModelSerializer):
    """Minimal user representation (pk and username only)."""
    class Meta:
        model = get_user_model()
        fields = ("pk", "username")
        extra_kwargs = {
            'pk': {'read_only': True},
        }
class ChangePasswordSerializer(serializers.ModelSerializer):
    """Validates and applies a password change for the current user.

    NOTE(review): several literals below read '<PASSWORD>' — these look like
    artifacts of automated credential redaction in the source corpus
    (presumably 'password2' for the field/key references and Japanese error
    messages for the ValidationError payloads).  They are preserved verbatim
    here; restore the intended strings before using this code.
    """
    password = serializers.CharField(write_only=True, required=True, validators=[validate_password])
    password2 = serializers.CharField(write_only=True, required=True)
    old_password = serializers.CharField(write_only=True, required=True)

    class Meta:
        model = get_user_model()
        # NOTE(review): '<PASSWORD>' redaction artifact — presumably 'password2'.
        fields = ('old_password', 'password', '<PASSWORD>')

    def validate(self, attrs):
        # Cross-field check that the two password entries match.
        # NOTE(review): the '<PASSWORD>' key is presumably 'password2'.
        if attrs['password'] != attrs['<PASSWORD>']:
            raise serializers.ValidationError({"password": "<PASSWORD>が一致<PASSWORD>"})
        return attrs

    def validate_old_password(self, value):
        # The current password must be correct before changing it.
        user = self.context['request'].user
        if not user.check_password(value):
            raise serializers.ValidationError({"old_password": "<PASSWORD>ワードが間違っています"})
        return value

    def update(self, instance, validated_data):
        # set_password hashes; save persists.
        instance.set_password(validated_data['password'])
        instance.save()
        return instance
| StarcoderdataPython |
9692682 | import numpy as np
import math
import itertools
def _p_val_1d(A, B, metric=np.mean, numResamples=10000):
"""Return p value of observed difference between 1-dimensional A and B"""
observedDiff = abs(metric(A) - metric(B))
combined = np.concatenate([A, B])
numA = len(A)
resampleDiffs = np.zeros(numResamples,dtype='float')
for resampleInd in range(numResamples):
permutedCombined = np.random.permutation(combined)
diff = metric(permutedCombined[:numA]) - metric(permutedCombined[numA:])
resampleDiffs[resampleInd] = diff
pVal = (np.sum(resampleDiffs > observedDiff) + np.sum(resampleDiffs < -observedDiff))/float(numResamples)
return pVal
def _ma_p_val_1d(A, B, metric=np.mean, numResamples=10000):
    """Like _p_val_1d, but with NaN/inf entries dropped from both samples."""
    cleaned_a = np.ma.masked_invalid(A, copy=True).compressed()
    cleaned_b = np.ma.masked_invalid(B, copy=True).compressed()
    return _p_val_1d(cleaned_a, cleaned_b, metric, numResamples)
def _ma_p_val_concatenated(C, numSamplesFirstGroup, metric=np.mean, numResamples=10000):
    """Split the concatenated sample C back into its two groups (first
    numSamplesFirstGroup entries vs. the rest) and return their p value."""
    first_group = C[:numSamplesFirstGroup]
    second_group = C[numSamplesFirstGroup:]
    return _ma_p_val_1d(first_group, second_group, metric, numResamples)
def p_val(A, B, axis=None, metric=np.mean, numResamples=10000):
    """Permutation p value that metric(A) and metric(B) differ along *axis*,
    ignoring NaNs and masked elements.

    Parameters
    ----------
    A, B : array_like
        The two groups of observations.
    axis : int, optional
        Axis along which the p value is computed; None (default) flattens
        both arrays first.
    metric : callable, optional
        Statistic to compare (default numpy.mean).
    numResamples : int, optional
        Number of random permutations (default 10000).

    Returns
    -------
    ndarray or scalar
        Same shape as A/B with *axis* removed; a scalar when axis is None.

    See Also
    --------
    fast_p_val : uses the same random permutation for all entries.
    """
    A = A.copy()
    B = B.copy()
    if axis is None:
        return _ma_p_val_1d(A.ravel(), B.ravel(), metric, numResamples)
    # Concatenate along the resampling axis and let apply_along_axis split
    # each 1-d slice back into its two groups.
    numSamplesFirstGroup = A.shape[axis]
    combined = np.concatenate((A, B), axis=axis)
    return np.apply_along_axis(_ma_p_val_concatenated, axis, combined,
                               numSamplesFirstGroup, metric, numResamples)
def fast_p_val(A, B, axis=0, metric=np.mean, numResamples=10000):
    """Return the p value that metric(A) and metric(B) differ along an axis.

    Parameters
    ----------
    A : array_like
        Array containing numbers of first group.
    B : array_like
        Array containing numbers of second group.
    axis : int, optional
        Axis along which the p value is computed (default: first dimension).
    metric : numpy function, optional
        metric to calculate p value for.  The default is numpy.mean.
    numResamples : int, optional
        number of permutations. The default is 10000.

    Returns
    -------
    pValue : ndarray
        An array with the same shape as `A` and `B`, with the specified axis
        removed.

    See Also
    --------
    p_val : ignores NaNs and masked elements, and independently calculates
        random permutations for each entry.
    """
    # Bring the resampling axis to the front so group membership is axis 0.
    rolledA = np.rollaxis(A, axis)
    rolledB = np.rollaxis(B, axis)
    observedDiff = abs(metric(rolledA, axis=0) - metric(rolledB, axis=0))
    combined = np.concatenate((rolledA, rolledB), axis=0)
    numA = np.shape(rolledA)[0]
    resampleShape = np.array(combined.shape)
    resampleShape[0] = numResamples
    resampleDiffs = np.zeros(resampleShape, dtype='float')
    for resampleInd in range(numResamples):
        # np.random.permutation shuffles along the first axis only, so ONE
        # permutation is shared by every entry — hence "fast".
        permutedCombined = np.random.permutation(combined)
        diff = metric(permutedCombined[:numA, ...], axis=0) - metric(permutedCombined[numA:, ...], axis=0)
        resampleDiffs[resampleInd, ...] = diff
    # Two-sided p value: fraction of resampled diffs strictly more extreme.
    pVal = (np.sum(resampleDiffs > observedDiff, axis=0) + np.sum(resampleDiffs < -observedDiff, axis=0)) / float(numResamples)
    return pVal
def exact_p_val(A, B, axis=0, metric=np.mean):
    """Exact two-sided permutation p value that metric(A) and metric(B)
    differ along an axis, enumerating all C(nA+nB, nA) group assignments.

    Parameters
    ----------
    A : array_like
        Array containing numbers of first group.
    B : array_like
        Array containing numbers of second group.
    axis : int, optional
        Axis along which the p value is computed (default: first dimension).
    metric : numpy function, optional
        metric to calculate p value for.  The default is numpy.mean.

    Returns
    -------
    pValue : ndarray
        An array with the same shape as `A` and `B`, with the specified axis
        removed.

    See Also
    --------
    p_val : for larger sample sizes, where full enumeration is infeasible.
    """
    rolledA = np.rollaxis(A, axis)
    rolledB = np.rollaxis(B, axis)
    observedDiff = abs(metric(rolledA, axis=0) - metric(rolledB, axis=0))
    combined = np.concatenate((rolledA, rolledB), axis=0)
    numA = np.shape(rolledA)[0]
    numB = np.shape(rolledB)[0]
    numTotal = numA + numB
    numCombinations = math.factorial(numTotal) // (math.factorial(numA) * math.factorial(numB))
    resampleShape = np.array(combined.shape)
    resampleShape[0] = numCombinations
    resampleDiffs = np.zeros(resampleShape, dtype='float')
    for resampleInd, indsToInclude in enumerate(itertools.combinations(range(numTotal), numA)):
        # BUG FIX: the exclusion list previously iterated range(12) — a
        # hard-coded total sample size that crashed (or silently mis-indexed)
        # for any numA + numB != 12.
        included = set(indsToInclude)
        indsToExclude = [i for i in range(numTotal) if i not in included]
        permutedA = combined[list(indsToInclude), ...]
        permutedB = combined[indsToExclude, ...]
        diff = metric(permutedA, axis=0) - metric(permutedB, axis=0)
        resampleDiffs[resampleInd, ...] = diff
    pVal = (np.sum(resampleDiffs > observedDiff, axis=0) + np.sum(resampleDiffs < -observedDiff, axis=0)) / float(numCombinations)
    return pVal
| StarcoderdataPython |
5178261 | <reponame>rkwong43/Toh<filename>utils/ids/weapon_id.py
from enum import Enum, auto
"""Represents the IDs of different weapon types.
"""
class WeaponID(Enum):
    """Identifiers for the game's weapon types; values are auto-assigned."""
    # Weapon types
    GUN = auto()
    SHOTGUN = auto()
    MACHINE_GUN = auto()
    FLAK_GUN = auto()
    MISSILE_LAUNCHER = auto()
    FLAK_CANNON = auto()
    DIAMOND_DUST = auto()
    MULTI_MISSILE = auto()
    STRIKER = auto()
    # Rapid fire missiles
    SWARM = auto()
    # Fires a huge spread of flak that slow down the further they move
    CONSTELLATION = auto()
    # Burst fire multiple volleys of homing bullets
    AURORA = auto()
    RAILGUN = auto()
| StarcoderdataPython |
9703565 | <reponame>L-Net-1992/Paddle
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import _non_static_mode
from paddle import _C_ops
from paddle.static import default_main_program
def dropout_nd(x,
               p=0.5,
               axis=None,
               training=True,
               mode="upscale_in_train",
               name=None):
    """Dropout along specific axes (broadcastable mask), dygraph + static.

    :param x: input variable/tensor
    :param p: dropout probability
    :param axis: int or list of ints — axes along which the mask varies
    :param training: False switches the op to inference behavior
    :param mode: 'upscale_in_train' or 'downscale_in_infer'
    :param name: unused here; kept for API symmetry
    """
    drop_axes = [axis] if isinstance(axis, int) else list(axis)
    seed = None
    mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  # semantic transfer
    if _non_static_mode():
        # Dygraph path: call the C++ op directly.
        if default_main_program().random_seed != 0:
            seed = default_main_program().random_seed
        out, mask = _C_ops.dropout_nd(x, 'dropout_prob', p, 'is_test',
                                      not training, 'fix_seed', seed
                                      is not None, 'seed',
                                      seed if seed is not None else 0,
                                      'dropout_implementation', mode, 'axis',
                                      drop_axes)
        return out
    # Static-graph path: build the op via LayerHelper.
    helper = LayerHelper('dropout_nd', **locals())
    # NOTE(review): check_variable_and_dtype is not among this file's visible
    # imports — this branch would raise NameError as written; confirm the
    # missing `from paddle.fluid.data_feeder import check_variable_and_dtype`.
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'dropout')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    mask = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)

    def get_attrs(prog, dropout_prob, is_test, seed):
        # Inherit the program-level seed when no explicit seed was fixed.
        if (seed is None or seed == 0) and prog.random_seed != 0:
            seed = prog.random_seed
        attrs = {
            'dropout_prob': dropout_prob,
            'is_test': is_test,
            'fix_seed': seed is not None,
            'seed': seed if seed is not None else 0,
            'dropout_implementation': mode,
            'axis': drop_axes
        }
        return attrs

    attrs = get_attrs(helper.main_program, p, not training, seed)
    helper.append_op(type='dropout_nd',
                     inputs={'X': [x]},
                     outputs={
                         'Out': [out],
                         'Mask': [mask]
                     },
                     attrs=attrs)
    return out
paddle.enable_static()
class TestDropoutNdOp(OpTest):
    """Op-level test: with dropout_prob=0 the output must equal the input and
    the axis-broadcast mask (shape (1, 32, 1) for axis=[1]) is all ones."""

    def setUp(self):
        self.op_type = "dropout_nd"
        self.inputs = {'X': np.random.random((4, 32, 16)).astype("float64")}
        self.attrs = {
            'dropout_prob': 0.0,
            'fix_seed': True,
            'is_test': False,
            'axis': [1]
        }
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((1, 32, 1)).astype('uint8')
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        # Gradient of Out w.r.t. X under the identity (p=0) dropout.
        self.check_grad(['X'], 'Out')
class TestDropoutNdAPI(unittest.TestCase):
    """Dygraph API test for dropout_nd on CPU (and CUDA when available)."""

    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                in_np = np.random.random([4, 32, 16]).astype("float32")
                input = paddle.to_tensor(in_np)
                res1 = dropout_nd(x=input, p=0., axis=[0, 1])
                res2 = dropout_nd(x=input, p=0.5, axis=[0, 1])
                # p=0 must be the identity; res2 only checks the call runs.
                self.assertTrue(np.allclose(res1.numpy(), in_np))
        paddle.enable_static()
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
6603018 | from rest_framework import serializers
from app.models import CountTrue
class UserSerializer(serializers.Serializer):
    """
    A serializer class for serializing the SlackUsers
    (public fields only; the Slack ID is withheld — see SecureUserSerializer).
    """
    id = serializers.IntegerField()
    firstname = serializers.CharField()
    lastname = serializers.CharField()
    photo = serializers.CharField()
    is_tapped = serializers.BooleanField()
class SecureUserSerializer(UserSerializer):
    """
    A serializer class for serializing SlackUsers that exposes the SlackID.
    This is used only under authorized access.
    """
    slack_id = serializers.CharField()
class ReportSerializer(serializers.Serializer):
    """Aggregates meal records into a per-date breakfast/lunch report."""

    @classmethod
    def count(cls, queryset):
        """
        This method counts the number of breakfast and lunch served.
        It writes the results to a list having the below structure.
        [
            {breakfast:XX, lunch:XX, date:"YYYY-MM-DD"},
            ..
        ]
        """
        # Group by date, then count True flags per group.
        result_group = queryset.values('date')
        annotate_report = result_group.annotate(
            breakfast=CountTrue('breakfast'),
            lunch=CountTrue('lunch')
        )

        def serialize(queryset):
            # Project each annotated row onto plain dicts.
            return [
                {"breakfast": res["breakfast"], "lunch": res["lunch"],
                 "date": res["date"]} for res in queryset
            ]
        return serialize(annotate_report)
# lunch = queryset.filter(lunch=1).values('date').count()
| StarcoderdataPython |
9741123 | from unittest import TestCase
from strStr_dfa import Solution
class TestSolution(TestCase):
    """Smoke test for the DFA construction of the strStr solution; prints
    the non-zero transitions rather than asserting on them."""
    so = Solution()

    def test_get_dfa(self):
        dp = self.so.get_dfa('ababdababa')
        # X takes the values [0, 0, 1, 2, 0, 1, 2, 3, 4] in turn
        for i, e in enumerate(dp):
            for j, number in enumerate(e):
                if number:
                    print(i, chr(j), number)
| StarcoderdataPython |
4871247 | <filename>ddc_packages/hddump/hddump/hddumpMain.py
"""
Demonstration handle dump for CMIP/ESGF files ..
USAGE
=====
-h: print this message;
-v: print version;
-t: run a test
-f <file name>: examine file, print path to replacement if this file is obsolete, print path to sibling files (or replacements).
-id <tracking id>: examine handle record of tracking id.
-V: verbose
--debug: debug
--DEBUG: debug with extended output
"""
## see https://www.handle.net/proxy_servlet.html for info on restfull API
import collections, os, re
##import cfdm
try:
import netCDF4
NETCDF_SUPPORTED = True
except:
NETCDF_SUPPORTED = False
import xml
import http
from xml.dom import minidom
import hddump.packageConfig as packageConfig
import urllib
from urllib import request
from hddump.testdata import *
class Phandle(object):
    """Obsolete parser of handle metadata records; superseded by Open.

    Flattens the records under *k* into a type -> data mapping (self.d),
    keeping the raw dictionary in self.h.
    """

    def __init__(self, hdlDict, k='values'):
        self.h = hdlDict
        self.d = {}
        try:
            for entry in hdlDict[k]:
                self.d[entry['type']] = entry['data']
        except:
            # Dump the offending records before propagating the error.
            print(hdlDict[k])
            raise
class Remote(object):
    # URL template for the Handle System REST API.
    htmpl = 'http://hdl.handle.net/api/handles/%s'
    # Canned test handles; ids starting with 'xxxxx' resolve from here.
    dh = dummyHandles()
    # Map of HTTP status code -> name, for readable error reporting.
    httpcodes = {x.value: x.name for x in http.HTTPStatus}

    def __init__(self, hdl, url=None):
        """Class to retrieve a handle .. optionally to retrieve from test data.
        Still needs some error handling based on the HTTP response code."""
        if hdl[:5] == 'xxxxx':
            # Test handle: serve from the bundled dummy data, no network.
            self.msg = self.dh.hh[hdl]
        else:
            if url == None:
                # Rewrite the sentinel test prefix and strip any 'hdl:' scheme.
                thisid = hdl.replace('hdl:999999', '10876.test')
                if thisid[:4] == 'hdl:':
                    thisid = thisid[4:]
                url = self.htmpl % thisid
            self.fetch(url)

    def fetch(self, url):
        """Retrieve the handle data, using urllib; handle metadata is stored
        in self.msg, the HTTP status in self.httpCode."""
        try:
            fh = request.urlopen(url)
            self.httpCode = fh.getcode()
        except urllib.error.HTTPError as e:
            print(e.headers)
            print('%s: %s' % (e.code, self.httpcodes.get(e.code, '????')))
            self.httpCode = e.code
            raise
        # NOTE(review): eval() on a network response executes arbitrary
        # code from the server — replace with json.loads for safety.
        msg = eval(fh.read())
        assert type(msg) == type({}), 'Response of wrong type'
        for k in ['responseCode', 'handle']:
            assert k in msg, 'Required key %s not found: %s' % (k, str(msg.keys()))
        self.msg = msg
class Open(object):
    """Create a handle object defined by a handle ID.
    Initial object simply holds the id, to retrieve the object, execute the get() method.
    If the argument is a ";" separated list, the tolist() method should be executed to convert to
    a list of handle objects before executing the get() method on each element of the list.
    This approach is perhaps a little unusual for an "Open" class ... but works well with given
    handle record structure.
    """
    # Class-level cache of retrieved handle messages, keyed by handle id.
    cache = {}
    htmpl = 'http://hdl.handle.net/api/handles/%s'
    dh = dummyHandles()

    def __init__(self, id, debug=False):
        self.REC_id = id
        # NOTE(review): REC_got is initialised False here but never set True
        # anywhere in this class, so get()/toDict() re-extract on every call.
        self.REC_got = False
        self.debug = debug
        self.rec = dict()

    def __repr__(self):
        return self.REC_id

    def tolist(self):
        """Split a ';'-separated id into a list of Open objects; a plain id
        yields a single-element list containing self."""
        if self.REC_id.find(';') == -1:
            return [self, ]
        else:
            this = []
            for id in self.REC_id.split(';'):
                this.append(Open(id.strip()))
            return this

    def toDict(self):
        # Ensure the record has been retrieved, then expose it as a dict.
        if not self.REC_got:
            self.get()
        return self.rec

    def dump(self):
        print(self.toDict())

    def get(self, extract=True):
        """Retrieve the handle record (via Remote) and optionally extract its
        values into self.rec.  Returns None."""
        if self.REC_got:
            return
        #
        # using the class object to store the cache of retrieved handles.
        # This enables some caching ... NOT TESTED
        #
        if self.REC_id not in self.__class__.cache:
            self.remote = Remote(self.REC_id)
            self.__class__.cache[self.REC_id] = self.remote.msg
        if extract:
            self._extract(self.__class__.cache[self.REC_id])

    def _extract(self, msg):
        """Extract values from a handle message dictionary, and insert into self.rec"""
        if self.debug:
            print(msg.keys())
            print(msg['handle'])
        for r in msg['values']:
            # Relationship-typed records become lists of Open objects;
            # everything else is stored as its raw value.
            if str(r['type']) in ['IS_PART_OF', 'HAS_PARTS', 'replaces', 'replacedBy', 'isReplacedBy', 'parent', 'REPLACED_BY']:
                self.rec[r['type']] = Open(r['data']['value']).tolist()
            else:
                self.rec[r['type']] = r['data']['value']
        if self.rec['AGGREGATION_LEVEL'] == 'DATASET':
            # A dataset is obsolete iff it carries a REPLACED_BY record.
            self.obsolete = 'REPLACED_BY' in self.rec

    def addLatest(self):
        """Retrieve handle records for replacements until a current dataset is found.

        NOTE(review): this method appears broken as written —
        self.rec['REPLACED_BY'] is a *list* of Open objects (see _extract),
        so calling .get() on it raises AttributeError; and since Open.get()
        returns None, the append below would store None and the while
        condition would then fail.  Left unmodified pending a decided fix.
        """
        if not self.obsolete:
            return
        self.rec['REPLACED_BY'].get()
        self.replacements = [self.rec['REPLACED_BY'], ]
        while self.replacements[-1].obsolete:
            self.replacements.append(self.replacements[-1].rec['REPLACED_BY'].get())
        self.latest = self.replacements[-1]

    def addSiblings(self):
        """Collect the sibling files of a FILE-level handle via its parents.

        NOTE(review): references self.IS_PART_OF and self.p, neither of which
        is ever assigned in this class (presumably meant
        self.rec['IS_PART_OF'] and self, cf. Main.dumpF) — this method would
        raise AttributeError as written.  Left unmodified pending a fix.
        """
        if self.rec['AGGREGATION_LEVEL'] != 'FILE':
            print('No known siblings .....')
            return
        if 'IS_PART_OF' not in self.rec:
            print('No parent')
            return
        for p in self.IS_PART_OF:
            p.get()
        self.p.obsolete = all([p.obsolete for p in self.p.rec['IS_PART_OF']])
        self.siblings = []
        for p in self.rec['IS_PART_OF']:
            for c in p.rec['HAS_PARTS']:
                c.get()
                self.siblings.append(c)
class Main(object):
    """Main: entry point, parsing command line arguments.

    USAGE
    -----
    m = Main( argList )
    """
    # Flags taking no value / flags consuming the following token as a value.
    knownargs0 = ['-h','-v','-t','-V','--debug', '--DEBUG']
    knownargs1 = ['-f','-id']
    # Class-level regexes: master host attribute and <location .../> elements.
    re1 = re.compile( 'host="(.*?)"' )
    re2 = re.compile( '<location(.*?)/>' )
    def __init__(self, args):
        """Parse *args* (a list of command-line tokens) and dispatch the request."""
        # Per-attribute regexes for pulling host="..." / href="..." out of XML fragments.
        self.re_dict = dict()
        for k in ['host','href']:
            self.re_dict[k] = re.compile( '%s="(.*?)"' % k )
        self.htmpl = 'http://hdl.handle.net/api/handles/%s'
        self.version = packageConfig.version
        self.args = args
        self.parseArgs()
        if self.d.get( '-v', False ):
            print ( self.version )
            return
        if self.d.get( '-h', False ):
            print (self.version)
            print ( __doc__ )
            return
        if self.d.get( '-t', False ):
            self.runTest()
            return
        self.debugPlus = '--DEBUG' in self.d
        self.debug = '--debug' in self.d or self.debugPlus
        self.verbose = ( '-V' in self.d ) or self.debug
        if '-f' in self.d:
            fn = self.d['-f']
            self.dumpF(fn)
        if '-id' in self.d:
            # NOTE(review): ``id`` shadows the builtin of the same name.
            id = self.d['-id']
            self.dumpF('',id=id)
    def dumpF(self,fn, id=None):
        """Dump information about a file

        Either reads the tracking id from NetCDF file *fn*, or uses the handle
        *id* directly when given (then *fn* is ignored).
        """
        if id == None:
            assert os.path.isfile( fn ), 'File %s not found' % fn
            nchead = NcHead( fn )
            # Test handles live under the 10876.test prefix.
            thisid = nchead['tracking_id'].replace( 'hdl:999999', '10876.test' )
        else:
            thisid = id.replace('hdl:999999', '10876.test' )
        if self.debug:
            print (thisid)
        self.res = {'id':thisid, 'name':fn}
        self.p = Open( thisid )
        self.p.get()
        if self.debug:
            print( 'KEYS: ', self.p.rec.keys() )
        if self.debugPlus:
            for k in sorted( self.p.rec.keys() ):
                print( '%s: %s' % (k, self.p.rec[k] ) )
        self._globals( self.p )
        thisType = 'none'
        if 'IS_PART_OF' in self.p.rec:
            # FILE record: resolve all parent datasets first.
            thisType = 'file'
            for p in self.p.rec['IS_PART_OF']:
                p.get()
            # A file is obsolete only if all of its parent datasets are.
            self.p.obsolete = all( [p.obsolete for p in self.p.rec['IS_PART_OF']] )
            self.res['parents'] = [(p.REC_id, p.rec['DRS_ID'], p.rec['VERSION_NUMBER']) for p in self.p.rec['IS_PART_OF']]
            self.res['obsolete'] = self.p.obsolete
            self.res['RC'] = {False:'OK', True:'OBSOLETE'}[self.res['obsolete']]
            self.res['name'] = self.p.rec['FILE_NAME']
            # NOTE(review): siblings/replicas are only extracted when the file is
            # current; the verbose branch below reads self.res['siblings'] and
            # self.res['replicas'] unconditionally -- verify for obsolete files.
            if not self.p.obsolete:
                current = [p for p in self.p.rec['IS_PART_OF'] if not p.obsolete]
                if not len(current) == 1:
                    print ('ERROR: dataset has more than one current version ...' )
                self._extractDataset( current[0] )
                #
                # Extract replica information. Results will be stored to self.res['replicas']
                #
                self._extractReplicas( current[0] )
            print( 'File: %(name)s [%(id)s] %(RC)s' % self.res )
        elif 'HAS_PARTS' in self.p.rec:
            # DATASET record.
            thisType = 'ds'
            self._extractDataset( self.p )
            #
            # Extract replica information. Results will be stored to self.res['replicas']
            #
            self._extractReplicas( self.p )
            self.res['obsolete'] = self.p.obsolete
            self.res['RC'] = {False:'OK', True:'OBSOLETE'}[self.res['obsolete']]
            self.res['name'] = self.p.rec['DRS_ID']
            print( 'Dataset: %(name)s [%(id)s] %(RC)s' % self.res )
        elif 'IS_PART_OF' not in self.p.rec:
            print ( 'dumpF - 01' )
            print ( self.p.rec.keys() )
            if self.debug:
                print( 'No parent' )
            self.res['parents'] = None
            return
        if self.verbose:
            if thisType == 'file':
                print( 'Master host: %(master_host)s' % self.res )
                print( '\nDatasets:' )
                for p in self.res['parents'] :
                    print( 'ID: %s, NAME: %s, VERSION: %s' % p )
                print( '\nSiblings:' )
                for p in sorted(self.res['siblings'], key=lambda x: x[0]):
                    if p[1] != self.res['id']:
                        print( 'NAME: %s, ID: %s' % p )
                if len( self.res['replicas'] ) > 0:
                    print( '\nReplicas:' )
                    for p in self.res['replicas'] :
                        print( 'Host: %s' % p )
                else:
                    print( '\nNo replicas.' )
            elif thisType == 'ds':
                print( 'Master host: %(master_host)s' % self.res )
                print( '\nFiles:' )
                #for p in self.res['siblings'] :
                for p in sorted(self.res['siblings'], key=lambda x: x[0]):
                    print( 'NAME: %s, ID: %s' % p )
                if len( self.res['replicas'] ) > 0:
                    print( '\nReplicas:' )
                    for p in self.res['replicas'] :
                        print( 'Host: %s' % p )
                else:
                    print( '\nNo replicas.' )
    def _globals(self,current):
        """Print the global attributes of the record's data file via OPeNDAP."""
        if NETCDF_SUPPORTED:
            # NOTE(review): when URL_ORIGINAL_DATA is absent, _extractFileURL
            # does not set self.res['href'] and the next line raises KeyError -- verify.
            self._extractFileURL(current)
            dods = self.res['href'].replace('fileServer','dodsC')
            nc = netCDF4.Dataset( dods )
            for a in sorted( nc.ncattrs() ):
                print (' %s:: %s' % (a, nc.getncattr( a )) )
        else:
            print ( "Netcdf not supported ... check installation of netCDF4 module")
    def _extractFileURL( self, current ):
        """Extract the file URL from a file handle object"""
        if "URL_ORIGINAL_DATA" in current.rec:
            this = current.rec['URL_ORIGINAL_DATA']
            locs = self.re2.findall( this )
            href = self.re_dict['href'].findall( locs[0] )[0]
            self.res['href'] = href
        else:
            print ('NO URL ORiGINAL DATA')
    def _extractReplicas( self, current ):
        """Extract replica information from a DATASET handle object"""
        if 'REPLICA_NODE' in current.rec:
            rep = current.rec['REPLICA_NODE']
            locs = self.re2.findall( rep )
            reps = [self.re_dict['host'].findall(l)[0] for l in locs]
            self.res['replicas'] = reps
        else:
            self.res['replicas'] = []
    def _extractDataset( self, current ):
        """Resolve a dataset's child files and master host into ``self.res``."""
        for c in current.rec['HAS_PARTS']:
            c.get()
        self.res['siblings'] = [(c.rec['FILE_NAME'],c.REC_id) for c in current.rec['HAS_PARTS']]
        master = current.rec['HOSTING_NODE']
        this = self.re1.findall( master )
        assert len(this) == 1, 'Unexpected matches in search for master host'
        self.res['master_host'] = this[0]
    def runTest(self):
        """Legacy self-test.

        NOTE: no longer functional -- the ``10876.test`` handles appear to have
        been deleted, and they did not follow the current schema.
        """
        ex1 = ['hdl:21.14100/062520a0-f3d8-41bd-8b94-3fe0e4a6ab0e','tas_Amon_MPI-ESM1-2-LR_1pctCO2_r1i1p1f1_gn_185001-186912.nc']
        ex1a = ['REC_id', 'REC_got', 'remote', "URL", 'AGGREGATION_LEVEL', 'FIXED_CONTENT', 'FILE_NAME', 'FILE_SIZE', 'IS_PART_OF', 'FILE_VERSION', 'CHECKSUM', 'CHECKSUM_METHOD', 'URL_ORIGINAL_DATA', "URL_REPLICA", 'HS_ADMIN', 'filename']
        hdl = ex1[0]
        self.p = Open( hdl )
        self.p.get()
        expected = ["URL", 'AGGREGATION_LEVEL', 'FILE_NAME', 'FILE_SIZE', 'IS_PART_OF', 'FILE_VERSION', 'CHECKSUM', 'CHECKSUM_METHOD', 'URL_ORIGINAL_DATA', "URL_REPLICA", 'HS_ADMIN']
        ## 'REPLACED_BY' if obsolete
        for k in expected:
            assert k in self.p.__dict__, 'Expected handle content key %s not found:: %s' % (k,str(self.p.__dict__.keys()))
        for k in expected:
            print ('%s: %s' % (k,self.p.__dict__[k]))
        print ('PARSING PARENT ..... ' )
        print ( self.htmpl % self.p.__dict__['IS_PART_OF'] )
        ##remote = Remote( self.p.__dict__['IS_PART_OF']['value'] )
        self.pp = self.p.__dict__['IS_PART_OF']
        for p in self.pp:
            p.get()
            for k in p.__dict__.keys():
                print ('%s: %s' % (k,p.__dict__[k]))
        #'isReplacedBy' if obsolete
        ##expected= ['creation_date', 'AGGREGATION_LEVEL', 'HS_ADMIN', '10320/loc', 'checksum', 'URL', 'children', 'tracking_id']
    def parseArgs(self):
        """Parse ``self.args`` into the option dictionary ``self.d``.

        NOTE(review): consumes (pops) the caller's argument list in place.
        """
        self.d = {}
        kn = self.knownargs0 + self.knownargs1
        xx = []
        al = self.args
        while len(al) > 0:
            a = al.pop(0)
            if a not in kn:
                xx.append(a)
            elif a in self.knownargs1:
                # Options in knownargs1 consume the next token as their value.
                self.d[a] = al.pop(0)
            else:
                self.d[a] = True
        if len(xx) > 0:
            print ('ARGUMENTS NOT RECOGNISED: %s' % str(xx) )
            print ('FROM LIST: %s' % al)
class NcHead(dict):
    """Dictionary of selected global attributes read from a NetCDF file."""
    def __init__(self, fn):
        """Read the ``tracking_id`` and ``contact`` global attributes of *fn*."""
        dataset = netCDF4.Dataset(fn, 'r')
        present = dataset.ncattrs()
        for attr in ('tracking_id', 'contact'):
            if attr in present:
                self[attr] = dataset.getncattr(attr)
def run_test():
    """Run a simple test using a known handle"""
    handle = 'hdl:21.14100/062520a0-f3d8-41bd-8b94-3fe0e4a6ab0e'
    filename = 'tas_Amon_MPI-ESM1-2-LR_1pctCO2_r1i1p1f1_gn_185001-186912.nc'
    print ("Running a test using tracking id %s\nfrom file %s\n---------------------------\n" % (handle, filename) )
    Main( ['-id', handle, '-V'] )
def command_line_entry():
    """Dispatch on the raw command line: help text, self-test, or normal run."""
    import sys
    argv = sys.argv
    if len(argv) == 1 or '-h' in argv:
        # No arguments (or explicit -h): show the module documentation.
        print ( __doc__ )
    elif '-t' in argv:
        run_test()
    else:
        Main( argv[1:] )


if __name__ == "__main__":
    command_line_entry()
| StarcoderdataPython |
361425 | foobar = 'blah %d' % 1
c = 'a'
specialChars = 'what\'ll\r\nhappen\r\nhere\\?'
def fn(val):
    """Append a double exclamation mark to *val*."""
    suffix = '!!'
    return val + suffix
baz = <
foo: "bar bar"
baz: blah
bar: 1
bob:
foo: bar
blah:
blah: blahber
someList:
- a
- b
- c
otherList: [1, 2, 3]
boolList: [true, false, !~ True]
nullVal: null
nullVallExpr: !~ None
specialCharsString: "what'll\r\nhappen\r\nhere\\?"
specialCharsExpr: !~ specialChars
anotherList: [
a, b, c, !~ c + 'd' + fn('bazzer')
]
>
foo: !~ a + b
<
alist:
- one
- !~ 1 + 4
- two
- three
>
baz.render() | StarcoderdataPython |
8100215 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo import ZooContext
class OrcaContextMeta(type):
    """Metaclass exposing Orca's process-wide configuration as class properties."""

    # Backing fields for the configuration properties below.
    _pandas_read_backend = "spark"
    __eager_mode = True
    _serialize_data_creation = False

    @property
    def log_output(cls):
        """
        Whether the Spark driver JVM's stdout/stderr are redirected into the
        current Python process (useful inside Jupyter notebooks).
        Defaults to False; must be set before the SparkContext is initialized.
        """
        return ZooContext.log_output

    @log_output.setter
    def log_output(cls, value):
        ZooContext.log_output = value

    @property
    def pandas_read_backend(cls):
        """
        Backend used for reading csv/json files: "spark" (spark.read) or
        "pandas" (pandas.read).  Defaults to "spark".
        """
        return cls._pandas_read_backend

    @pandas_read_backend.setter
    def pandas_read_backend(cls, value):
        normalized = value.lower()
        assert normalized in ("spark", "pandas"), \
            "pandas_read_backend must be either spark or pandas"
        cls._pandas_read_backend = normalized

    @property
    def _eager_mode(cls):
        """
        Whether SparkXShards operations are computed eagerly.
        Defaults to True.
        """
        return cls.__eager_mode

    @_eager_mode.setter
    def _eager_mode(cls, value):
        assert isinstance(value, bool), "_eager_mode should either be True or False"
        cls.__eager_mode = value

    @property
    def serialize_data_creation(cls):
        """
        Whether to guard the PyTorch Horovod data-loading step with a file lock,
        so multiple workers on one node can download to the same destination.
        Defaults to False.
        """
        return cls._serialize_data_creation

    @serialize_data_creation.setter
    def serialize_data_creation(cls, value):
        assert isinstance(value, bool), "serialize_data_creation should either be True or False"
        cls._serialize_data_creation = value
class OrcaContext(metaclass=OrcaContextMeta):
    """Public access point for Orca's global configuration.

    All settings live on the metaclass, so they are read/written directly on
    this class, e.g. ``OrcaContext.pandas_read_backend = "pandas"``.
    """
    pass
| StarcoderdataPython |
3209627 | from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from mezzanine.conf import settings
import mezzanine_pagedown.urls
from tastypie.api import Api
from amp import views as amp_views
from blogapi.api import AllBlogSlugResource, BlogResource
from sitemaps.mobile_sitemaps import DisplayableSitemap as DisplayableMobileSitemap
from sitemaps.sitemaps import DisplayableSitemap
# Tastypie REST API, versioned under /api/v1/.
apiv1 = Api(api_name='v1')
apiv1.register(BlogResource())
apiv1.register(AllBlogSlugResource())
admin.autodiscover()
# Admin URLs are language-prefixed (i18n_patterns).
urlpatterns = i18n_patterns("",
    ("^admin/", include(admin.site.urls)),
)
# Mount the media library only when the filebrowser package is installed.
if getattr(settings, "PACKAGE_NAME_FILEBROWSER") in settings.INSTALLED_APPS:
    urlpatterns += i18n_patterns("",
        ("^admin/media-library/", include("%s.urls" %
                                        settings.PACKAGE_NAME_FILEBROWSER)),
    )
# Desktop and mobile sitemaps.
sitemaps = {"sitemaps": {"all": DisplayableSitemap}}
mobile_sitemaps = {"sitemaps": {"all": DisplayableMobileSitemap}}
urlpatterns += patterns("sitemaps.views",
    ("^sitemap\.xml$", "index", sitemaps),
    ("^sitemap_mobile\.xml$", "sitemap", mobile_sitemaps)
)
# Blog feeds are reachable both at /feeds/... and /blog/feeds/...
urlpatterns += patterns("feed.view",
    url("feeds/(?P<format>.*)%s$" % "/",
        "blog_post_feed", name="blog_post_feed"),
    url("^blog/feeds/(?P<format>.*)%s$" % "/",
        "blog_post_feed", name="blog_post_feed")
)
urlpatterns += patterns("homepage.views",
    url("^$", "homepage", name="home"),
)
urlpatterns += patterns("",
    ("^events/", include("events.urls")),
)
# Remaining routes: AMP blog pages, pagedown editor assets, REST/JWT endpoints,
# WeChat webhook, and finally the Mezzanine catch-all (must stay last).
urlpatterns += patterns('',
    url("^amp/(?P<slug>.*)%s$" % '/', amp_views.amp_blog_post_detail, name="blog_post_detail"),
    url("^pagedown/", include(mezzanine_pagedown.urls)),
    url(r"^api/", include(apiv1.urls)),
    url(r"^api/app/", include("api.urls")),
    url(r'^api-token-auth/', 'rest_framework_jwt.views.obtain_jwt_token'),
    url(r'^api-token-refresh/', 'rest_framework_jwt.views.refresh_jwt_token'),
    url(r'^api-token-verify/', 'rest_framework_jwt.views.verify_jwt_token'),
    url(r'^weixin', 'weixin.views.wechat'),
    url("^", include("mezzanine.urls")),
)
# Mezzanine-provided error handlers.
handler404 = "mezzanine.core.views.page_not_found"
handler500 = "mezzanine.core.views.server_error"
| StarcoderdataPython |
52619 | <reponame>emaballarin/DSSC_DL_2021
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ==============================================================================
#
# :: IMProved ::
#
# Improved tools for Iterative Magnitude Pruning and PyTorch model masking
# with minimal-memory impact, device invariance, O(1) amortized lookup and
# tensor-mask on-demand materialization.
#
# ==============================================================================
# IMPORTS:
from typing import Union, Optional, Tuple, Set, TypedDict
from torch import Tensor
import torch as th
# CUSTOM TYPES:
# Scalar "real number" alias used for pruning rates/thresholds.
realnum = Union[float, int]
class IdxSet(TypedDict):
    """Typed dictionary for string-named sets of ints"""
    # NOTE(review): at runtime this is used as a plain dict mapping arbitrary
    # parameter names (str) to sets of pruned flat indices; the two declared
    # keys below are never actually set -- verify whether Dict[str, Set[int]]
    # was the intended type.
    name: str
    val: Set[int]
class Mask(TypedDict):
    """Typed dictionary for string-named IdXSets"""
    # NOTE(review): likewise used as a plain mapping from layer name to IdxSet.
    name: str
    val: IdxSet
# FUNCTIONS:
def paramsplit(
    paramname: str, lfrom: Optional[int] = None, lto: Optional[int] = None
) -> Tuple[str]:
    """Split a dotted parameter name (as produced by ``named_parameters``) and
    return the ``[lfrom:lto]`` slice of the pieces as a tuple.

    E.g. ``paramsplit("net.layer1.weight", 1)`` -> ``("layer1", "weight")``.
    """
    pieces = tuple(paramname.split("."))
    return pieces[lfrom:lto]
def _maskdrill(odict: Mask, okey: str, ikey: str) -> Set[int]:
"""Return the set contained in a Mask after double key lookup, empty if none"""
if okey in odict.keys():
if ikey in odict[okey].keys():
return odict[okey][ikey]
return set()
return set()
def mask_size(maskin: Mask) -> int:
    """
    Count the number of elements inside a Mask object.

    Parameters
    ----------
    maskin : Mask
        The mask whose pruned-index entries are to be counted.

    Returns
    -------
    int
        Total number of indices across all layers and parameter roles.
    """
    return sum(
        len(indices)
        for per_layer in maskin.values()
        for indices in per_layer.values()
    )
def maskterialize(tsize, indexset: Set[int]) -> Tensor:
    """Materialize a dense {0, 1} float mask of shape *tsize* with zeros at the
    flat positions listed in *indexset*."""
    masked_positions = th.tensor(sorted(indexset), dtype=th.long)
    return th.ones(tsize).index_put_(
        values=th.tensor([0.0]), indices=(masked_positions,)
    )
def magnitude_pruning(
    model,
    rate: realnum,
    restr_layers: Optional[Set[str]] = None,
    restr_parameters: Optional[Set[str]] = None,
    mask: Optional[Mask] = None,
) -> Mask:
    """
    Apply one iteration of Iterative Magnitude Pruning to a PyTorch model

    Parameters
    ----------
    model : Any valid PyTorch model
        The model to prune
    rate : Union[float, int]
        The pruning rate
    restr_layers : Optional[Set[str]], optional
        The number of the layers to which to restrict pruning. If None, no restriction is applied. Defaults to None.
    restr_parameters : Optional[Set[str]], optional
        The name of the tensor-parameters to which to restrict pruning. If None, no restriction is applied. Defaults to None.
    mask : Optional[Mask], optional
        A starting mask for the pruning process. Indexes denote already-pruned (or unprunable) parameters. Useful in case of repeated iterations of IMP. Defaults to None, equivalent to an empty mask.

    Returns
    -------
    Mask
        The pruning mask at the end of the IMP iteration.

    Raises
    ------
    ValueError
        In case a pruning rate incompatible with a ratio is passed.
    """
    # Validate signature
    # NOTE(review): the error message contains a ``{}`` placeholder but
    # ``.format(rate)`` is never called, so the rate is not interpolated -- verify.
    if rate < 0 or rate > 1:
        raise ValueError(
            "Given pruning rate {} is negative or exceeds 1. Provide a valid rate!"
        )
    # Fill empty elements, if None
    if mask is None:
        mask: Mask = {}
    if restr_layers is None:
        restr_layers = []
    if restr_parameters is None:
        restr_parameters = []
    # First pass: look and select
    # Collect the absolute values of all still-prunable weights (i.e. those not
    # already present in ``mask``) across the selected layers/parameters.
    sel_par = []
    with th.no_grad():
        for name, param in model.named_parameters():
            layname = paramsplit(name, 1)[0]
            parname = paramsplit(name, 1)[1]
            if (not restr_layers or layname in restr_layers) and (
                not restr_parameters or parname in restr_parameters
            ):
                sel_par.append(
                    param.view(-1)[
                        list(
                            set(range(param.numel())).difference(
                                _maskdrill(mask, layname, parname)
                            )
                        )
                    ]
                    .clone()
                    .detach()
                    .abs()
                )
        # Ascending sort of all candidate magnitudes.
        flat_par = th.cat(sel_par, dim=0).sort()[0]
        # Pruning threshold
        # NOTE(review): rate == 1.0 indexes one past the end of ``flat_par``
        # (IndexError); clamp upstream if full pruning is ever required.
        thresh = flat_par[int(rate * flat_par.shape[0])]
        # Second pass: compare and prune
        for name, param in model.named_parameters():
            layname = paramsplit(name, 1)[0]
            parname = paramsplit(name, 1)[1]
            if (not restr_layers or layname in restr_layers) and (
                not restr_parameters or parname in restr_parameters
            ):
                flat_param = param.view(-1)
                # Keep weights whose magnitude meets the threshold (ties survive).
                tensormask = th.where(flat_param.abs() >= thresh, 1, 0)
                # Apply via the tensor-mask just created
                # (in-place zeroing through the flattened view also updates ``param``).
                flat_param.data *= tensormask
                # Store the indexes of pruned elements
                if not layname in mask.keys():
                    mask[layname] = IdxSet()
                if not parname in mask[layname].keys():
                    mask[layname][parname] = set()
                mask[layname][parname].update(
                    set((tensormask == 0).nonzero().view(-1).tolist())
                )
    # Return line
    return mask
| StarcoderdataPython |
378715 | <gh_stars>100-1000
#!/usr/bin/env python
import os
from brutal.core.management import exec_overlord
if __name__ == "__main__":
os.environ.setdefault("BRUTAL_CONFIG_MODULE", "{{ spawn_name }}.config")
exec_overlord("{{ spawn_name }}.config") | StarcoderdataPython |
11298784 | import numpy as np
from matplotlib import pyplot as plt
from add_noise import salt_and_pepper, white_noise
def _gaussian_weight(im, p1, p2, n_size, filtering):
r = int(n_size / 2)
x1, y1 = p1
x2, y2 = p2
area1 = im[(x1 - r):(x1 + r + 1), (y1 - r):(y1 + r + 1), :]
area2 = im[(x2 - r):(x2 + r + 1), (y2 - r):(y2 + r + 1), :]
distance = np.sum(np.square(area1 - area2)) / filtering
if distance > 7:
return 0.0
else:
return np.exp(-max(distance, 0))
def non_local_means(im, k_size, n_size, filtering):
    """Denoise *im* with Non-Local Means.

    Each output pixel is a weighted average over a k_size x k_size search
    window, with weights from :func:`_gaussian_weight` comparing n_size-sided
    patches.  Grayscale (H, W) input is promoted to (H, W, 1) and squeezed back
    on return.

    NOTE(review): the image is zero-padded by the search radius; patches near
    the border therefore compare against padded zeros -- verify acceptable.
    """
    if im.ndim == 2:
        im = im[..., np.newaxis]
    r = int(k_size / 2)  # kernel radius
    h, w, c = im.shape
    im_extended = np.zeros([h + k_size - 1, w + k_size - 1, c])
    im_result = np.zeros_like(im_extended)
    # (xa, ya)..(xb, yb): region of the padded image holding the original pixels.
    xa, ya = r, r
    xb, yb = r + h, r + w
    im_extended[xa:xb, ya:yb, :] = im
    for x in range(xa, xb):
        for y in range(ya, yb):
            # Clamp the search window to the original-image region.
            xxa = (x - r) if (x - r) > xa else xa
            xxb = (x + r + 1) if (x + r + 1) < xb else xb
            yya = (y - r) if (y - r) > ya else ya
            yyb = (y + r + 1) if (y + r + 1) < yb else yb
            weight_total = 0.0
            for xx in range(xxa, xxb):
                for yy in range(yya, yyb):
                    weight = _gaussian_weight(im_extended, [x, y], [xx, yy], n_size, filtering)
                    weight_total += weight
                    im_result[x, y, :] += weight * im_extended[xx, yy, :]
            # Normalize; the centre pixel always contributes weight 1, so
            # weight_total is never zero.
            im_result[x, y, :] /= weight_total
    return np.squeeze(im_result[xa:xb, ya:yb, :])
if __name__ == '__main__':
    # Demo: add white noise to a sample image and denoise it with NLM,
    # then show original / noisy / denoised side by side.
    im = plt.imread('../images/box.jpg') / 256
    im_noisy = white_noise(im, 0.1)
    im_nlm = non_local_means(im_noisy, 5, 3, 0.5)
    # plotting
    fig, ax = plt.subplots(1, 3)
    fig.set_size_inches(16, 8)
    fig.set_tight_layout(True)
    for a in ax:
        a.axis('off')
    ax[0].imshow(im, cmap='gray')
    ax[0].set_title('Origin', fontsize=16)
    ax[1].imshow(im_noisy, cmap='gray')
    ax[1].set_title('Noisy', fontsize=16)
    ax[2].imshow(im_nlm, cmap='gray')
    ax[2].set_title('Non-local Means', fontsize=16)
    plt.show()
| StarcoderdataPython |
1872033 | from utils.args import is_existing_file, is_valid_folder
from utils.timer import timer
import pandas as pd
from spacy.matcher import PhraseMatcher
import spacy
from nltk.tokenize import RegexpTokenizer
from pdfminer.high_level import extract_text, extract_text_to_fp
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdftypes import PDFStream, PDFObjRef, resolve1, stream_value
from pdfminer.psparser import PSKeyword, PSLiteral, LIT
from pdfminer.pdfpage import PDFPage
from pdfminer.layout import LAParams
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
from tika import parser
from tqdm import tqdm
import pytesseract
from bs4 import BeautifulSoup
import sys
import os
import shutil
from io import StringIO
import yaml
import tempfile
import logging
import argparse
from pathlib import Path
import tika
tika.initVM()
try:
from PIL import Image
except ImportError:
import Image
# Default CLI arguments (overridable via parse_arguments).
input_file = "./data/samples/test/UBS_TEST.pdf"
output_folder = "./data/samples/test/SR_2019/"
# Minimum spaCy similarity for an OCR'd image to count as topic-relevant.
similarity_threshold = 0.4

# TODO: Takes long to instantiate, so should be done just per thread!
nlp = spacy.load("en_core_web_md")

# Phrase matcher primed with climate-related terminology.
matcher = PhraseMatcher(nlp.vocab)
terms = ["climate change", "greenhouse gas",
         "global warming", "emissions", "co2", "renewables"]
# Only run nlp.make_doc to speed things up
patterns = [nlp.make_doc(text) for text in terms]
matcher.add("TerminologyList", None, *patterns)

# Initialize tokens
# TODO: How can we use Phrase matcher so we can check tokens?
topic_tokens = nlp(
    "climate emissions co2 carbon greenhouse ghg renewables footprint")
def get_max_similarity(word):
    """Return the highest spaCy similarity between *word* and the topic tokens.

    Words without a word vector score 0.
    """
    if not word.has_vector:
        return 0
    return max(token.similarity(word) for token in topic_tokens)
class PdfExtractor:
    """Extract per-page text (and optionally image OCR) from a PDF file and
    write the result, together with the table of contents, to a YAML file in
    *output_folder*.  Extraction runs immediately on construction."""
    def __init__(self, input_file, output_folder=None, parser="tika", ocr_strategy="no_ocr", **kwargs):
        """
        :param input_file: path of the PDF to process
        :param output_folder: destination for the YAML output and relevant images
        :param parser: "tika" or "pdfminer"
        :param ocr_strategy: Tika OCR strategy ('no_ocr', 'ocr_only', 'ocr_and_text')
        :param kwargs: ignored (accepted so the CLI namespace can be splatted in)
        """
        self.logger = logging.getLogger('pdf_extractor')
        self.input_file = input_file
        self.output_folder = output_folder
        self.parser = parser
        self.ocr_strategy = ocr_strategy
        self.process_document()
    def extract_toc(self):
        """ Returns the extracted table of contents of the document (if found)

        Credits
        -------
        https://github.com/pdfminer/pdfminer.six/blob/master/tools/dumppdf.py

        Returns
        -------
        list: list of dict of { str: int, str: str, str: int}
            A list of dictionaries containing the "level", "title" and "page_no"
        """
        toc = []
        with open(self.input_file, 'rb') as fp:
            parser = PDFParser(fp)
            doc = PDFDocument(parser)
            # Map internal page object ids to 1-based page numbers.
            pages = {page.pageid: pageno for (pageno, page)
                     in enumerate(PDFPage.create_pages(doc), 1)}
            def resolve_dest(dest):
                # Resolve named/literal destinations down to a destination array.
                if isinstance(dest, str):
                    dest = resolve1(doc.get_dest(dest))
                elif isinstance(dest, PSLiteral):
                    dest = resolve1(doc.get_dest(dest.name))
                if isinstance(dest, dict):
                    dest = dest['D']
                if isinstance(dest, PDFObjRef):
                    dest = dest.resolve()
                return dest
            try:
                outlines = doc.get_outlines()
                # For each found outline/bookmark, resolve the page number of the object
                for (level, title, dest, a, se) in outlines:
                    page_no = None
                    if dest:
                        dest = resolve_dest(dest)
                        page_no = pages[dest[0].objid]
                    elif a:
                        # No direct destination: follow a GoTo action instead.
                        action = a.resolve()
                        if isinstance(action, dict):
                            subtype = action.get('S')
                            if subtype and repr(subtype) == '/\'GoTo\'' and action.get('D'):
                                dest = resolve_dest(action['D'])
                                page_no = pages[dest[0].objid]
                    toc.append(
                        {"level": level, "title": title, "page_no": page_no})
            except PDFNoOutlines:
                print("No outline for PDF found!")
                pass
            except Exception:
                print("General error getting outline for PDF")
            parser.close()
        return toc
    def extract_with_tika(self):
        """
        Extract per-page text via Apache Tika.

        Note that pytika can be additionally configured via environment variables in the docker-compose file!
        """
        pages_text = []
        # Read PDF file and export to XML to keep page information
        data = parser.from_file(
            str(self.input_file),
            xmlContent=True,
            requestOptions={'timeout': 6000},
            # 'X-Tika-PDFextractInlineImages' : true # Unfortunately does not really work
            # Options: 'no_ocr', 'ocr_only' and 'ocr_and_text'
            headers={'X-Tika-PDFOcrStrategy': self.ocr_strategy}
        )
        xhtml_data = BeautifulSoup(data['content'], features="lxml")
        pages = xhtml_data.find_all('div', attrs={'class': 'page'})
        for i, content in enumerate(tqdm(pages, disable=False)):
            # Re-parse each page's XHTML fragment individually to get its text.
            _buffer = StringIO()
            _buffer.write(str(content))
            parsed_content = parser.from_buffer(_buffer.getvalue())
            text = ''
            if parsed_content['content']:
                text = parsed_content['content'].strip()
            # NOTE(review): ``excertp`` is computed but never used -- verify intent.
            excertp = text.replace('\n', ' ')
            pages_text.append({"page_no": i+1, "text": text})
        return pages_text
    def process_images(self, tmp_dir):
        """ Runs tesseract OCR on images found in the specified folder. Calculates max similarity for each word to set of initial words

        Returns a tuple ``(full_text, unprocessed_images, relevant_images)``:
        the concatenated OCR text, filenames that failed OCR, and filenames
        whose text scored above ``similarity_threshold``.
        """
        images = os.listdir(tmp_dir)
        full_text = ''
        unprocessed_images = []
        relevant_images = []
        for i in images:
            try:
                text = pytesseract.image_to_string(
                    Image.open(os.path.join(tmp_dir, i))) + '\n\n'
                full_text += text
                ocr_tokens = nlp(text)
                # Score each OCR token against the module-level topic tokens.
                df = pd.DataFrame(ocr_tokens, columns=['ocr_token'])
                df['result'] = df['ocr_token'].apply(
                    lambda x: get_max_similarity(x))
                if df['result'].max() > similarity_threshold:
                    relevant_images.append(i)
            except Exception as error:
                # Best effort: keep going, but remember which images failed.
                self.logger.warning(
                    f'Exception processing image! {i} Message: {error}')
                unprocessed_images.append(i)
        return full_text, unprocessed_images, relevant_images
    def extract_with_pdfminer(self):
        """Extract per-page text with pdfminer, OCR any embedded images, and
        move topic-relevant images into ``<output_folder>/relevant_images``."""
        pages_text = []
        with open(self.input_file, 'rb') as fp:
            parser = PDFParser(fp)
            doc = PDFDocument(parser)
            pages = {page.pageid: pageno for (pageno, page)
                     in enumerate(PDFPage.create_pages(doc), 1)}
            for idx, page in enumerate(tqdm(pages, disable=False)):
                _buffer = StringIO()
                with tempfile.TemporaryDirectory() as tmp_dir:
                    # If output_dir is given the extracted images are stored there.
                    extract_text_to_fp(
                        fp, outfp=_buffer, page_numbers=[idx], output_dir=tmp_dir)
                    text_from_images, unprocessed_images, relevant_images = self.process_images(
                        tmp_dir)
                    text = _buffer.getvalue() + '\n\n' + text_from_images
                    if len(unprocessed_images):
                        self.logger.info(
                            f'Ignoring {len(unprocessed_images)} unprocessable image(s)....')
                    # Move relevant files to save place
                    for i in relevant_images:
                        path = os.path.join(
                            self.output_folder, 'relevant_images')
                        os.makedirs(path, exist_ok=True)
                        shutil.move(os.path.join(tmp_dir, i),
                                    os.path.join(path, i))
                pages_text.append(
                    {"page_no": idx + 1, "text": text, "unprocessed_images": unprocessed_images, "relevant_images": relevant_images})
        return pages_text
    @timer
    def extract_text_per_page(self):
        """Dispatch to the configured backend; timed via the ``@timer`` decorator."""
        if self.parser == "tika":
            return self.extract_with_tika()
        return self.extract_with_pdfminer()
    def write_output(self):
        """Write the TOC and per-page text to ``<output_folder>/<stem>.yml``."""
        output = {
            "toc": self.toc,
            "pages": self.pages_text
        }
        filename = Path(self.input_file).stem
        out_file_path = os.path.join(self.output_folder, filename)
        os.makedirs(os.path.dirname(out_file_path), exist_ok=True)
        with open(out_file_path + '.yml', 'w') as fp:
            yaml.dump(output, fp)
        # The block below is an inert string literal used to disable the
        # plain-text export; it is never executed.
        """"
        with open(out_file_path + '.txt', 'w') as fp:
            text = '\n\n<pagebreak />\n\n'.join(output['pages']['text'])
            fp.write(text)
        """
    def process_document(self):
        """Full pipeline: TOC (best effort), per-page text, then YAML output."""
        try:
            self.toc = self.extract_toc()
        except Exception as error:
            # TOC extraction is best effort; fall back to an empty TOC.
            self.toc = []
            self.logger.error(
                f'Exception getting TOC! Message: {error}')
        self.pages_text = self.extract_text_per_page()
        self.write_output()
def parse_arguments():
    """Collect and validate the command-line arguments for the extractor CLI."""
    arg_parser = argparse.ArgumentParser()
    # Positional (mandatory) arguments, validated against the filesystem.
    arg_parser.add_argument(
        'input_file', help="Path to the input file", default=input_file,
        metavar="FILE", type=lambda f: is_existing_file(arg_parser, f))
    arg_parser.add_argument(
        'output_folder', help="Path to the output folder", default=output_folder,
        metavar="FOLDER", type=lambda f: is_valid_folder(arg_parser, f))
    # Optional flags.
    arg_parser.add_argument("-d", "--debug", help="Debug",
                            type=bool, default=False)
    arg_parser.add_argument("-l", "--log-level", help="Log level",
                            type=str, default="info")
    arg_parser.add_argument('-f', "--log-file",
                            help="Log file location. By default, it stores the log file in the output directory", default=None)
    arg_parser.add_argument("-p", "--parser", help="Specify the PDF parser",
                            choices=('tika', 'pdfminer'), default="pdfminer")
    arg_parser.add_argument("-o", "--ocr-strategy", help="Specify the OCR Strategy",
                            choices=('no_ocr', 'ocr_only', 'ocr_and_text'), default="no_ocr")
    return arg_parser.parse_args()
def main(**kwargs):
    """CLI entry point: configure logging from the parsed arguments, then run
    the extractor."""
    log_file = kwargs['log_file'] if kwargs.get('log_file') else None
    # NOTE(review): ``logging.info``/``logging.debug`` are functions, not level
    # constants (logging.INFO/DEBUG); this fallback is only used when
    # 'log_level' is absent, which the CLI default prevents -- verify intent.
    log_level = logging.info if not kwargs.get('debug') else logging.debug
    log_level = getattr(logging, kwargs['log_level'].upper()) if kwargs.get(
        'log_level') else log_level
    logging.basicConfig(filename=log_file)
    logger = logging.getLogger('pdf_extractor')
    logger.setLevel(log_level)
    logger.info(f'PDF Extractor CLI initialized with arguments: \n{kwargs}')
    # Run actual extractor
    PdfExtractor(**kwargs)
if __name__ == '__main__':
    args = parse_arguments()
    main(**vars(args))
| StarcoderdataPython |
4933396 | <reponame>JFF-Bohdan/yabtool<filename>yabtool/shared/jinja2_helpers.py
import os
from jinja2 import BaseLoader, Environment, StrictUndefined
def jinja2_custom_filter_extract_year_four_digits(value):
    """Jinja2 filter: render *value*'s year as four digits (e.g. ``2021``)."""
    return format(value, "%Y")
def jinja2_custom_filter_extract_month_two_digits(value):
    """Jinja2 filter: render *value*'s month as two digits (e.g. ``03``)."""
    return format(value, "%m")
def jinja2_custom_filter_extract_day_two_digits(value):
    """Jinja2 filter: render *value*'s day of month as two digits (e.g. ``07``)."""
    return format(value, "%d")
def create_rendering_environment():
    """Build a Jinja2 environment (string-loaded, strict on undefined names)
    pre-registered with the custom date filters and ``base_name``."""
    env = Environment(loader=BaseLoader, undefined=StrictUndefined)
    custom_filters = {
        "extract_year_four_digits": jinja2_custom_filter_extract_year_four_digits,
        "extract_month_two_digits": jinja2_custom_filter_extract_month_two_digits,
        "extract_day_two_digits": jinja2_custom_filter_extract_day_two_digits,
        "base_name": os.path.basename,
    }
    env.filters.update(custom_filters)
    return env
| StarcoderdataPython |
5157342 | import copy
from typing import Union
import gym
import numpy as np
import pytest
import torch as T
from pearll.models import Actor, ActorCritic, Critic, Dummy
from pearll.models.actor_critics import Model
from pearll.models.encoders import IdentityEncoder, MLPEncoder
from pearll.models.heads import BoxHead, DiagGaussianHead, ValueHead
from pearll.models.torsos import MLP
from pearll.settings import PopulationSettings
from pearll.signal_processing import (
crossover_operators,
mutation_operators,
selection_operators,
)
from pearll.updaters.actors import (
DeterministicPolicyGradient,
PolicyGradient,
ProximalPolicyClip,
SoftPolicyGradient,
)
from pearll.updaters.critics import (
ContinuousQRegression,
DiscreteQRegression,
ValueRegression,
)
from pearll.updaters.environment import DeepRegression
from pearll.updaters.evolution import GeneticUpdater, NoisyGradientAscent
############################### SET UP MODELS ###############################
# Building blocks: identity/MLP encoders, small MLP torsos, a diagonal-Gaussian
# actor head and a value head.
encoder_critic = IdentityEncoder()
encoder_critic_continuous = MLPEncoder(input_size=3, output_size=2)
encoder_actor = IdentityEncoder()
torso_critic = MLP(layer_sizes=[2, 2])
torso_actor = MLP(layer_sizes=[2, 2])
head_actor = DiagGaussianHead(input_shape=2, action_size=1)
head_critic = ValueHead(input_shape=2, activation_fn=None)
actor = Actor(encoder=encoder_actor, torso=torso_actor, head=head_actor)
critic = Critic(encoder=encoder_critic, torso=torso_critic, head=head_critic)
continuous_critic = Critic(
    encoder=encoder_critic_continuous, torso=torso_critic, head=head_critic
)
# "shared" critics reuse torso_actor, so actor updates also move these critics.
continuous_critic_shared = Critic(
    encoder=encoder_critic_continuous, torso=torso_actor, head=head_critic
)
critic_shared = Critic(encoder=encoder_actor, torso=torso_actor, head=head_critic)
actor_critic = ActorCritic(actor=actor, critic=critic)
actor_critic_shared = ActorCritic(actor=actor, critic=critic_shared)
continuous_actor_critic = ActorCritic(actor=actor, critic=continuous_critic)
continuous_actor_critic_shared = ActorCritic(
    actor=actor, critic=continuous_critic_shared
)
# "marl" variants: populations of 2 actors / 2 critics.
marl = ActorCritic(
    actor=actor,
    critic=critic,
    population_settings=PopulationSettings(
        actor_population_size=2, critic_population_size=2
    ),
)
marl_shared = ActorCritic(
    actor=actor,
    critic=critic_shared,
    population_settings=PopulationSettings(
        actor_population_size=2, critic_population_size=2
    ),
)
marl_continuous = ActorCritic(
    actor=actor,
    critic=continuous_critic,
    population_settings=PopulationSettings(
        actor_population_size=2, critic_population_size=2
    ),
)
marl_shared_continuous = ActorCritic(
    actor=actor,
    critic=continuous_critic_shared,
    population_settings=PopulationSettings(
        actor_population_size=2, critic_population_size=2
    ),
)

# Deterministic test runs.
T.manual_seed(0)
np.random.seed(0)
def same_distribution(
    dist1: T.distributions.Distribution, dist2: T.distributions.Distribution
) -> bool:
    """Two Gaussian distributions are considered identical when both their
    means (``loc``) and standard deviations (``scale``) match element-wise."""
    means_equal = T.equal(dist1.loc, dist2.loc)
    scales_equal = T.equal(dist1.scale, dist2.scale)
    return means_equal and scales_equal
############################### TEST ACTOR UPDATERS ###############################
@pytest.mark.parametrize(
    "model", [actor, actor_critic, actor_critic_shared, marl, marl_shared]
)
def test_policy_gradient(model: Union[Actor, ActorCritic]):
    """PolicyGradient must change the actor's action distribution; the critic
    may only change when it shares a torso with the actor."""
    observation = T.rand(2)
    if model != actor:
        with T.no_grad():
            # Population models expect one observation per actor.
            observation = observation.repeat(model.num_actors, 1)
            critic_before = model.forward_critics(observation)
    out_before = model.action_distribution(observation)
    updater = PolicyGradient(max_grad=0.5)
    updater(
        model=model,
        observations=observation,
        actions=T.rand(1),
        advantages=T.rand(1),
    )
    out_after = model.action_distribution(observation)
    if model != actor:
        with T.no_grad():
            critic_after = model.forward_critics(observation)
    # The actor update must move the policy.
    assert not same_distribution(out_after, out_before)
    # Independent critics stay fixed; shared-torso critics must move too.
    if model == actor_critic or model == marl:
        assert T.equal(critic_before, critic_after)
    if model == actor_critic_shared or model == marl_shared:
        assert not T.equal(critic_before, critic_after)
@pytest.mark.parametrize(
    "model", [actor, actor_critic, actor_critic_shared, marl, marl_shared]
)
def test_proximal_policy_clip(model: Union[Actor, ActorCritic]):
    """A PPO-clip step must move the policy; the critic only moves when shared."""
    obs = T.rand(2)
    values_pre = None
    if model != actor:
        with T.no_grad():
            # Multi-actor models expect one observation row per actor.
            obs = obs.repeat(model.num_actors, 1)
            values_pre = model.forward_critics(obs)
    dist_pre = model.action_distribution(obs)
    # Apply a single clipped policy-gradient step.
    updater = ProximalPolicyClip(max_grad=0.5)
    updater(
        model=model,
        observations=obs,
        actions=T.rand(1),
        advantages=T.rand(1),
        old_log_probs=T.rand(1),
    )
    dist_post = model.action_distribution(obs)
    if model != actor:
        with T.no_grad():
            values_post = model.forward_critics(obs)
    # The action distribution must have changed.
    assert not same_distribution(dist_post, dist_pre)
    # A separate critic stays put; a shared torso drags the critic along.
    if model in (actor_critic, marl):
        assert T.equal(values_pre, values_post)
    elif model in (actor_critic_shared, marl_shared):
        assert not T.equal(values_pre, values_post)
@pytest.mark.parametrize(
    "model",
    [
        continuous_actor_critic,
        continuous_actor_critic_shared,
        marl_continuous,
        marl_shared_continuous,
    ],
)
def test_deterministic_policy_gradient(model: ActorCritic):
    """A DPG (DDPG-style) step must change the deterministic action output.

    Q-values for the old action must stay fixed for separate critics and
    change for shared-torso critics.
    """
    observation = T.rand(2).repeat(model.num_actors, 1)
    action = model(observation)
    with T.no_grad():
        critic_before = model.forward_critics(observation, action)
    updater = DeterministicPolicyGradient(max_grad=0.5)
    updater(
        model=model,
        observations=observation,
    )
    out_after = model(observation)
    with T.no_grad():
        critic_after = model.forward_critics(observation, action)
    # The actor's deterministic action must have moved.
    assert not T.equal(action, out_after)
    if model == continuous_actor_critic or model == marl_continuous:
        assert T.equal(critic_before, critic_after)
    if model == continuous_actor_critic_shared or model == marl_shared_continuous:
        assert not T.equal(critic_before, critic_after)
@pytest.mark.parametrize(
    "model",
    [
        continuous_actor_critic,
        continuous_actor_critic_shared,
        marl_continuous,
        marl_shared_continuous,
    ],
)
def test_soft_policy_gradient(model: ActorCritic):
    """A SAC-style soft policy-gradient step must change the action distribution.

    Q-values for the old action stay fixed for separate critics and change for
    shared-torso critics.
    """
    observation = T.rand(2).repeat(model.num_actors, 1)
    action = model(observation)
    out_before = model.action_distribution(observation)
    with T.no_grad():
        critic_before = model.forward_critics(observation, action)
    updater = SoftPolicyGradient(max_grad=0.5)
    updater(
        model=model,
        observations=observation,
    )
    out_after = model.action_distribution(observation)
    with T.no_grad():
        critic_after = model.forward_critics(observation, action)
    assert not same_distribution(out_after, out_before)
    if model == continuous_actor_critic or model == marl_continuous:
        assert T.equal(critic_before, critic_after)
    if model == continuous_actor_critic_shared or model == marl_shared_continuous:
        assert not T.equal(critic_before, critic_after)
############################### TEST CRITIC UPDATERS ###############################
@pytest.mark.parametrize(
    "model", [critic, actor_critic, actor_critic_shared, marl, marl_shared]
)
def test_value_regression(model: Union[Critic, ActorCritic]):
    """Value regression must update the value head.

    The policy must stay untouched unless actor and critic share a torso.
    """
    observation = T.rand(2)
    returns = T.rand(1)
    if model != critic:
        observation = observation.repeat(model.num_critics, 1)
        with T.no_grad():
            actor_before = model.action_distribution(observation)
        out_before = model.forward_critics(observation)
    else:
        # A bare critic is simply called on the observation.
        out_before = model(observation)
    updater = ValueRegression(max_grad=0.5)
    updater(model, observation, returns)
    if model != critic:
        out_after = model.forward_critics(observation)
        with T.no_grad():
            actor_after = model.action_distribution(observation)
    else:
        out_after = model(observation)
    # The value estimate must have moved.
    assert not T.equal(out_after, out_before)
    if model == actor_critic_shared or model == marl_shared:
        assert not same_distribution(actor_before, actor_after)
    elif model == actor_critic or model == marl:
        assert same_distribution(actor_before, actor_after)
@pytest.mark.parametrize("model", [critic, actor_critic, actor_critic_shared])
def test_discrete_q_regression(model: Union[Critic, ActorCritic]):
    """Discrete Q-regression must update the Q-head; the policy only moves
    when actor and critic share a torso.
    """
    obs = T.rand(1, 2)
    chosen_actions = T.randint(0, 1, (1, 1))
    target_returns = T.rand(1)

    def snapshot():
        # Bare critics are called directly; actor-critics expose forward_critics
        # plus a policy we also want to observe.
        if model == critic:
            return model(obs), None
        q_values = model.forward_critics(obs)
        with T.no_grad():
            policy = model.action_distribution(obs)
        return q_values, policy

    q_pre, policy_pre = snapshot()
    updater = DiscreteQRegression(max_grad=0.5)
    updater(model, obs, target_returns, chosen_actions)
    q_post, policy_post = snapshot()
    # The Q-values must have moved.
    assert not T.equal(q_pre, q_post)
    if model == actor_critic_shared:
        assert not same_distribution(policy_pre, policy_post)
    elif model == actor_critic:
        assert same_distribution(policy_pre, policy_post)
@pytest.mark.parametrize(
    "model",
    [continuous_actor_critic, continuous_actor_critic_shared, continuous_critic],
)
def test_continuous_q_regression(model: Union[Critic, ActorCritic]):
    """Continuous Q-regression must update the Q-head; the policy only moves
    when actor and critic share a torso.
    """
    observation = T.rand(1, 2)
    actions = T.rand(1, 1)
    returns = T.rand(1)
    if model == continuous_critic:
        out_before = model(observation, actions)
    else:
        out_before = model.forward_critics(observation, actions)
        with T.no_grad():
            actor_before = model.action_distribution(observation)
    updater = ContinuousQRegression(max_grad=0.5)
    updater(model, observation, actions, returns)
    if model == continuous_critic:
        out_after = model(observation, actions)
    else:
        out_after = model.forward_critics(observation, actions)
        with T.no_grad():
            actor_after = model.action_distribution(observation)
    # Whole-tensor comparison, consistent with the sibling tests:
    # `out_after != out_before` is an element-wise tensor comparison whose
    # truthiness is ambiguous (raises) for outputs with more than one element.
    assert not T.equal(out_before, out_after)
    if model == continuous_actor_critic_shared:
        assert not same_distribution(actor_before, actor_after)
    elif model == continuous_actor_critic:
        assert same_distribution(actor_before, actor_after)
############################### TEST EVOLUTION UPDATERS ###############################
class Sphere(gym.Env):
    """Two-dimensional sphere-function environment for ES agent tests.

    The reward is the negative squared distance of the action from the
    origin, so the optimum is the action ``(0, 0)``. Observations are a
    constant ``0`` and episodes never terminate.
    """

    def __init__(self):
        self.action_space = gym.spaces.Box(low=-100, high=100, shape=(2,))
        self.observation_space = gym.spaces.Discrete(1)

    def step(self, action):
        first, second = action[0], action[1]
        reward = -(first ** 2 + second ** 2)
        return 0, reward, False, {}

    def reset(self):
        return 0
class DiscreteSphere(gym.Env):
    """One-dimensional discrete sphere-function environment for ES agent tests.

    Actions are integers in ``[0, 10)``; the reward is the negative square of
    the action, so the optimum is action ``0``. Observations are a constant
    ``0`` and episodes never terminate.
    """

    def __init__(self):
        self.action_space = gym.spaces.Discrete(10)
        self.observation_space = gym.spaces.Discrete(1)

    def step(self, action):
        penalty = action ** 2
        return 0, -penalty, False, {}

    def reset(self):
        return 0
# Population size shared by the evolutionary/genetic updater tests below.
POPULATION_SIZE = 100
# One synchronous vector env per population member, so a whole population can
# be evaluated with a single step() call.
env_continuous = gym.vector.SyncVectorEnv(
    [lambda: Sphere() for _ in range(POPULATION_SIZE)]
)
env_discrete = gym.vector.SyncVectorEnv(
    [lambda: DiscreteSphere() for _ in range(POPULATION_SIZE)]
)
def test_evolutionary_updater_continuous():
    """NoisyGradientAscent on a continuous Sphere(2): one update must move the
    population mean toward the optimum while keeping the population std ~1.
    """
    actor_continuous = Dummy(
        space=env_continuous.single_action_space, state=np.array([10, 10])
    )
    # NOTE: this local `critic` intentionally shadows the module-level critic.
    critic = Dummy(space=env_continuous.single_action_space)
    model_continuous = ActorCritic(
        actor=actor_continuous,
        critic=critic,
        population_settings=PopulationSettings(
            actor_population_size=POPULATION_SIZE, actor_distribution="normal"
        ),
    )
    # ASSERT POPULATION STATS
    updater = NoisyGradientAscent(model_continuous)
    # make sure starting population mean is correct
    np.testing.assert_allclose(
        np.mean(model_continuous.numpy_actors(), axis=0), np.array([10, 10]), rtol=0.1
    )
    # TEST CALL
    old_model = copy.deepcopy(model_continuous)
    old_population = model_continuous.numpy_actors()
    action = model_continuous(np.zeros(POPULATION_SIZE))
    _, rewards, _, _ = env_continuous.step(action)
    # Standardize rewards and project them onto the sampled noise to get the
    # ES gradient estimate.
    scaled_rewards = (rewards - np.mean(rewards)) / np.std(rewards)
    optimization_direction = np.dot(updater.normal_dist.T, scaled_rewards)
    log = updater(learning_rate=0.01, optimization_direction=optimization_direction)
    new_population = model_continuous.numpy_actors()
    assert log.divergence > 0
    # make sure the neural network has been updated by the updater
    assert model_continuous != old_model
    assert np.not_equal(old_population, new_population).any()
    # make sure the network mean has been updated by the updater
    np.testing.assert_array_equal(model_continuous.mean_actor, updater.mean)
    # make sure the new population has the correct std
    np.testing.assert_allclose(
        np.std(model_continuous.numpy_actors(), axis=0), np.ones(2), rtol=0.1
    )
    # make sure the update direction is correct
    np.testing.assert_array_less(
        np.mean(model_continuous.numpy_actors(), axis=0), np.array([10, 10])
    )
def test_evolutionary_updater_discrete():
    """NoisyGradientAscent on the discrete sphere: the population must stay
    integer-valued and its mean must move toward the optimum.
    """
    actor_discrete = Dummy(space=env_discrete.single_action_space, state=np.array([5]))
    # NOTE: this local `critic` intentionally shadows the module-level critic.
    critic = Dummy(space=env_discrete.single_action_space)
    model_discrete = ActorCritic(
        actor=actor_discrete,
        critic=critic,
        population_settings=PopulationSettings(
            actor_population_size=POPULATION_SIZE, actor_distribution="normal"
        ),
    )
    # ASSERT POPULATION STATS
    updater = NoisyGradientAscent(model_discrete)
    # make sure the population has been discretized
    assert np.issubdtype(model_discrete.numpy_actors().dtype, np.integer)
    # make sure starting population mean is correct
    np.testing.assert_allclose(
        np.mean(model_discrete.numpy_actors(), axis=0), np.array([5]), rtol=0.1
    )
    # Test call
    old_model = copy.deepcopy(model_discrete)
    old_population = model_discrete.numpy_actors()
    action = model_discrete(np.zeros(POPULATION_SIZE))
    _, rewards, _, _ = env_discrete.step(action)
    # Standardize rewards and project onto the sampled noise (ES gradient).
    scaled_rewards = (rewards - np.mean(rewards)) / np.std(rewards)
    optimization_direction = np.dot(updater.normal_dist.T, scaled_rewards)
    log = updater(learning_rate=1e-5, optimization_direction=optimization_direction)
    new_population = model_discrete.numpy_actors()
    assert log.divergence > 0
    # make sure the neural network has been updated by the updater
    assert model_discrete != old_model
    assert np.not_equal(old_population, new_population).any()
    # make sure the population has been discretized
    assert np.issubdtype(new_population.dtype, np.integer)
    # make sure the network mean has been updated by the updater
    np.testing.assert_array_equal(model_discrete.mean_actor, updater.mean)
    # make sure the new population has the correct std
    np.testing.assert_allclose(np.std(new_population, axis=0), np.ones(1), rtol=0.1)
    # make sure the update direction is correct
    np.testing.assert_array_less(model_discrete.mean_actor, np.array([5]))
def test_genetic_updater_continuous():
    """GeneticUpdater on Sphere(2): selection/crossover/mutation must change
    the population and move its best members toward the optimum.
    """
    actor_continuous = Dummy(
        space=env_continuous.single_action_space, state=np.array([10, 10])
    )
    # NOTE: this local `critic` intentionally shadows the module-level critic.
    critic = Dummy(space=env_continuous.single_action_space)
    model_continuous = ActorCritic(
        actor=actor_continuous,
        critic=critic,
        population_settings=PopulationSettings(
            actor_population_size=POPULATION_SIZE, actor_distribution="normal"
        ),
    )
    # Assert population stats
    updater = GeneticUpdater(model_continuous)
    np.testing.assert_allclose(
        np.mean(model_continuous.numpy_actors(), axis=0), np.array([10, 10]), rtol=0.1
    )
    # Test call
    old_model = copy.deepcopy(model_continuous)
    old_population = model_continuous.numpy_actors()
    action = model_continuous(np.zeros(POPULATION_SIZE))
    _, rewards, _, _ = env_continuous.step(action)
    log = updater(
        rewards=rewards,
        selection_operator=selection_operators.roulette_selection,
        crossover_operator=crossover_operators.one_point_crossover,
        mutation_operator=mutation_operators.uniform_mutation,
    )
    new_population = model_continuous.numpy_actors()
    assert log.divergence > 0
    assert model_continuous != old_model
    assert np.not_equal(old_population, new_population).any()
    # At least the best member should have moved below the starting point.
    np.testing.assert_array_less(np.min(new_population, axis=0), np.array([10, 10]))
def test_genetic_updater_discrete():
    """GeneticUpdater on the discrete sphere: the population must stay
    integer-valued, change after the update and improve toward the optimum.
    """
    actor_discrete = Dummy(space=env_discrete.single_action_space, state=np.array([5]))
    # NOTE: this local `critic` intentionally shadows the module-level critic.
    critic = Dummy(space=env_discrete.single_action_space)
    model_discrete = ActorCritic(
        actor=actor_discrete,
        critic=critic,
        population_settings=PopulationSettings(
            actor_population_size=POPULATION_SIZE, actor_distribution="uniform"
        ),
    )
    # Assert population stats
    updater = GeneticUpdater(model_discrete)
    old_model = copy.deepcopy(model_discrete)
    old_population = model_discrete.numpy_actors()
    assert np.issubdtype(old_population.dtype, np.integer)
    np.testing.assert_allclose(np.mean(old_population, axis=0), np.array([5]), rtol=0.2)
    # Test call
    action = model_discrete(np.zeros(POPULATION_SIZE))
    _, rewards, _, _ = env_discrete.step(action)
    log = updater(
        rewards=rewards,
        selection_operator=selection_operators.roulette_selection,
        crossover_operator=crossover_operators.one_point_crossover,
        mutation_operator=mutation_operators.uniform_mutation,
    )
    new_population = model_discrete.numpy_actors()
    assert np.issubdtype(new_population.dtype, np.integer)
    assert log.divergence > 0
    assert model_discrete != old_model
    assert np.not_equal(old_population, new_population).any()
    # At least the best member should have moved below the starting point.
    np.testing.assert_array_less(np.min(new_population, axis=0), np.array([5]))
############################### TEST ENVIRONMENT UPDATERS ###############################
@pytest.mark.parametrize("loss_class", [T.nn.MSELoss(), T.nn.BCELoss()])
def test_deep_env_updater(loss_class):
    """DeepRegression on a tiny environment model reproduces the known loss
    for a fixed seed, for both MSE and BCE losses.
    """
    T.manual_seed(0)
    np.random.seed(0)
    encoder = IdentityEncoder()
    torso = MLP(layer_sizes=[3, 1, 1])
    head = BoxHead(input_shape=1, space_shape=2, activation_fn=T.nn.Softmax)
    deep_model = Model(encoder=encoder, torso=torso, head=head)
    updater = DeepRegression(loss_class=loss_class)
    observations = T.Tensor([1, 1])
    actions = T.Tensor([1])
    targets = T.Tensor([1, 1])
    if isinstance(loss_class, T.nn.BCELoss):
        # BCE expects a binary target.
        targets = T.Tensor([False])
    log = updater(deep_model, observations, actions, targets)
    # Compare with a tolerance: bit-exact float equality is fragile across
    # platforms/BLAS builds even with fixed seeds.
    if isinstance(loss_class, T.nn.MSELoss):
        assert log.loss == pytest.approx(0.2826874256134033, rel=1e-5)
    elif isinstance(loss_class, T.nn.BCELoss):
        assert log.loss == pytest.approx(1.141926884651184, rel=1e-5)
| StarcoderdataPython |
6549771 | from unittest import skip
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
# URL of the tag list endpoint of the recipe app (router name 'recipe:tag-list').
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Test the publicly available (unauthenticated) tags API"""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """Test that login is required for retrieving tags"""
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Test the tags API for an authenticated user"""

    def setUp(self):
        # Every test runs as this freshly created, authenticated user.
        self.user = get_user_model().objects.create_user(
            email='<EMAIL>',
            password='password'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_tags(self):
        """Test retrieving tags"""
        Tag.objects.create(user=self.user, name='Vegan')
        Tag.objects.create(user=self.user, name='Dessert')
        res = self.client.get(TAGS_URL)
        # Endpoint is expected to return tags ordered by name descending.
        tags = Tag.objects.all().order_by('-name')
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_tags_limited_to_user(self):
        """Test that tags returned are for the authenticated user only"""
        Tag.objects.create(user=self.user, name='Vegan')
        # A tag belonging to a different user must not appear in the results.
        Tag.objects.create(
            user=get_user_model().objects.create_user(
                email='<EMAIL>',
                password='password',
                name='name'
            ),
            name='Dessert'
        )
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)

    def test_create_tag_successful(self):
        """Test creating a new tag"""
        payload = {'name': 'test tag'}
        self.client.post(TAGS_URL, payload)
        exists = Tag.objects.filter(
            user=self.user, name=payload['name']
        ).exists()
        self.assertTrue(exists)

    def test_create_tag_invalid(self):
        """Test creating a tag with an invalid (empty) name fails"""
        payload = {'name': ''}
        res = self.client.post(TAGS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_tags_assigned_to_recipes(self):
        """Test filtering tags by those assigned to recipes"""
        tag1 = Tag.objects.create(user=self.user, name='Breakfast')
        tag2 = Tag.objects.create(user=self.user, name='Lunch')
        recipe = Recipe.objects.create(
            title='Recipe1',
            time_minutes=10,
            price=5.90,
            user=self.user
        )
        recipe.tags.add(tag1)
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        serializer1 = TagSerializer(tag1)
        serializer2 = TagSerializer(tag2)
        # Only the tag attached to a recipe should be returned.
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

    @skip('Implementation does not work correctly')
    def test_retrieve_tags_assigned_unique(self):
        """Test filtering tags by assigned returns unique items"""
        tag1 = Tag.objects.create(user=self.user, name='Tag1')
        tag2 = Tag.objects.create(user=self.user, name='Tag2')
        recipe1 = Recipe.objects.create(
            title='Recipe1',
            time_minutes=10,
            price=5.90,
            user=self.user
        )
        recipe2 = Recipe.objects.create(
            title='Recipe2',
            time_minutes=10,
            price=5.90,
            user=self.user
        )
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        self.assertEqual(len(res.data), 1)
| StarcoderdataPython |
12509 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow_probability import distributions as tfd
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from ncp import tools
def network(inputs, config):
  """Build the regression head: per-example mean, noise std and OOD logit."""
  features = inputs
  # Shared fully connected trunk.
  for width in config.layer_sizes:
    features = tf.layers.dense(features, width, tf.nn.leaky_relu)
  mean = tf.layers.dense(features, 1)
  # Softplus keeps the predicted noise positive; epsilon avoids a zero std.
  noise = tf.layers.dense(features, 1, tf.nn.softplus) + 1e-6
  uncertainty = tf.layers.dense(features, 1, None)
  return mean, noise, uncertainty
def define_graph(config):
  """Build the NCP training graph and return all graph handles.

  Returns an AttrDict of every local variable, so the local names below
  (inputs, targets, loss, optimize, ...) ARE the public interface — do not
  rename them without updating all callers.
  """
  # Template shares weights between the clean and the OOD forward pass.
  network_tpl = tf.make_template('network', network, config=config)
  inputs = tf.placeholder(tf.float32, [None, config.num_inputs])
  targets = tf.placeholder(tf.float32, [None, 1])
  num_visible = tf.placeholder(tf.int32, [])
  batch_size = tf.to_float(tf.shape(inputs)[0])
  data_mean, data_noise, data_uncertainty = network_tpl(inputs)
  # OOD samples are the inputs perturbed by Gaussian noise of std noise_std.
  ood_inputs = inputs + tf.random_normal(
      tf.shape(inputs), 0.0, config.noise_std)
  ood_mean, ood_noise, ood_uncertainty = network_tpl(ood_inputs)
  # NLL of targets on clean data, plus a classifier loss pushing the
  # uncertainty logit to 0 on clean inputs and 1 on OOD inputs.
  losses = [
      -tfd.Normal(data_mean, data_noise).log_prob(targets),
      -tfd.Bernoulli(data_uncertainty).log_prob(0),
      -tfd.Bernoulli(ood_uncertainty).log_prob(1),
  ]
  if config.center_at_target:
    losses.append(-tfd.Normal(ood_mean, ood_noise).log_prob(targets))
  loss = sum(tf.reduce_sum(loss) for loss in losses) / batch_size
  optimizer = tf.train.AdamOptimizer(config.learning_rate)
  gradients, variables = zip(*optimizer.compute_gradients(
      loss, colocate_gradients_with_ops=True))
  if config.clip_gradient:
    gradients, _ = tf.clip_by_global_norm(gradients, config.clip_gradient)
  optimize = optimizer.apply_gradients(zip(gradients, variables))
  # Convert the logit to a probability and blend the predictions toward the
  # prior (mean 0, std 0.1) in proportion to the predicted uncertainty.
  data_uncertainty = tf.sigmoid(data_uncertainty)
  if not config.center_at_target:
    data_mean = (1 - data_uncertainty) * data_mean + data_uncertainty * 0
  data_noise = (1 - data_uncertainty) * data_noise + data_uncertainty * 0.1
  return tools.AttrDict(locals())
| StarcoderdataPython |
11310764 | <filename>rotkehlchen/inquirer.py
from __future__ import unicode_literals # isort:skip
import logging
import operator
from enum import auto
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, NamedTuple, Optional, Tuple, Union
from rotkehlchen.assets.asset import Asset, EthereumToken
from rotkehlchen.chain.ethereum.contracts import EthereumContract
from rotkehlchen.chain.ethereum.defi.curve_pools import get_curve_pools
from rotkehlchen.chain.ethereum.defi.price import handle_defi_price_query
from rotkehlchen.chain.ethereum.utils import multicall_2, token_normalized_value_decimals
from rotkehlchen.constants import CURRENCYCONVERTER_API_KEY, ZERO
from rotkehlchen.constants.assets import (
A_3CRV,
A_ALINK_V1,
A_BSQ,
A_BTC,
A_CRV_3CRV,
A_CRV_3CRVSUSD,
A_CRV_GUSD,
A_CRV_RENWBTC,
A_CRV_YPAX,
A_CRVP_DAIUSDCTBUSD,
A_CRVP_DAIUSDCTTUSD,
A_CRVP_RENWSBTC,
A_DAI,
A_ETH,
A_FARM_CRVRENWBTC,
A_FARM_DAI,
A_FARM_RENBTC,
A_FARM_TUSD,
A_FARM_USDC,
A_FARM_USDT,
A_FARM_WBTC,
A_FARM_WETH,
A_GUSD,
A_KFEE,
A_TUSD,
A_USD,
A_USDC,
A_USDT,
A_WETH,
A_YFI,
A_YV1_3CRV,
A_YV1_ALINK,
A_YV1_DAI,
A_YV1_DAIUSDCTBUSD,
A_YV1_DAIUSDCTTUSD,
A_YV1_GUSD,
A_YV1_RENWSBTC,
A_YV1_TUSD,
A_YV1_USDC,
A_YV1_USDT,
A_YV1_WETH,
A_YV1_YFI,
)
from rotkehlchen.constants.ethereum import CURVE_POOL_ABI, YEARN_VAULT_V2_ABI
from rotkehlchen.constants.timing import DAY_IN_SECONDS, MONTH_IN_SECONDS
from rotkehlchen.errors.asset import UnknownAsset
from rotkehlchen.errors.defi import DefiPoolError
from rotkehlchen.errors.misc import BlockchainQueryError, RemoteError, UnableToDecryptRemoteData
from rotkehlchen.errors.price import PriceQueryUnsupportedAsset
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.externalapis.bisq_market import get_bisq_market_price
from rotkehlchen.externalapis.xratescom import (
get_current_xratescom_exchange_rates,
get_historical_xratescom_exchange_rates,
)
from rotkehlchen.fval import FVal
from rotkehlchen.globaldb.handler import GlobalDBHandler
from rotkehlchen.history.types import HistoricalPrice, HistoricalPriceOracle
from rotkehlchen.interfaces import CurrentPriceOracleInterface
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import (
CURVE_POOL_PROTOCOL,
UNISWAP_PROTOCOL,
YEARN_VAULTS_V2_PROTOCOL,
KnownProtocolsAssets,
Price,
Timestamp,
)
from rotkehlchen.utils.misc import timestamp_to_daystart_timestamp, ts_now
from rotkehlchen.utils.mixins.serializableenum import SerializableEnumMixin
from rotkehlchen.utils.network import request_get_dict
if TYPE_CHECKING:
from rotkehlchen.chain.ethereum.manager import EthereumManager
from rotkehlchen.chain.ethereum.oracles.saddle import SaddleOracle
from rotkehlchen.chain.ethereum.oracles.uniswap import UniswapV2Oracle, UniswapV3Oracle
from rotkehlchen.externalapis.coingecko import Coingecko
from rotkehlchen.externalapis.cryptocompare import Cryptocompare
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)

# How long (in seconds) a cached current price stays valid before re-querying.
CURRENT_PRICE_CACHE_SECS = 300  # 5 mins

# Fixed BSQ colored-coin rate: 100 satoshi (1e-6 BTC) per BSQ.
BTC_PER_BSQ = FVal('0.00000100')

# Wrapped/derivative tokens whose USD price is taken directly from BTC's.
ASSETS_UNDERLYING_BTC = (
    A_YV1_RENWSBTC,
    A_FARM_CRVRENWBTC,
    A_FARM_RENBTC,
    A_FARM_WBTC,
    A_CRV_RENWBTC,
    A_CRVP_RENWSBTC,
)

# Union of the concrete oracle classes Inquirer can hold. Forward references,
# since these classes are imported only under TYPE_CHECKING.
CurrentPriceOracleInstance = Union[
    'Coingecko',
    'Cryptocompare',
    'UniswapV3Oracle',
    'UniswapV2Oracle',
    'SaddleOracle',
]
def _check_curve_contract_call(decoded: Tuple[Any, ...]) -> bool:
"""
Checks the result of decoding curve contract methods to verify:
- The result is a tuple
- It should return only one value
- The value should be an integer
Returns true if the decode was correct
"""
return (
isinstance(decoded, tuple) and
len(decoded) == 1 and
isinstance(decoded[0], int)
)
class CurrentPriceOracle(SerializableEnumMixin):
    """Supported oracles for querying current prices"""
    # Centralized price APIs
    COINGECKO = auto()
    CRYPTOCOMPARE = auto()
    # On-chain oracles
    UNISWAPV2 = auto()
    UNISWAPV3 = auto()
    SADDLE = auto()
# Default priority in which current-price oracles are queried: the
# centralized APIs first, then the on-chain oracles as fallbacks.
DEFAULT_CURRENT_PRICE_ORACLES_ORDER = [
    CurrentPriceOracle.COINGECKO,
    CurrentPriceOracle.CRYPTOCOMPARE,
    CurrentPriceOracle.UNISWAPV2,
    CurrentPriceOracle.UNISWAPV3,
    CurrentPriceOracle.SADDLE,
]
def get_underlying_asset_price(token: EthereumToken) -> Optional[Price]:
    """Gets the underlying asset price for the given ethereum token

    Returns None if no protocol/underlying mapping yields a price.

    TODO: This should be eventually pulled from the assets DB. All of these
    need to be updated, to contain proper protocol, and underlying assets.

    This function is neither in inquirer.py or chain/ethereum/defi.py
    due to recursive import problems
    """
    price = None
    # First try a protocol-specific pricing method (LP/pool/vault pricing).
    if token.protocol == UNISWAP_PROTOCOL:
        price = Inquirer().find_uniswap_v2_lp_price(token)
    elif token.protocol == CURVE_POOL_PROTOCOL:
        price = Inquirer().find_curve_pool_price(token)
    elif token.protocol == YEARN_VAULTS_V2_PROTOCOL:
        price = Inquirer().find_yearn_price(token)

    # Hardcoded wrapper-token -> underlying-asset mapping. NOTE: this chain
    # can overwrite a protocol price computed above for the listed tokens.
    if token == A_YV1_ALINK:
        price = Inquirer().find_usd_price(A_ALINK_V1)
    elif token == A_YV1_GUSD:
        price = Inquirer().find_usd_price(A_GUSD)
    elif token in (A_YV1_DAI, A_FARM_DAI):
        price = Inquirer().find_usd_price(A_DAI)
    elif token in (A_FARM_WETH, A_YV1_WETH):
        price = Inquirer().find_usd_price(A_ETH)
    elif token == A_YV1_YFI:
        price = Inquirer().find_usd_price(A_YFI)
    elif token in (A_FARM_USDT, A_YV1_USDT):
        price = Inquirer().find_usd_price(A_USDT)
    elif token in (A_FARM_USDC, A_YV1_USDC):
        price = Inquirer().find_usd_price(A_USDC)
    elif token in (A_FARM_TUSD, A_YV1_TUSD):
        price = Inquirer().find_usd_price(A_TUSD)
    elif token in ASSETS_UNDERLYING_BTC:
        price = Inquirer().find_usd_price(A_BTC)

    # At this point we have to return the price if it's not None. If we don't do this and got
    # a price for a token that has underlying assets, the code will enter the if statement after
    # this block and the value for price will change becoming incorrect.
    if price is not None:
        return price

    # Fall back to a weighted sum of the prices of the token's underlying
    # tokens, as registered in the global DB.
    custom_token = GlobalDBHandler().get_ethereum_token(token.ethereum_address)
    if custom_token and custom_token.underlying_tokens is not None:
        usd_price = ZERO
        for underlying_token in custom_token.underlying_tokens:
            token = EthereumToken(underlying_token.address)
            usd_price += Inquirer().find_usd_price(token) * underlying_token.weight
        if usd_price != Price(ZERO):
            price = Price(usd_price)

    return price
def _query_currency_converterapi(base: Asset, quote: Asset) -> Optional[Price]:
    """Query free.currencyconverterapi.com for a fiat exchange rate.

    Returns the price of `base` expressed in `quote`, or None when the remote
    query fails or the payload lacks the requested pair.
    """
    assert base.is_fiat(), 'fiat currency should have been provided'
    assert quote.is_fiat(), 'fiat currency should have been provided'
    log.debug(
        'Query free.currencyconverterapi.com fiat pair',
        base_currency=base.identifier,
        quote_currency=quote.identifier,
    )
    # The API keys its response on the "<BASE>_<QUOTE>" pair string.
    pair_key = f'{base.identifier}_{quote.identifier}'
    url = (
        f'https://free.currconv.com/api/v7/convert?'
        f'q={pair_key}&compact=ultra&apiKey={CURRENCYCONVERTER_API_KEY}'
    )
    try:
        response = request_get_dict(url)
        return Price(FVal(response[pair_key]))
    except (ValueError, RemoteError, KeyError, UnableToDecryptRemoteData):
        log.error(
            'Querying free.currencyconverterapi.com fiat pair failed',
            base_currency=base.identifier,
            quote_currency=quote.identifier,
        )
        return None
class CachedPriceEntry(NamedTuple):
    """A cached current price together with the timestamp it was queried at."""
    price: Price
    # Query timestamp -- entries older than CURRENT_PRICE_CACHE_SECS are stale.
    time: Timestamp
class Inquirer():
    """Singleton price-query service: __new__ always returns the one shared
    instance and all state is kept on the class itself.
    """
    __instance: Optional['Inquirer'] = None
    _cached_forex_data: Dict
    _cached_current_price: Dict  # Can't use CacheableMixIn due to Singleton
    _data_directory: Path
    _cryptocompare: 'Cryptocompare'
    _coingecko: 'Coingecko'
    # The on-chain oracles are optional and injected later via add_defi_oracles()
    _uniswapv2: Optional['UniswapV2Oracle'] = None
    _uniswapv3: Optional['UniswapV3Oracle'] = None
    _saddle: Optional['SaddleOracle'] = None
    # Injected later via inject_ethereum()
    _ethereum: Optional['EthereumManager'] = None
    # Oracle priority order and the matching resolved instances;
    # set via set_oracles_order()
    _oracles: Optional[List[CurrentPriceOracle]] = None
    _oracle_instances: Optional[List[CurrentPriceOracleInstance]] = None
    special_tokens: List[EthereumToken]
def __new__(
cls,
data_dir: Path = None,
cryptocompare: 'Cryptocompare' = None,
coingecko: 'Coingecko' = None,
) -> 'Inquirer':
if Inquirer.__instance is not None:
return Inquirer.__instance
assert data_dir, 'arguments should be given at the first instantiation'
assert cryptocompare, 'arguments should be given at the first instantiation'
assert coingecko, 'arguments should be given at the first instantiation'
Inquirer.__instance = object.__new__(cls)
Inquirer.__instance._data_directory = data_dir
Inquirer._cryptocompare = cryptocompare
Inquirer._coingecko = coingecko
Inquirer._cached_current_price = {}
Inquirer.special_tokens = [
A_YV1_DAIUSDCTBUSD,
A_CRVP_DAIUSDCTBUSD,
A_CRVP_DAIUSDCTTUSD,
A_YV1_DAIUSDCTTUSD,
A_YV1_DAIUSDCTTUSD,
A_CRVP_RENWSBTC,
A_YV1_RENWSBTC,
A_CRV_RENWBTC,
A_CRV_YPAX,
A_CRV_GUSD,
A_CRV_3CRV,
A_YV1_3CRV,
A_CRV_3CRVSUSD,
A_YV1_ALINK,
A_YV1_DAI,
A_YV1_WETH,
A_YV1_YFI,
A_YV1_USDT,
A_YV1_USDC,
A_YV1_TUSD,
A_YV1_GUSD,
A_FARM_USDC,
A_FARM_USDT,
A_FARM_DAI,
A_FARM_TUSD,
A_FARM_WETH,
A_FARM_WBTC,
A_FARM_RENBTC,
A_FARM_CRVRENWBTC,
A_3CRV,
]
return Inquirer.__instance
    @staticmethod
    def inject_ethereum(ethereum: 'EthereumManager') -> None:
        """Give the singleton access to an ethereum manager for on-chain queries."""
        Inquirer()._ethereum = ethereum
    @staticmethod
    def add_defi_oracles(
        uniswap_v2: Optional['UniswapV2Oracle'],
        uniswap_v3: Optional['UniswapV3Oracle'],
        saddle: Optional['SaddleOracle'],
    ) -> None:
        """Register the optional on-chain price oracles on the singleton."""
        Inquirer()._uniswapv2 = uniswap_v2
        Inquirer()._uniswapv3 = uniswap_v3
        Inquirer()._saddle = saddle
@staticmethod
def get_cached_current_price_entry(cache_key: Tuple[Asset, Asset]) -> Optional[CachedPriceEntry]: # noqa: E501
cache = Inquirer()._cached_current_price.get(cache_key, None)
if cache is None or ts_now() - cache.time > CURRENT_PRICE_CACHE_SECS:
return None
return cache
@staticmethod
def set_oracles_order(oracles: List[CurrentPriceOracle]) -> None:
assert len(oracles) != 0 and len(oracles) == len(set(oracles)), (
'Oracles can\'t be empty or have repeated items'
)
instance = Inquirer()
instance._oracles = oracles
instance._oracle_instances = [getattr(instance, f'_{str(oracle)}') for oracle in oracles]
    @staticmethod
    def _query_oracle_instances(
            from_asset: Asset,
            to_asset: Asset,
    ) -> Price:
        """Ask the configured oracles, in priority order, for the pair's price.

        The first non-zero answer wins. The result (possibly ZERO when every
        oracle failed) is written to the current-price cache before returning.
        """
        instance = Inquirer()
        cache_key = (from_asset, to_asset)
        oracles = instance._oracles
        oracle_instances = instance._oracle_instances
        assert isinstance(oracles, list) and isinstance(oracle_instances, list), (
            'Inquirer should never be called before the setting the oracles'
        )
        price = Price(ZERO)
        for oracle, oracle_instance in zip(oracles, oracle_instances):
            # Skip oracles that recently hit their rate limit.
            if (
                isinstance(oracle_instance, CurrentPriceOracleInterface) and
                oracle_instance.rate_limited_in_last() is True
            ):
                continue

            try:
                price = oracle_instance.query_current_price(
                    from_asset=from_asset,
                    to_asset=to_asset,
                )
            except (DefiPoolError, PriceQueryUnsupportedAsset, RemoteError) as e:
                # Log and fall through to the next oracle in the order.
                log.error(
                    f'Current price oracle {oracle} failed to request {to_asset.identifier} '
                    f'price for {from_asset.identifier} due to: {str(e)}.',
                )
                continue

            if price != Price(ZERO):
                log.debug(
                    f'Current price oracle {oracle} got price',
                    from_asset=from_asset,
                    to_asset=to_asset,
                    price=price,
                )
                break

        Inquirer._cached_current_price[cache_key] = CachedPriceEntry(price=price, time=ts_now())
        return price
@staticmethod
def find_price(
from_asset: Asset,
to_asset: Asset,
ignore_cache: bool = False,
) -> Price:
"""Returns the current price of 'from_asset' in 'to_asset' valuation.
NB: prices for special symbols in any currency but USD are not supported.
Returns Price(ZERO) if all options have been exhausted and errors are logged in the logs
"""
if from_asset == to_asset:
return Price(FVal('1'))
instance = Inquirer()
if to_asset == A_USD:
return instance.find_usd_price(asset=from_asset, ignore_cache=ignore_cache)
if ignore_cache is False:
cache = instance.get_cached_current_price_entry(cache_key=(from_asset, to_asset))
if cache is not None:
return cache.price
oracle_price = instance._query_oracle_instances(from_asset=from_asset, to_asset=to_asset)
return oracle_price
@staticmethod
def find_usd_price(
        asset: Asset,
        ignore_cache: bool = False,
) -> Price:
    """Returns the current USD price of the asset.

    Returns Price(ZERO) if all options have been exhausted and errors are
    logged in the logs.

    Fixes over the previous version:
    - ``token`` is initialized before the lookup ``try`` block so a raised
      ``UnknownAsset`` can no longer leave it unbound (the later
      ``assert token`` would have raised NameError instead of AssertionError).
    - In the BSQ branch, a failure of ``get_bisq_market_price`` itself no
      longer references the never-assigned ``price_in_btc`` (NameError);
      Price(ZERO) is returned instead.
    """
    if asset == A_USD:
        return Price(FVal(1))

    instance = Inquirer()
    cache_key = (asset, A_USD)
    if ignore_cache is False:
        cache = instance.get_cached_current_price_entry(cache_key=cache_key)
        if cache is not None:
            return cache.price

    if asset.is_fiat():
        try:
            return instance._query_fiat_pair(base=asset, quote=A_USD)
        except RemoteError:
            pass  # continue, a price can be found by one of the oracles (CC for example)

    # Try and check if it is an ethereum token with specified protocol or underlying tokens
    is_known_protocol = False
    underlying_tokens = None
    token = None  # initialized so the checks below never hit an unbound name
    try:
        token = EthereumToken.from_asset(asset)
        if token is not None:
            if token.protocol is not None:
                is_known_protocol = token.protocol in KnownProtocolsAssets
            underlying_tokens = GlobalDBHandler().get_ethereum_token(  # type: ignore
                token.ethereum_address,
            ).underlying_tokens
    except UnknownAsset:
        pass

    # Check if it is a special token whose price comes from a DeFi query.
    if asset in instance.special_tokens:
        ethereum = instance._ethereum
        assert ethereum, 'Inquirer should never be called before the injection of ethereum'
        assert token, 'all assets in special tokens are already ethereum tokens'
        underlying_asset_price = get_underlying_asset_price(token)
        usd_price = handle_defi_price_query(
            ethereum=ethereum,
            token=token,
            underlying_asset_price=underlying_asset_price,
        )
        if usd_price is None:
            price = Price(ZERO)
        else:
            price = Price(usd_price)
        Inquirer._cached_current_price[cache_key] = CachedPriceEntry(price=price, time=ts_now())  # noqa: E501
        return price

    # Known-protocol tokens and tokens with underlying assets are priced
    # through their underlying assets.
    if is_known_protocol is True or underlying_tokens is not None:
        assert token is not None
        result = get_underlying_asset_price(token)
        if result is None:
            usd_price = Price(ZERO)
            if instance._ethereum is not None:
                instance._ethereum.msg_aggregator.add_warning(
                    f'Could not find price for {token}',
                )
        else:
            usd_price = Price(result)
        Inquirer._cached_current_price[cache_key] = CachedPriceEntry(
            price=usd_price,
            time=ts_now(),
        )
        return usd_price

    # BSQ is a special asset that doesnt have oracle information but its custom API
    if asset == A_BSQ:
        try:
            price_in_btc = get_bisq_market_price(asset)
        except (RemoteError, DeserializationError) as e:
            # No BSQ/BTC quote at all -> no price information available.
            msg = f'Could not find price for BSQ. {str(e)}'
            if instance._ethereum is not None:
                instance._ethereum.msg_aggregator.add_warning(msg)
            return Price(ZERO)
        try:
            btc_price = Inquirer().find_usd_price(A_BTC)
            usd_price = Price(price_in_btc * btc_price)
            Inquirer._cached_current_price[cache_key] = CachedPriceEntry(
                price=usd_price,
                time=ts_now(),
            )
            return usd_price
        except (RemoteError, DeserializationError) as e:
            msg = f'Could not find price for BSQ. {str(e)}'
            if instance._ethereum is not None:
                instance._ethereum.msg_aggregator.add_warning(msg)
            # Best-effort fallback preserved from the original code; only
            # reachable now that price_in_btc is guaranteed to be bound.
            return Price(BTC_PER_BSQ * price_in_btc)

    if asset == A_KFEE:
        # KFEE is a kraken special asset where 1000 KFEE = 10 USD
        return Price(FVal(0.01))

    return instance._query_oracle_instances(from_asset=asset, to_asset=A_USD)
def find_uniswap_v2_lp_price(
        self,
        token: EthereumToken,
) -> Optional[Price]:
    """Return the USD price of one Uniswap v2 LP token, or None on failure.

    Delegates to the uniswap module helper, pricing constituents via
    ``self.find_usd_price``.
    """
    assert self._ethereum is not None, 'Inquirer ethereum manager should have been initialized'  # noqa: E501
    # BAD BAD BAD. TODO: Need to rethinking placement of modules here
    from rotkehlchen.chain.ethereum.modules.uniswap.utils import find_uniswap_v2_lp_price  # isort:skip # noqa: E501 # pylint: disable=import-outside-toplevel
    return find_uniswap_v2_lp_price(
        ethereum=self._ethereum,
        token=token,
        token_price_func=self.find_usd_price,
        token_price_func_args=[],
        block_identifier='latest',
    )
def find_curve_pool_price(
        self,
        lp_token: EthereumToken,
) -> Optional[Price]:
    """
    1. Obtain the pool for this token
    2. Obtain prices for assets in pool
    3. Obtain the virtual price for share and the balances of each
    token in the pool
    4. Calc the price for a share

    Returns the price of 1 LP token from the pool, or None on any failure
    (unknown pool, unknown constituent asset, failed contract calls).
    """
    assert self._ethereum is not None, 'Inquirer ethereum manager should have been initialized'  # noqa: E501

    pools = get_curve_pools()
    if lp_token.ethereum_address not in pools:
        return None
    pool = pools[lp_token.ethereum_address]
    tokens = []
    # Translate addresses to tokens; the sentinel address denotes native ETH,
    # which is priced via WETH.
    try:
        for asset in pool.assets:
            if asset == '0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE':
                tokens.append(A_WETH)
            else:
                tokens.append(EthereumToken(asset))
    except UnknownAsset:
        return None

    # Get price for each token in the pool
    prices = []
    for token in tokens:
        price = self.find_usd_price(token)
        if price == Price(ZERO):
            log.error(
                f'Could not calculate price for {lp_token} due to inability to '
                f'fetch price for {token}.',
            )
            return None
        prices.append(price)

    # Query virtual price of LP share and balances in the pool for each token
    contract = EthereumContract(
        address=pool.pool_address,
        abi=CURVE_POOL_ABI,
        deployed_block=0,
    )
    calls = [(pool.pool_address, contract.encode(method_name='get_virtual_price'))]
    calls += [
        (pool.pool_address, contract.encode(method_name='balances', arguments=[i]))
        for i in range(len(pool.assets))
    ]
    output = multicall_2(
        ethereum=self._ethereum,
        require_success=False,
        calls=calls,
    )
    # Check that the output has the correct structure
    if not all([len(call_result) == 2 for call_result in output]):
        log.debug(
            f'Failed to query contract methods while finding curve pool price. '
            f'Not every outcome has length 2. {output}',
        )
        return None
    # Check that all the requests were successful
    if not all([contract_output[0] for contract_output in output]):
        log.debug(f'Failed to query contract methods while finding curve price. {output}')
        return None

    # Deserialize information obtained in the multicall execution
    data = []
    # https://github.com/PyCQA/pylint/issues/4739
    virtual_price_decoded = contract.decode(output[0][1], 'get_virtual_price')  # pylint: disable=unsubscriptable-object # noqa: E501
    if not _check_curve_contract_call(virtual_price_decoded):
        log.debug(f'Failed to decode get_virtual_price while finding curve price. {output}')
        return None
    # data[0] is the virtual price; data[1:] are normalized token balances.
    data.append(FVal(virtual_price_decoded[0]))  # pylint: disable=unsubscriptable-object
    for i in range(len(pool.assets)):
        amount_decoded = contract.decode(output[i + 1][1], 'balances', arguments=[i])
        if not _check_curve_contract_call(amount_decoded):
            log.debug(f'Failed to decode balances {i} while finding curve price. {output}')
            return None
        # https://github.com/PyCQA/pylint/issues/4739
        amount = amount_decoded[0]  # pylint: disable=unsubscriptable-object
        normalized_amount = token_normalized_value_decimals(amount, tokens[i].decimals)
        data.append(normalized_amount)

    # Prices and data should verify this relation for the following operations
    if len(prices) != len(data) - 1:
        log.debug(
            f'Length of prices {len(prices)} does not match len of data {len(data)} '
            f'while querying curve pool price.',
        )
        return None

    # Total number of assets price in the pool
    total_assets_price = sum(map(operator.mul, data[1:], prices))
    if total_assets_price == 0:
        log.error(
            f'Curve pool price returned unexpected data {data} that lead to a zero price.',
        )
        return None

    # Calculate weight of each asset as the proportion of tokens value
    # NOTE(review): each weight is already a value proportion (balance * price
    # / total value); multiplying by prices again below applies price twice —
    # confirm this is the intended formula.
    weights = (data[x + 1] * prices[x] / total_assets_price for x in range(len(tokens)))
    assets_price = FVal(sum(map(operator.mul, weights, prices)))
    # Scale by the pool's virtual price and the LP token's decimals.
    return (assets_price * FVal(data[0])) / (10 ** lp_token.decimals)
def find_yearn_price(
        self,
        token: EthereumToken,
) -> Optional[Price]:
    """
    Query price for a yearn vault v2 token using the pricePerShare method
    and the price of the underlying token.

    Returns None when the vault has no single underlying token or the
    contract call fails.
    """
    assert self._ethereum is not None, 'Inquirer ethereum manager should have been initialized'  # noqa: E501

    maybe_underlying_token = GlobalDBHandler().fetch_underlying_tokens(token.ethereum_address)
    # A v2 vault is expected to wrap exactly one underlying asset.
    if maybe_underlying_token is None or len(maybe_underlying_token) != 1:
        log.error(f'Yearn vault token {token} without an underlying asset')
        return None

    underlying_token = EthereumToken(maybe_underlying_token[0].address)
    underlying_token_price = self.find_usd_price(underlying_token)
    # Get the price per share from the yearn contract
    contract = EthereumContract(
        address=token.ethereum_address,
        abi=YEARN_VAULT_V2_ABI,
        deployed_block=0,
    )
    try:
        price_per_share = contract.call(self._ethereum, 'pricePerShare')
        # vault price = share ratio * underlying USD price, descaled by decimals
        return Price(price_per_share * underlying_token_price / 10 ** token.decimals)
    except (RemoteError, BlockchainQueryError) as e:
        log.error(f'Failed to query pricePerShare method in Yearn v2 Vault. {str(e)}')
        return None
@staticmethod
def get_fiat_usd_exchange_rates(currencies: Iterable[Asset]) -> Dict[Asset, Price]:
    """Return the USD exchange rate for each of the given fiat assets.

    A rate that cannot be queried is reported as Price(ZERO) rather than
    raising, so the caller always gets a complete mapping.
    """
    rates: Dict[Asset, Price] = {A_USD: Price(FVal(1))}
    for fiat_asset in currencies:
        try:
            rate = Inquirer()._query_fiat_pair(A_USD, fiat_asset)
        except RemoteError:
            rate = Price(ZERO)
        rates[fiat_asset] = rate
    return rates
@staticmethod
def query_historical_fiat_exchange_rates(
        from_fiat_currency: Asset,
        to_fiat_currency: Asset,
        timestamp: Timestamp,
) -> Optional[Price]:
    """Return the historical rate between two fiat currencies at ``timestamp``.

    Checks the global DB cache first; otherwise queries xratescom, caches
    every returned pair at the UTC day start, and returns the requested rate.

    Returns None on remote failure — and now also when the remote response
    does not contain ``to_fiat_currency`` at all (previously this path
    raised NameError on the unbound ``rate`` local).
    """
    assert from_fiat_currency.is_fiat(), 'fiat currency should have been provided'
    assert to_fiat_currency.is_fiat(), 'fiat currency should have been provided'

    # Check cache
    price_cache_entry = GlobalDBHandler().get_historical_price(
        from_asset=from_fiat_currency,
        to_asset=to_fiat_currency,
        timestamp=timestamp,
        max_seconds_distance=DAY_IN_SECONDS,
    )
    if price_cache_entry:
        return price_cache_entry.price

    try:
        prices_map = get_historical_xratescom_exchange_rates(
            from_asset=from_fiat_currency,
            time=timestamp,
        )
    except RemoteError:
        return None

    # Since xratecoms has daily rates let's save at timestamp of UTC day start
    rate = None
    for asset, asset_price in prices_map.items():
        GlobalDBHandler().add_historical_prices(entries=[HistoricalPrice(
            from_asset=from_fiat_currency,
            to_asset=asset,
            source=HistoricalPriceOracle.XRATESCOM,
            timestamp=timestamp_to_daystart_timestamp(timestamp),
            price=asset_price,
        )])
        if asset == to_fiat_currency:
            rate = asset_price

    if rate is not None:
        log.debug('Historical fiat exchange rate query succesful', rate=rate)
    return rate
@staticmethod
def _query_fiat_pair(base: Asset, quote: Asset) -> Price:
    """Queries the current price between two fiat assets.

    If a current price is not found but a cached price within 30 days is found
    then that one is used.

    May raise RemoteError if a price can not be found
    """
    if base == quote:
        return Price(FVal('1'))

    now = ts_now()
    # Check cache for a price within the last 24 hrs
    price_cache_entry = GlobalDBHandler().get_historical_price(
        from_asset=base,
        to_asset=quote,
        timestamp=now,
        max_seconds_distance=DAY_IN_SECONDS,
    )
    if price_cache_entry:
        return price_cache_entry.price

    # Use the xratescom query and save all prices in the cache
    price = None
    try:
        price_map = get_current_xratescom_exchange_rates(base)
        for quote_asset, quote_price in price_map.items():
            if quote_asset == quote:
                # if the quote asset price is found return it
                price = quote_price
            # Every returned pair is cached, not just the requested one.
            GlobalDBHandler().add_historical_prices(entries=[HistoricalPrice(
                from_asset=base,
                to_asset=quote_asset,
                source=HistoricalPriceOracle.XRATESCOM,
                timestamp=timestamp_to_daystart_timestamp(now),
                price=quote_price,
            )])
        if price:  # the quote asset may not be found
            return price
    except RemoteError:
        pass  # price remains None

    # query backup api
    price = _query_currency_converterapi(base, quote)
    if price is not None:
        return price

    # Last resort: accept a cached price from the last month.
    price_cache_entry = GlobalDBHandler().get_historical_price(
        from_asset=base,
        to_asset=quote,
        timestamp=now,
        max_seconds_distance=MONTH_IN_SECONDS,
    )
    if price_cache_entry:
        log.debug(
            f'Could not query online apis for a fiat price. '
            f'Used cached value from '
            f'{(now - price_cache_entry.timestamp) / DAY_IN_SECONDS} days ago.',
            base_currency=base.identifier,
            quote_currency=quote.identifier,
            price=price_cache_entry.price,
        )
        return price_cache_entry.price

    # else
    raise RemoteError(
        f'Could not find a current {base.identifier} price for {quote.identifier}',
    )
| StarcoderdataPython |
3330133 | <reponame>trivenews/central
import factory
from factory import fuzzy
from django.utils import timezone
from .models import User
from ..wallet.factories import WalletFactory
class UserFactory(factory.django.DjangoModelFactory):
    """Factory producing ``User`` instances with an attached wallet.

    The email lambda was corrupted by dataset anonymization (a bare
    ``<EMAIL>'`` token, a syntax error); it is restored to derive a
    unique address from the generated username.
    """

    class Meta:
        model = User

    username = factory.Sequence(lambda n: 'user%s' % n)
    first_name = fuzzy.FuzzyText(length=5)
    last_name = fuzzy.FuzzyText(length=10)
    # Unique email derived from the sequential username.
    email = factory.LazyAttribute(lambda o: '%s@example.com' % o.username)
    date_joined = factory.LazyFunction(timezone.now)
    wallet = factory.SubFactory(WalletFactory)

    @classmethod
    def _create(cls, *args, **kwargs):
        """Create the user, then link the sub-factory wallet back to its owner."""
        obj = super()._create(*args, **kwargs)
        obj.wallet.owner = obj
        obj.wallet.save()
        return obj
| StarcoderdataPython |
5042024 | <filename>urllib_request_basicauth.py
#!/usr/bin/env python3
import urllib.request
# NOTE(review): getpass and json are imported but never used below.
import getpass
import os
import json
import pandas as pd
# Pull the target Jenkins URL from row 3 of the query table.
jq = pd.read_csv('jenkins_queries.csv')
url = jq.loc[3,'URL']
# Credentials: hard-coded username plus an API token from the environment.
auth_user = "ericnelson"
auth_token = os.environ['JENKINS_TOKEN']
# HTTPBasicAuthHandler setup
passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url, auth_user, auth_token)
authhandler = urllib.request.HTTPBasicAuthHandler(passman)
opener = urllib.request.build_opener(authhandler)
# Install globally so the plain urlopen() below uses the authenticated opener.
urllib.request.install_opener(opener)
# Fetch and print the response body.
res = urllib.request.urlopen(url)
res_body = res.read()
print(res_body.decode('utf-8'))
| StarcoderdataPython |
5127760 | import numpy as np
import matplotlib.pyplot as plt
__all__ = ['tracer', 'np']
def tracer_plan_cartesien():
    """Draw Cartesian axes (x and y arrows) plus a grid on the current figure."""
    # One arrow per axis: (start_x, start_y, dx, dy).
    axis_arrows = [(-1, 0, 2, 0), (0, -1, 0, 2)]
    for start_x, start_y, dx, dy in axis_arrows:
        plt.arrow(start_x, start_y, dx, dy, width=0.005, head_width=0.05, color='k')
    plt.grid('on')
def tracer(x, y, **kwargs):
    """Plot ``y`` against ``x`` on top of freshly drawn Cartesian axes.

    Extra keyword arguments are forwarded to ``plt.plot`` (e.g. ``label=``,
    which the trailing ``plt.legend()`` call picks up).
    """
    tracer_plan_cartesien()
    plt.plot(x, y, **kwargs)
    plt.legend()
def moyenne(x):
    """Return the arithmetic mean of ``x`` (any array-like NumPy accepts)."""
    values = np.asarray(x)
    return values.mean()
def range(*args):
    """Like ``numpy.arange`` but returning a plain list.

    NOTE: this shadows the built-in ``range`` within this module, and unlike
    the builtin it supports float start/stop/step values.
    """
    return [value for value in np.arange(*args)]
5002981 | import pygame
import constants as c
import math
class Powerup:
    """A collectible that drops from above, lands with a screen shake, and
    bobs in place until a player touches it."""

    def __init__(self, game, surface, pos=(0, 0)):
        # Pickup radius in pixels; also the size of the shadow sprite.
        self.radius = 24
        self.age = 0
        self.game = game
        self.x, self.y = pos
        # Starts off-screen above its landing spot; falls until y_offset hits 0.
        self.y_offset = -self.y
        self.surface = surface
        # Semi-transparent circular drop shadow (white used as colorkey).
        self.shadow = pygame.Surface((self.radius*2, self.radius*2))
        self.shadow.fill((255, 255, 255))
        self.shadow.set_colorkey((255, 255, 255))
        self.shadow.set_alpha(40)
        self.landed = False
        pygame.draw.circle(self.shadow, (0, 0, 0), (self.radius, self.radius), self.radius)
        self.glow = self.game.get_static(c.image_path("glow.png"))

    def update(self, dt, events):
        """Advance fall/bob animation and test for pickup each frame."""
        self.age += dt
        if self.landed:
            # Gentle sinusoidal bobbing once on the ground.
            self.y_offset = 6 * math.sin(self.age * 6)
        else:
            # Fall at 600 px/s until reaching the ground (y_offset == 0).
            self.y_offset += 600*dt
            if self.y_offset >= 0:
                self.y_offset = 0
                self.landed = True
                self.game.current_scene.shake(5)
                self.game.powerup_land_noise.play()
        self.check_collisions()

    def draw(self, surface, offset=(0, 0)):
        """Draw glow, scaled shadow, and sprite (in back-to-front order)."""
        x = self.x + offset[0] - self.glow.get_width()//2
        y = self.y + offset[1] - self.glow.get_height()//2 + self.y_offset - 30
        surface.blit(self.glow, (x, y), special_flags = pygame.BLEND_RGBA_ADD)
        # Shadow shrinks with altitude (larger negative y_offset -> smaller).
        width = self.shadow.get_width()
        width -= 10
        if (int(width + self.y_offset/2)) > 0:
            shadow = pygame.transform.scale(self.shadow, (int(width + self.y_offset/2), int(width + self.y_offset/2)))
            x = self.x + offset[0] - shadow.get_width()//2
            y = self.y + offset[1] - shadow.get_height()//2
            surface.blit(shadow, (x, y))
        x = self.x + offset[0] - self.surface.get_width()//2
        y = self.y + offset[1] - self.surface.get_height()//2 + self.y_offset - 30
        surface.blit(self.surface, (x, y))

    def check_collisions(self):
        """Hand the powerup to the first overlapping player, once landed."""
        for player in self.game.current_scene.players:
            if c.mag(player.x - self.x, player.y - self.y) < player.radius + self.radius and self.landed:
                self.collected_by(player)
                break

    def collected_by(self, player):
        """Base pickup behavior: play sound and despawn; subclasses add effects."""
        self.game.powerup_collect_noise.play()
        self.game.powerups.remove(self)
class FastSpinPowerup(Powerup):
    """Pickup granting the FastSpin effect on collection."""

    def __init__(self, game, pos=(0, 0)):
        # Sprite handed straight to the base class.
        super().__init__(game, game.get_static(c.image_path("spin.png")), pos=pos)

    def collected_by(self, player):
        super().collected_by(player)
        FastSpin(player)
class SlipperySocksPowerup(Powerup):
    """Pickup granting the SlipperySocks effect on collection."""

    def __init__(self, game, pos=(0, 0)):
        # Sprite handed straight to the base class.
        super().__init__(game, game.get_static(c.image_path("socks.png")), pos=pos)

    def collected_by(self, player):
        super().collected_by(player)
        SlipperySocks(player)
class DoubleShotPowerup(Powerup):
    """Pickup granting the DoubleShot effect on collection."""

    def __init__(self, game, pos=(0, 0)):
        # Sprite handed straight to the base class.
        super().__init__(game, game.get_static(c.image_path("double.png")), pos=pos)

    def collected_by(self, player):
        super().collected_by(player)
        DoubleShot(player)
class BouncyPowerup(Powerup):
    """Pickup granting the Bouncy effect on collection."""

    def __init__(self, game, pos=(0, 0)):
        # Sprite handed straight to the base class.
        super().__init__(game, game.get_static(c.image_path("bouncy.png")), pos=pos)

    def collected_by(self, player):
        super().collected_by(player)
        Bouncy(player)
class FastShootingPowerup(Powerup):
    """Pickup granting the FastShooting effect on collection."""

    def __init__(self, game, pos=(0, 0)):
        # Sprite handed straight to the base class.
        super().__init__(game, game.get_static(c.image_path("mandible.png")), pos=pos)

    def collected_by(self, player):
        super().collected_by(player)
        FastShooting(player)
class Effect:
    """Timed status effect attached to an owner; re-applying the same effect
    type refreshes its timer instead of stacking a duplicate."""

    def __init__(self, owner):
        self.age = 0
        self.owner = owner
        # If the owner already carries an effect with this id, just reset
        # its age; otherwise register ourselves.
        for existing in self.owner.effects:
            if existing.id == self.id:
                existing.age = 0
                break
        else:
            self.owner.effects.append(self)

    def update(self, dt, events):
        """Advance the timer and expire once past the configured duration."""
        self.age += dt
        if self.age > self.duration:
            self.end()

    def end(self):
        """Detach this effect from its owner."""
        self.owner.effects.remove(self)
class FastSpin(Effect):
    """Speeds up the owner's spin (multiplier 2) for 25 seconds."""

    def __init__(self, owner):
        # id and duration must exist before Effect.__init__, which
        # deduplicates by id; the display attributes are independent.
        self.id = c.FAST_SPINNING
        self.duration = 25
        self.name = "Caffeine"
        self.description = "Spin to win"
        self.mult = 2
        super().__init__(owner)
        # Icon lookup needs self.owner, which Effect.__init__ sets.
        self.icon = pygame.transform.scale2x(
            self.owner.game.get_static(c.image_path("spin_icon.png")),
        )
class SlipperySocks(Effect):
    """Reduces the owner's traction for 18 seconds."""

    def __init__(self, owner):
        # id and duration must exist before Effect.__init__ (dedup by id).
        self.id = c.SLIPPERY_SOCKS
        self.duration = 18
        self.name = "Slippery Socks"
        self.description = "There better be a bulk discount"
        super().__init__(owner)
        # Icon lookup needs self.owner, which Effect.__init__ sets.
        self.icon = pygame.transform.scale2x(
            self.owner.game.get_static(c.image_path("socks_icon.png")),
        )
class DoubleShot(Effect):
    """Makes the owner fire two projectiles instead of one for 18 seconds."""

    def __init__(self, owner):
        # id and duration must exist before Effect.__init__ (dedup by id).
        self.id = c.DOUBLE_SHOT
        self.duration = 18
        self.name = "Double Shot"
        self.description = "For that special someone you really want to shoot twice"
        super().__init__(owner)
        # Icon lookup needs self.owner, which Effect.__init__ sets.
        self.icon = pygame.transform.scale2x(
            self.owner.game.get_static(c.image_path("double_icon.png")),
        )
class Bouncy(Effect):
    """Makes the owner's bullets bounce for 18 seconds."""

    def __init__(self, owner):
        # id and duration must exist before Effect.__init__ (dedup by id).
        self.id = c.BOUNCY
        self.duration = 18
        self.name = "Bouncy Bullets"
        self.description = "When the collision code works correctly"
        super().__init__(owner)
        # Icon lookup needs self.owner, which Effect.__init__ sets.
        self.icon = pygame.transform.scale2x(
            self.owner.game.get_static(c.image_path("bouncy_icon.png")),
        )
class FastShooting(Effect):
    """Raises the owner's fire rate for 25 seconds."""

    def __init__(self, owner):
        # id and duration must exist before Effect.__init__ (dedup by id).
        self.id = c.FAST_SHOOTING
        self.duration = 25
        self.name = "<NAME>"
        self.description = "Improves regurigation efficiency by 80% or more"
        super().__init__(owner)
        # Icon lookup needs self.owner, which Effect.__init__ sets.
        self.icon = pygame.transform.scale2x(
            self.owner.game.get_static(c.image_path("mandible_icon.png")),
        )
| StarcoderdataPython |
3220743 | <reponame>Guilehm/investir-nao-da-xp
from django.contrib import admin
from communications.models import Communication
@admin.register(Communication)
class CommunicationAdmin(admin.ModelAdmin):
    """Django admin configuration for Communication records."""

    # Columns shown in the changelist view.
    list_display = ('id', 'method', 'error', 'date_added')
    # Sidebar filters and the fields the search box matches against.
    list_filter = ('date_added', 'method')
    search_fields = ('id', 'data')
| StarcoderdataPython |
3222872 |
# Input: A pickle file with multiple things pickled in it:
# Output: A text file of python pickles in a single file
# Convert some pickles to Python
# NOTE(review): this script is Python 2 only (cPickle, ``print`` statements,
# ``file()``, ``dict.has_key``); ``string`` is imported but unused.
import cPickle
from pprint import pprint
import sys
import string
if len(sys.argv) != 3 :
    print "Usage: pickles2pythondicts.py pickleddata.pickle pythontable.py"
    sys.exit(1)
pickle_file = file(sys.argv[1], 'r')
pythontable_file = file(sys.argv[2], 'w')
# Read pickles until EOF, pretty-printing each one as a Python literal.
while 1:
    try :
        f = cPickle.load(pickle_file)
        # Unwrap records stored as {"CONTENTS": payload}.
        if (type(f)==type({}) and f.has_key("CONTENTS")) :
            print "Stripping outer shell with CONTENTS"
            f = f["CONTENTS"]
        pprint(f, pythontable_file)
    except EOFError:
        break
print("All done")
| StarcoderdataPython |
8052385 | # SPDX-License-Identifier: BSD-3-Clause
from ..util import *
__all__ = (
'sim_case',
'run_sims',
)
def _collect_sims(*, pkg):
    """Walk package directory ``pkg`` and collect registered simulation cases.

    Returns a list of ``{'name': module_name, 'cases': [Simulator, ...]}``
    dicts, one per non-package module found. Raises RuntimeError when the
    package directory does not exist.
    """
    from pkgutil import walk_packages
    from importlib import import_module
    from inspect import getmembers
    from os import path
    from amaranth.sim import Simulator

    def _case_predicate(member):
        # Matches the (Simulator, name) tuples produced by @sim_case.
        return (
            isinstance(member, tuple) and
            len(member) == 2 and
            isinstance(member[0], Simulator) and
            isinstance(member[1], str)
        )

    sims = []
    if not path.exists(pkg):
        raise RuntimeError(f'The package {pkg} does not exist, unable to attempt to import test cases')
    for _, name, is_pkg in walk_packages(path = (pkg,), prefix = f'{pkg.replace("/", ".")}.'):
        if not is_pkg:
            pkg_import = import_module(name)
            cases_variables = getmembers(pkg_import, _case_predicate)
            sims.append({
                'name' : name,
                'cases': [case for _, case in cases_variables]
            })
    return sims
def sim_case(*, domains, dut, platform = None, engine = 'pysim'):
    """Decorator factory that registers an Amaranth simulation case.

    Args:
        domains: iterable of (domain_name, clock_hz) pairs to clock.
        dut: the design under test.
        platform: optional platform forwarded to Fragment.get.
        engine: simulation engine name (default 'pysim').

    The decorated function receives (sim, dut) and yields (process, domain)
    pairs; the decorator evaluates it immediately and returns the
    ``(Simulator, case_name)`` tuple shape that ``_collect_sims`` discovers.
    """
    def _reg_sim(func):
        from amaranth.sim import Simulator
        from amaranth.hdl.ir import Fragment
        sim = Simulator(
            Fragment.get(dut, platform = platform),
            engine = engine
        )
        # One clock per requested domain; the period is 1/frequency.
        for dom, clk in domains:
            sim.add_clock(1 / clk, domain = dom)
        for case, dom in func(sim, dut):
            sim.add_sync_process(case, domain = dom)
        return (sim, getattr(func, '__name__'))
    return _reg_sim
def run_sims(*, pkg, result_dir, skip=None):
    """Run every simulation found under ``pkg``, writing VCD traces to ``result_dir``.

    One sub-directory per simulation module is created (dots in the module
    name become path separators), with one ``<case>.vcd`` per case.

    Args:
        pkg: Package directory searched for registered simulation cases.
        result_dir: Root directory that receives the per-simulation output.
        skip: Reserved for filtering; currently unused. The ``None`` default
            replaces the previous mutable ``[]`` default, which was shared
            between calls.
    """
    from os import path, makedirs
    if skip is None:
        skip = []
    # exist_ok avoids the check-then-create race of the old exists()/mkdir().
    makedirs(result_dir, exist_ok=True)
    for sim in _collect_sims(pkg = pkg):
        log(f'Running simulation {sim["name"]}...')
        out_dir = path.join(result_dir, sim['name'].replace('.', '/'))
        makedirs(out_dir, exist_ok = True)
        for case, name in sim['cases']:
            inf(f' => Running {name}')
            with case.write_vcd(path.join(out_dir, f'{name}.vcd')):
                case.reset()
                case.run()
| StarcoderdataPython |
1981990 | <reponame>ccebrecos/btc-scripting
import sys
from btc_framework.bitcoin import OP_AND, OP_OR, OP_XOR, OP_1, OP_0
from btc_framework.bitcoin import SignableTx, TxInput, TxOutput, script, \
address
if __name__ == "__main__":
    # Parse WIF-encoded keys from the command line; the first one signs.
    keys_base58 = sys.argv[1:]
    keys = [address.WIF.decode(key) for key in keys_base58]
    sign_key = keys[0]
    # UTXO being spent: (txid, output index, value in BTC).
    utxo_id = bytes().fromhex(
        "8bcd1b54979492c08fb21f2d9f70307c79277e8f60cca606f7dff5a089216399")
    utxo_vout, utxo_value = 0, 9.99499999
    fees = 0.005
    to_pay = utxo_value - fees
    to_pay_addr = address.P2PKH(public_key=sign_key.public_key)
    # create new transaction
    transaction = SignableTx()
    # add the input spending the UTXO with a P2PKH signature script
    in0 = TxInput(utxo_id, utxo_vout, script.sig.P2PKH())
    in0.script.input = in0
    transaction.add_input(in0)
    # Output locked by a trivial boolean script (OP_1 OP_AND -> always true).
    # or_script / xor_script are built for experimentation but unused below.
    and_script = script.Script([OP_1, OP_AND])
    or_script = script.Script([OP_0, OP_OR])
    xor_script = script.Script([OP_0, OP_XOR])
    transaction.add_output(TxOutput(and_script, btc=to_pay))
    # sign the first (only) input
    transaction.inputs[0].script.sign(key=sign_key.private_key)
    # print the transaction and its raw hex serialization
    print(transaction)
    print(transaction.serialize().hex())
    # SPEND THE PREVIOUS TRANSACTION
    # new UTXO params: the tx just built, output 0, minus another fee
    utxo_id, utxo_vout, to_pay = transaction.id, 0, to_pay - fees
    # create new transaction
    spendtx = SignableTx()
    # Unlocking the OP_1/OP_AND output just needs a script pushing OP_1.
    spend_script = script.Script([OP_1])
    spendtx.add_input(TxInput(utxo_id, utxo_vout, spend_script))
    # Pay back to the signer's P2PKH address.
    spendtx.add_output(TxOutput(to_pay_addr.script, btc=to_pay))
    # print the spending transaction and its raw hex serialization
    print(spendtx)
    print(spendtx.serialize().hex())
| StarcoderdataPython |
5050869 | # Author: <NAME>
# Problem 004
# Find the largest palindrome made from the product of two 3-digit numbers\
def isPalindrone(number):
    """Return True if ``number`` reads the same forwards and backwards.

    The previous digit-by-digit loop contained an unreachable ``break``
    after ``return`` and compared every pair twice; comparing the decimal
    string with its reverse is equivalent and simpler.
    """
    digits = str(number)
    return digits == digits[::-1]
# Search all products of two 3-digit numbers, keeping the largest palindrome.
# Starting the inner loop at ``i`` skips mirrored pairs (i*j == j*i), halving
# the work; since a mirror is seen only after its original, it can never be
# strictly greater than ``largest``, so the printed progression is unchanged.
largest = 0
for i in range(100, 1000):
    for j in range(i, 1000):
        product = i * j
        # Cheap comparison first; the palindrome test only runs on candidates.
        if product > largest and isPalindrone(product):
            print("The new largest palindrome: " + str(product))
            largest = product
print("The largest palindrome is: " + str(largest))
| StarcoderdataPython |
1786062 | <reponame>sebnil/internet-uptime
import time
import urllib.request
from datetime import date
def internet_on(timeout=5):
    """Probe connectivity by fetching http://google.se.

    Returns 1 on success and 0 on any failure, so callers can use the result
    both as a boolean and directly in the CSV log line.

    Args:
        timeout: Seconds to wait for the HTTP response (default 5, matching
            the previously hard-coded value).
    """
    try:
        urllib.request.urlopen('http://google.se', timeout=timeout)
        return 1
    except urllib.request.URLError:
        print('URLError')
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a failed probe.
        print('could not do urlopen')
    return 0
if __name__ == "__main__":
    now = date.today()
    # One CSV per day, e.g. "2021-03-05 uptime.csv", opened in append mode.
    filename = '{:04d}-{:02d}-{:02d} uptime.csv'.format(now.year, now.month, now.day)
    with open(filename, 'a') as f:
        # Retry the probe up to 5 times before recording a failure.
        for i in range(0, 5):
            internet_ok = internet_on()
            if internet_ok:
                break
            else:
                print('internet not ok. loop {}. trying again'.format(i))
        iso_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
        # Row format: unix timestamp, ISO UTC time, 1/0 status.
        line = '{:.6f},{},{}\n'.format(time.time(), iso_time, internet_ok)
        print(line)
        f.write(line)
        f.flush()
| StarcoderdataPython |
8081734 | <reponame>LpLegend/zenml<gh_stars>1-10
import inspect
from abc import abstractmethod
from tfx.orchestration import metadata
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from playground.datasources.base_datasource import BaseDatasource
from playground.steps.base_step import BaseStep
from playground.utils.annotations import GenericType
from playground.utils.exceptions import PipelineInterfaceError
# Runtime-generated annotation marker types: ``Datasource`` and ``Step``
# subclass GenericType and declare which concrete base classes they accept.
# BasePipelineMeta uses isinstance checks against these to classify the
# annotated arguments of a pipeline's ``connect`` method.
Datasource = type("Datasource",
                  (GenericType,),
                  {"VALID_TYPES": [BaseDatasource]})
Step = type("Step",
            (GenericType,),
            {"VALID_TYPES": [BaseStep]})
class BasePipelineMeta(type):
    """Metaclass that inspects a pipeline's ``connect`` signature.

    At class-creation time it reads the type annotations of ``connect`` and
    records each argument in ``DATASOURCE_SPEC`` or ``STEP_SPEC`` depending
    on whether the annotation is a Datasource or Step marker; any other
    annotation is rejected.
    """

    def __new__(mcs, name, bases, dct):
        cls = super().__new__(mcs, name, bases, dct)
        # Fresh specs per subclass so they are not shared through inheritance.
        cls.DATASOURCE_SPEC = dict()
        cls.STEP_SPEC = dict()
        connect_spec = inspect.getfullargspec(cls.connect)
        connect_args = connect_spec.args
        # Drop the implicit ``self`` so only real inputs are classified.
        if connect_args and connect_args[0] == "self":
            connect_args.pop(0)
        for arg in connect_args:
            arg_type = connect_spec.annotations.get(arg, None)
            if isinstance(arg_type, Datasource):
                cls.DATASOURCE_SPEC.update({arg: arg_type.type})
            elif isinstance(arg_type, Step):
                cls.STEP_SPEC.update({arg: arg_type.type})
            else:
                # Unannotated or foreign-typed arguments are not allowed.
                raise PipelineInterfaceError("")  # TODO: fill message
        return cls
class BasePipeline(metaclass=BasePipelineMeta):
    """Base pipeline: wires named datasources and steps into a TFX pipeline.

    Subclasses implement ``connect`` with annotated arguments; the metaclass
    records which arguments are datasources vs. steps, and ``__init__``
    validates the supplied keyword arguments against those specs.
    """

    def __init__(self, *args, **kwargs):
        self.__steps = dict()
        self.__datasources = dict()
        # Only keyword arguments are accepted so each input maps to a spec name.
        if args:
            raise PipelineInterfaceError("")  # TODO: Fill
        for k, v in kwargs.items():
            assert k in self.STEP_SPEC or k in self.DATASOURCE_SPEC
            if k in self.STEP_SPEC:
                self.__steps.update({k: v})  # TODO: assert class
            elif k in self.DATASOURCE_SPEC:
                self.__datasources.update({k: v})
            else:
                raise PipelineInterfaceError("")  # TODO: Fill

    @abstractmethod
    def connect(self, *args, **kwargs):
        """Subclass hook: wire the datasources and steps together."""
        pass

    def run(self):
        """Build and execute the pipeline with TFX's LocalDagRunner.

        NOTE(review): the importer source, pipeline name, root and metadata
        DB paths are hard-coded for a local test setup — parameterize before
        any real use.
        """
        from tfx.dsl.components.common.importer import Importer
        from playground.artifacts.data_artifacts import CSVArtifact
        data = Importer(
            source_uri="/home/baris/Maiot/zenml/local_test/data",
            artifact_type=CSVArtifact).with_id("datasource")
        # Hand the importer output plus all configured steps to connect().
        self.connect(datasource=data.outputs.result, **self.__steps)
        step_list = [data] + \
                    [s.get_component() for s in self.__steps.values()]
        created_pipeline = tfx_pipeline.Pipeline(
            pipeline_name='pipeline_name',
            pipeline_root='/home/baris/Maiot/zenml/local_test/new_zenml/',
            components=step_list,
            enable_cache=False,
            metadata_connection_config=metadata.sqlite_metadata_connection_config(
                '/home/baris/Maiot/zenml/local_test/new_zenml/db'),
            beam_pipeline_args=[
                '--direct_running_mode=multi_processing',
                '--direct_num_workers=0'])
        LocalDagRunner().run(created_pipeline)
| StarcoderdataPython |
3301005 | import struct
from typing import Tuple, Optional, Union
from bxcommon.utils.blockchain_utils.ont.ont_object_hash import OntObjectHash
from bxgateway import ont_constants
from bxgateway.messages.ont.ont_message import OntMessage
from bxgateway.messages.ont.ont_message_type import OntMessageType
class GetDataOntMessage(OntMessage):
    """Ontology ``getdata`` message: requests one inventory item (e.g. a block) by hash."""

    MESSAGE_TYPE = OntMessageType.GET_DATA

    def __init__(self, magic: Optional[int] = None, inv_type: Optional[Union[int, bytes]] = None,
                 block: Optional[OntObjectHash] = None, buf: Optional[bytearray] = None):
        # Two construction modes: serialize a new payload from the given
        # fields, or wrap an already-serialized buffer for lazy parsing.
        if buf is None:
            buf = bytearray(ont_constants.ONT_HDR_COMMON_OFF + ont_constants.ONT_GET_DATA_MSG_LEN)
            self.buf = buf
            off = ont_constants.ONT_HDR_COMMON_OFF
            # Inventory type is a single byte; accept an int or raw bytes.
            if isinstance(inv_type, int):
                struct.pack_into("<B", buf, off, inv_type)
            else:
                # pyre-fixme[6]: Expected `Union[typing.Iterable[int], bytes]` for
                #  2nd param but got `Optional[bytes]`.
                buf[off: off + ont_constants.ONT_CHAR_LEN] = inv_type
            off += ont_constants.ONT_CHAR_LEN
            assert block is not None
            # The object hash is serialized big-endian on the wire.
            buf[off:off + ont_constants.ONT_HASH_LEN] = block.get_big_endian()
            off += ont_constants.ONT_HASH_LEN
            super().__init__(magic, self.MESSAGE_TYPE, off - ont_constants.ONT_HDR_COMMON_OFF, buf)
        else:
            self.buf = buf
            self._memoryview = memoryview(buf)
            # Header/payload fields are parsed lazily by accessors.
            self._magic = self._command = self._payload_len = self._checksum = None
            self._payload = None
            self._inv_type = None
            self._block = None

    def inv_type(self) -> Tuple[int, OntObjectHash]:
        """Return (inventory type byte, requested object hash), parsing once and caching."""
        if self._inv_type is None:
            off = ont_constants.ONT_HDR_COMMON_OFF
            self._inv_type, = struct.unpack_from("<B", self.buf, off)
            off += ont_constants.ONT_CHAR_LEN
            self._block = OntObjectHash(buf=self.buf, offset=off, length=ont_constants.ONT_HASH_LEN)
        inv_type, block = self._inv_type, self._block
        assert isinstance(inv_type, int)
        assert isinstance(block, OntObjectHash)
        return inv_type, block
| StarcoderdataPython |
6470049 | import uuid
from django.db import models
from cities.models import City
from users.models import User
class OfferCategory(models.Model):
    """Lookup table of offer categories."""

    # UUID primary keys avoid exposing sequential ids.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(verbose_name='Наименование', max_length=50)

    class Meta:
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'

    def __str__(self):
        return self.name
class CloseReason(models.Model):
    """Lookup table of reasons an offer may be closed."""

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=50, verbose_name='Причина закрытия')

    class Meta:
        verbose_name = 'Причина закрытия'

    def __str__(self):
        return self.name
class Offer(models.Model):
    """A marketplace listing: goods or a service offered by a user."""

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Moderation workflow states for a listing.
    MODERATION_STATUSES_CHOICES = (
        ('ON_MODERATION', 'На модерации'),
        ('APPROVED', 'Одобрено'),
        ('REFUSED', 'Отклонено'),
    )
    # Deleting the author or category cascades to their offers.
    author = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="offers")
    title = models.CharField(max_length=100)
    description = models.TextField(max_length=280)
    category = models.ForeignKey(
        OfferCategory, on_delete=models.CASCADE, related_name="offers")
    # True when offering a service rather than goods.
    is_service = models.BooleanField()
    is_used = models.BooleanField()
    # City is optional and survives city deletion (SET_NULL).
    city = models.ForeignKey(
        City, on_delete=models.SET_NULL, blank=True, null=True, related_name="offers")
    pub_date = models.DateField(
        verbose_name='Дата публикации', auto_now_add=True)
    is_private = models.BooleanField(
        verbose_name='Приватное/общедоступное предложение')
    moderation_statuses = models.CharField(verbose_name='Статус модерации', max_length=50,
                                           choices=MODERATION_STATUSES_CHOICES, default='ON_MODERATION')
    is_closed = models.BooleanField(
        verbose_name='Открытое/закрытое предложение')
    # PROTECT keeps reasons referenced by closed offers from being deleted.
    close_reason = models.ForeignKey(CloseReason,
                                     on_delete=models.PROTECT, blank=True, null=True, related_name="offers")

    class Meta:
        verbose_name = 'Предложение'
        verbose_name_plural = 'Предложения'
def nameFile(instance, filename):
    """Upload-path callback for OfferPhoto: photos/<link>/<filename>."""
    return f"photos/{instance.link}/{filename}"
class OfferPhoto(models.Model):
    """Photo attached to an offer; files are stored under paths built by ``nameFile``."""

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Deleting the offer removes its photos.
    offer = models.ForeignKey(Offer, on_delete=models.CASCADE, related_name="photos")
    link = models.ImageField(upload_to=nameFile, blank=True, null=True)

    class Meta:
        verbose_name = 'Фотография предложения'
        verbose_name_plural = 'Фотографии предложения'
11319089 | import json, requests, urllib3
from flask import Flask, request, jsonify
from datetime import datetime
import time
import traceback
import os
import redis
import cPickle as pickle
from multiprocessing import Process
def avi_request(avi_api,tenant,api_version='17.2.1'):
    """GET an Avi controller API endpoint in the given tenant context.

    Relies on module-level globals ``login`` (an authenticated response
    carrying session cookies) and ``avi_controller`` (hostname), presumably
    set by a login routine elsewhere in this file — TODO confirm.
    """
    cookies=dict()
    # Newer controllers use 'avi-sessionid'; fall back to legacy 'sessionid'.
    if 'avi-sessionid' in login.cookies.keys():
        cookies['avi-sessionid'] = login.cookies['avi-sessionid']
    else:
        cookies['sessionid'] = login.cookies['sessionid']
    headers = ({'X-Avi-Tenant': '%s' %tenant, 'content-type': 'application/json', 'X-Avi-Version': '%s' %api_version})
    # verify=False: controller certificates are expected to be self-signed.
    return requests.get('https://%s/api/%s' %(avi_controller,avi_api), verify=False, headers = headers,cookies=cookies,timeout=50)
def avi_post(api_url, tenant, payload, api_version='17.2.1'):
    """Issue an authenticated POST (JSON body) against the Avi controller API.

    Uses the module-level ``login`` response for session + CSRF cookies and
    ``avi_controller`` for the target host.
    """
    # Newer controllers hand back 'avi-sessionid'; older ones use 'sessionid'.
    if 'avi-sessionid' in login.cookies.keys():
        cookies = {'avi-sessionid': login.cookies['avi-sessionid']}
    else:
        cookies = {'sessionid': login.cookies['sessionid']}
    headers = ({"X-Avi-Tenant": "%s" % tenant, 'content-type': 'application/json', 'referer': 'https://%s' % avi_controller, 'X-CSRFToken': dict(login.cookies)['csrftoken'], 'X-Avi-Version': '%s' % api_version})
    # CSRF token must be sent both as a header and as a cookie.
    cookies['csrftoken'] = login.cookies['csrftoken']
    return requests.post('https://%s/api/%s' % (avi_controller, api_url), verify=False, headers=headers, cookies=cookies, data=json.dumps(payload), timeout=50)
def serviceengine_inventory_multiprocess(r,cloud_list,uuid_list,tenant_list,runtime):
    """Fan out one inventory-collection child process per tenant, then merge
    each child's redis staging key ('temp_se_dict_<tenant>') into the shared
    'se_dict' / 'se_results' cache entries.

    :param r: redis client used as the shared cache between processes
    :param cloud_list: cloud names to include ('*' allowed in children)
    :param uuid_list: SE uuids to include ('*' allowed in children)
    :param tenant_list: tenants to poll (one child process each)
    :param runtime: tenants for which runtime subresources are fetched
    """
    try:
        se_inventory_cache_start = time.time()
        proc = []
        for t in tenant_list:
            p = Process(target = serviceengine_inventory_child, args = (r,cloud_list,uuid_list,tenant_list,t,runtime,))
            p.start()
            proc.append(p)
            # Cap concurrency: drain the pool once more than 10 children are live.
            if len(proc) > 10:
                for p in proc:
                    p.join()
                proc = []
        # Wait for any remaining children before consolidating results.
        for p in proc:
            p.join()
        #----- get keys, consolidate then delete
        inv_keys = r.keys('temp_se_dict_*')
        se_dict = {}
        for k in inv_keys:
            _1 = pickle.loads(r.get(k))
            se_dict.update(_1)
            r.delete(k)
        # Build per-tenant SE counts plus an overall total.
        se_results = {}
        se_results['TOTAL_SERVICEENGINES'] = len(se_dict)
        for v in se_dict:
            if se_dict[v]['tenant'] not in se_results:
                se_results[se_dict[v]['tenant']] = 1
            else:
                se_results[se_dict[v]['tenant']] += 1
        r.set('se_results', pickle.dumps(se_results))
        r.set('se_dict', pickle.dumps(se_dict))
        temp_total_time = str(time.time()-se_inventory_cache_start)
        print(str(datetime.now())+' =====> Refresh of SE Inventory Cache took %s seconds' %temp_total_time)
    except:
        # Broad except keeps the polling loop alive; failure is logged only.
        print(str(datetime.now())+' '+avi_controller+': func serviceengine_inventory_multiprocess encountered an error')
        exception_text = traceback.format_exc()
        print(str(datetime.now())+' : '+exception_text)
def serviceengine_inventory_child(r,cloud_list,uuid_list,tenant_list,t,runtime):
    """Collect the Service Engine inventory for a single tenant and stage it
    in redis under 'temp_se_dict_<tenant>' for the parent process to merge.

    :param r: redis client for staging results
    :param cloud_list: allowed cloud names, or '*' for all
    :param uuid_list: allowed SE uuids, or '*' for all
    :param tenant_list: full tenant filter applied to each SE's tenant_ref
    :param t: the tenant this child queries
    :param runtime: tenants for which runtime subresources are requested
    """
    try:
        se_inventory_cache_start = time.time()
        #if runtime == True:
        if t in runtime:
            _runtime = True
            _rt = ',vs_refs,mgmt_vnic&join_subresources=runtime'
        else:
            _runtime = False
            _rt = ''
        se_inv = avi_request('serviceengine?fields=cloud_ref,tenant_ref,se_group_ref%s&page_size=200&include_name=true' %_rt,t)
        if se_inv.status_code == 403:
            print(str(datetime.now())+' =====> ERROR serviceengine_inventory_child: %s' %se_inv.text)
        else:
            se_inv = se_inv.json()
            resp = se_inv
            page_number = 1
            se_dict = {}
            while 'next' in resp:
                page_number += 1
                # BUGFIX: the original built the URL as
                #   '...&page=' + str(page_number) % _rt
                # where '%' bound to str(page_number) and raised TypeError on
                # every paginated response, silently dropping pages 2+.
                resp = avi_request('serviceengine?fields=cloud_ref,tenant_ref,se_group_ref%s&page_size=200&include_name=true&page=%s' % (_rt, page_number), t).json()
                for v in resp['results']:
                    se_inv['results'].append(v)
            if se_inv['count'] > 0:
                for v in se_inv['results']:
                    # Apply tenant / cloud / uuid filters ('*' means no filter).
                    if v['tenant_ref'].rsplit('#')[1] in tenant_list:
                        if v['cloud_ref'].rsplit('#')[1].lower() in cloud_list or '*' in cloud_list:
                            if v['uuid'] in uuid_list or '*' in uuid_list:
                                if v['uuid'] not in se_dict:
                                    se_dict[v['uuid']] = {}
                                se_dict[v['uuid']]['name'] = v['name']
                                se_dict[v['uuid']]['tenant'] = v['tenant_ref'].rsplit('#')[1]
                                se_dict[v['uuid']]['cloud'] = v['cloud_ref'].rsplit('#')[1]
                                se_dict[v['uuid']]['se_group'] = v['se_group_ref'].rsplit('#')[1]
                                if _runtime == True:
                                    # Only the fields present in the response are copied.
                                    se_dict[v['uuid']]['runtime']={}
                                    if 'vs_refs' in v:
                                        se_dict[v['uuid']]['runtime']['virtualservice_count'] = len(v['vs_refs'])
                                    else:
                                        se_dict[v['uuid']]['runtime']['virtualservice_count'] = 0
                                    if 'version' in v['runtime']:
                                        se_dict[v['uuid']]['runtime']['version'] = v['runtime']['version'].split(' ',1)[0]
                                    if 'se_connected' in v['runtime']:
                                        se_dict[v['uuid']]['runtime']['se_connected'] = v['runtime']['se_connected']
                                    if 'power_state' in v['runtime']:
                                        se_dict[v['uuid']]['runtime']['power_state'] = v['runtime']['power_state']
                                    if 'migrate_state' in v['runtime']:
                                        se_dict[v['uuid']]['runtime']['migrate_state'] = v['runtime']['migrate_state']
                                    if 'oper_status' in v['runtime']:
                                        se_dict[v['uuid']]['runtime']['oper_status'] = v['runtime']['oper_status']['state']
                                    if 'mgmt_vnic' in v:
                                        se_dict[v['uuid']]['runtime']['mgmt_ip'] = v['mgmt_vnic']['vnic_networks'][0]['ip']['ip_addr']['addr']
                    else:
                        if v['tenant_ref'].rsplit('#')[1] == 'admin':
                            # BUGFIX: guard the lookup — the uuid is only present
                            # if an earlier entry passed the filters; an
                            # unguarded access raised KeyError and aborted the
                            # whole tenant's collection via the broad except.
                            if v['uuid'] in se_dict:
                                se_dict[v['uuid']]['tenant'] = 'admin'
            r.set('temp_se_dict_'+t,pickle.dumps(se_dict))
            temp_total_time = str(time.time()-se_inventory_cache_start)
            print(str(datetime.now())+' =====> Refresh of SE Inventory Cache took %s seconds for tenant %s' %(temp_total_time,t))
    except:
        # Broad except keeps sibling children/poller alive; failure is logged only.
        print(str(datetime.now())+' '+avi_controller+': func serviceengine_inventory_child encountered an error')
        exception_text = traceback.format_exc()
        print(str(datetime.now())+' : '+exception_text)
def serviceengine_metrics_multiprocess(r,uuid_list,se_metric_list,tenant_list,runtime):
    """Fan out one metrics-collection child per tenant, merge the staged
    responses, and render the combined result into Prometheus exposition
    format cached in redis under 'se_metrics'.

    The '01#'/'02#' prefixes are a sorting trick: they force each metric's
    HELP and TYPE lines to sort immediately before its sample lines, and are
    stripped again before the final text is assembled.
    """
    try:
        discovered_se = []
        metric_resp = []
        print(str(datetime.now())+' =====> Refreshing SE Static Metrics Cache')
        se_static_metric_cache_start = time.time()
        se_dict = pickle.loads(r.get('se_dict'))
        proc = []
        for t in tenant_list:
            p = Process(target = serviceengine_metrics_child, args = (r,uuid_list,se_metric_list,se_dict,t,))
            p.start()
            proc.append(p)
            # Cap concurrency: drain the pool once more than 10 children are live.
            if len(proc) > 10:
                for p in proc:
                    p.join()
                proc = []
        for p in proc:
            p.join()
        # Collect each child's staged API response, then drop the staging key.
        metric_keys = r.keys('temp_se_stat_*')
        for k in metric_keys:
            _1 = pickle.loads(r.get(k))
            metric_resp.append(_1['series']['collItemRequest:AllSEs'])
            r.delete(k)
        #prom_metrics = ''
        prom_metrics = ['\n']
        se_metrics_runtime = pickle.loads(r.get('se_metrics_runtime'))
        for _resp in metric_resp:
            for s in _resp:
                # Skip SEs that are no longer in (or never were in) inventory.
                if s in se_dict:
                    if s not in discovered_se:
                        discovered_se.append(s)
                    for m in _resp[s]:
                        if 'data' in m:
                            temp_tags = ''
                            # Prometheus metric names cannot contain '.' or '-'.
                            metric_name = m['header']['name'].replace('.','_').replace('-','_')
                            metric_description = m['header']['metric_description']
                            metric_value = m['data'][0]['value']
                            # Labels identifying the SE this sample belongs to.
                            temp_payload = {}
                            temp_payload['name'] = se_dict[s]['name']
                            temp_payload['uuid'] = s
                            temp_payload['cloud'] = se_dict[s]['cloud']
                            temp_payload['se_group'] = se_dict[s]['se_group']
                            temp_payload['tenant'] = m['header']['tenant_ref'].rsplit('#')[1]
                            temp_payload['entity_type'] = 'serviceengine'
                            for e in temp_payload:
                                temp_tags=temp_tags+(str(e+'="'+temp_payload[e]+'",'))
                            temp_tags = '{'+temp_tags.rstrip(',')+'}'
                            #prom_metrics = prom_metrics+'\n'+'# HELP '+metric_name+' '+metric_description
                            #prom_metrics = prom_metrics+'\n'+'# TYPE '+metric_name+' gauge'
                            #prom_metrics = prom_metrics+'\n'+metric_name+''+temp_tags+' '+str(metric_value)
                            prom_metrics.append('%s 01# HELP %s %s' %(metric_name,metric_name, metric_description))
                            prom_metrics.append('%s 02# TYPE %s gauge' %(metric_name,metric_name))
                            prom_metrics.append('%s %s %s' %(metric_name,temp_tags,str(metric_value)))
                    # Runtime attributes (connectivity, power state, ...) are
                    # exported too; non-numeric values become a label with
                    # sample value 1.
                    if 'runtime' in se_dict[s]:
                        for m in se_dict[s]['runtime']:
                            temp_payload = {}
                            temp_payload['name'] = se_dict[s]['name']
                            temp_payload['uuid'] = s
                            temp_payload['cloud'] = se_dict[s]['cloud']
                            temp_payload['se_group'] = se_dict[s]['se_group']
                            temp_payload['tenant'] = se_dict[s]['tenant']
                            temp_payload['entity_type'] = 'serviceengine'
                            se_metrics_runtime.append(m)
                            temp_tags = ''
                            if type(se_dict[s]['runtime'][m]) != int:
                                temp_payload[m] = str(se_dict[s]['runtime'][m])
                                int_value = False
                            else:
                                int_value = True
                            for e in temp_payload:
                                temp_tags=temp_tags+(str(e+'="'+temp_payload[e]+'",'))
                            temp_tags = '{'+temp_tags.rstrip(',')+'}'
                            prom_metrics.append('%s 01# HELP %s' %(m,m))
                            prom_metrics.append('%s 02# TYPE %s gauge' %(m,m))
                            if int_value == False:
                                prom_metrics.append('%s %s %s' %(m,temp_tags,str(1)))
                            else:
                                prom_metrics.append('%s %s %s' %(m,temp_tags,str(se_dict[s]['runtime'][m])))
                    ##----- return vscount for SE
                    #metric_name = 'vscount'
                    #metric_value = se_dict[s]['vscount']
                    #temp_payload = {}
                    #temp_payload['name'] = se_dict[s]['name']
                    #temp_payload['uuid'] = s
                    #temp_payload['cloud'] = se_dict[s]['cloud']
                    #temp_payload['se_group'] = se_dict[s]['se_group']
                    #temp_payload['tenant'] = se_dict[s]['tenant']
                    #temp_payload['entity_type'] = 'serviceengine'
                    #temp_tags = ''
                    #for e in temp_payload:
                    #    temp_tags=temp_tags+(str(e+'="'+temp_payload[e]+'",'))
                    #temp_tags = '{'+temp_tags.rstrip(',')+'}'
                    #prom_metrics.append('%s 01# HELP %s' %(m,m))
                    #prom_metrics.append('%s 02# TYPE %s gauge' %(m,m))
                    #prom_metrics.append('%s %s %s' %(metric_name,temp_tags,str(metric_value)))
        # Remember every runtime metric name seen so far (deduplicated).
        se_metrics_runtime = list(set(se_metrics_runtime))
        r.set('se_metrics_runtime',pickle.dumps(se_metrics_runtime))
        #prom_metrics = prom_metrics+'\n'
        #se_metrics = prom_metrics
        # Deduplicate + sort, then strip the '01'/'02' ordering prefixes.
        prom_metrics = list(set(prom_metrics))
        prom_metrics = sorted(prom_metrics)
        for idx, item in enumerate(prom_metrics):
            if '01#' in item:
                item = item.split('01',1)[1]
                prom_metrics[idx] = item
            elif '02#' in item:
                item = item.split('02',1)[1]
                prom_metrics[idx] = item
        prom_metrics.append('\n')
        _se_metrics = '\n'.join(prom_metrics)
        r.set('se_polling', 'False')
        # Record inventoried SEs for which no sample ended up in the output.
        missing_metrics = []
        for _s in se_dict:
            if se_dict[_s]['name'] not in _se_metrics:
                # NOTE(review): _a is built but never used — appears vestigial.
                _a = se_dict[_s]['tenant']+' : '+se_dict[_s]['name']
                missing_metrics.append(_s)
        r.set('se_missing_metrics', pickle.dumps(missing_metrics))
        r.set('se_metrics', pickle.dumps(prom_metrics))
        temp_total_time = str(time.time()-se_static_metric_cache_start)
        print(str(datetime.now())+' =====> Refresh of SE Metrics Cache took %s seconds' %temp_total_time)
    except:
        # Always clear the polling flag so the next cycle can run.
        r.set('se_polling', 'False')
        print(str(datetime.now())+' : func serviceengine_metrics encountered an error')
        exception_text = traceback.format_exc()
        print(str(datetime.now())+' : '+exception_text)
def serviceengine_metrics_child(r,uuid_list,se_metric_list,se_dict,t):
    """Fetch the requested metrics for one tenant's SEs via the analytics
    collection API and stage the raw response in redis under
    'temp_se_stat_<tenant>' for the parent to merge.

    :param se_metric_list: comma-separated metric ids to request
    :param t: tenant this child queries
    """
    try:
        se_static_metric_cache_start = time.time()
        if '*' in uuid_list:
            entity_uuid = '*'
        else:
            # Only request uuids that belong to this tenant.
            _temp_uuid_list = []
            for e in uuid_list:
                if se_dict[e]['tenant'] == t:
                    _temp_uuid_list.append(e)
            entity_uuid = ','.join(_temp_uuid_list)
        # limit=1 with step=300: latest 5-minute sample per metric.
        payload = {
            "metric_requests": [
                {
                    "step": 300,
                    "limit": 1,
                    "aggregate_entity": False,
                    "entity_uuid": entity_uuid,
                    "id": "collItemRequest:AllSEs",
                    "metric_id": se_metric_list
                }
            ]}
        se_stat = avi_post('analytics/metrics/collection?pad_missing_data=false&include_refs=true&include_name=true', t, payload).json()
        r.set('temp_se_stat_'+t,pickle.dumps(se_stat))
        temp_total_time = str(time.time()-se_static_metric_cache_start)
        print(str(datetime.now())+' =====> Refresh of SE Metrics Cache took %s seconds for tenant %s' %(temp_total_time,t))
    except:
        # Broad except keeps sibling children alive; failure is logged only.
        print(str(datetime.now())+' : func servicengine_metrics_child encountered an error for tenant: '+t)
        exception_text = traceback.format_exc()
        print(str(datetime.now())+' : '+exception_text)
def refresh_serviceengine_metrics(r, avi_login, controller):
    """Entry point for one polling cycle: load the filter configuration from
    redis, then refresh the SE inventory and metrics caches.

    :param r: redis client holding configuration and caches
    :param avi_login: authenticated login response (exported as module global)
    :param controller: controller hostname (exported as module global)
    """
    try:
        # avi_request/avi_post read these module-level globals.
        global login
        login = avi_login
        global avi_controller
        avi_controller = controller
        r.set('se_last_poll_start_time', time.time())
        cloud_list = [c for c in pickle.loads(r.get('se_cloud'))]
        #---
        _uuid_list = pickle.loads(r.get('se_entity_uuid'))
        if '*' in _uuid_list:
            # Wildcard collapses the uuid filter to the literal string '*'.
            uuid_list = '*'
        else:
            uuid_list = [u for u in _uuid_list]
        #---
        tenant_list = [t for t in pickle.loads(r.get('se_tenant'))]
        #---
        se_metric_list = ','.join(m for m in pickle.loads(r.get('se_metric_id')))
        #---
        runtime = pickle.loads(r.get('se_runtime'))
        #---
        serviceengine_inventory_multiprocess(r, cloud_list, uuid_list, tenant_list, runtime)
        serviceengine_metrics_multiprocess(r, uuid_list, se_metric_list, tenant_list, runtime)
        r.set('se_last_poll_time', time.time())
    except:
        exception_text = traceback.format_exc()
        print(str(datetime.now())+' : '+exception_text)
| StarcoderdataPython |
356874 | <reponame>metataro/DirectFeedbackAlignment<gh_stars>1-10
from distutils.core import setup
from Cython.Build import cythonize
import numpy as np
# Build the Cython extension module for the im2col helper used by the network
# code; NumPy headers are required because the .pyx uses the C API.
setup(
    name='im2col',
    ext_modules=cythonize("network/utils/im2col_cython.pyx"),
    include_dirs=[np.get_include()]
)
1864015 | from rest_framework import serializers
from ..models import CartItem, Order
from ..serializers.carts import CartItemSerializer
class OrderSerializer(serializers.ModelSerializer):
    """Serializer for orders, including their nested cart items.

    ``create`` converts the requesting user's current cart into an order by
    re-parenting every CartItem from the cart to the new order.
    """
    # Nested, read-from-model representation of the items on the order.
    order_items = CartItemSerializer(many=True)

    class Meta:
        model = Order
        fields = (
            'pk',
            'ordered_at',
            'order_items',
            'order_status',
            'payment_price',
        )

    def create(self, validated_data):
        """Create an order for the current user and move their cart items onto it."""
        status = validated_data['order_status']
        user = self.context['request'].user
        order = Order.objects.create(
            user=user,
            order_status=status,
        )
        # Detach every item from the user's cart and attach it to the order.
        for cartitem in CartItem.objects.filter(cart__user=user):
            cartitem.cart = None
            cartitem.order = order
            cartitem.save(force_update=True)
        return order

    def update(self, instance, validated_data):
        """Only the order status may be changed after creation."""
        status = validated_data['order_status']
        instance.order_status = status
        instance.save(force_update=True)
        return instance
| StarcoderdataPython |
1602919 | #!/usr/bin/env python
import sys
import os
import json
from huvr_client import Client
from huvr_client.helpers import make_base_directory, save_profile_to_file, save_checklist_to_file, save_project_type_to_file
if __name__ == '__main__':
    # .-------------------------------.
    # |     Setup main variables      |
    # '-------------------------------'
    url = "https://demo.huvrdatacloud.com"
    # NOTE(review): credentials are intentionally blank here — fill in before
    # running, or load from environment; never commit real credentials.
    username = ""
    password = ""
    # All downloaded profiles/checklists are written below this directory.
    base_directory = make_base_directory(os.getcwd(), "huvrdatacloud", "example2")

    # Login and go
    client = Client(url=url, verbose=True)
    client.login(username, password)

    # .--------------------.
    # |  GET Project Types |
    # '--------------------'
    (res_code, project_types) = client.project_types()
    last_project_type_id = None
    if res_code == 200:
        # print json.dumps(project_types, indent=4)
        for project_type in project_types['results']:
            # Build up a directory for this ProjectType
            pt_name = "_".join(project_type['name'].split())
            working_directory = "{}/{}_{}".format(base_directory, pt_name, project_type['id'])
            if not os.path.exists(working_directory):
                print("Making Working Directory: [{}]".format(working_directory))
                os.makedirs(working_directory)
            # Fetch and save the most recent profile, if the type has any.
            try:
                profile_id = project_type['profiles'][-1]
                print("Fetching Profile [{}]".format(profile_id))
                (res_code, profile) = client.profiles(profile_id)
                if res_code == 200:
                    save_profile_to_file(profile, working_directory)
            except IndexError as e:
                print("No profile for Project Type {}".format(pt_name))
            # Fetch and save the most recent checklist, if the type has any.
            try:
                checklist_id = project_type['checklists'][-1]
                print("Fetching Checklist [{}]".format(checklist_id))
                (res_code, checklists) = client.checklists(checklist_id)
                if res_code == 200:
                    save_checklist_to_file(checklists, working_directory)
            except IndexError as e:
                print("No checklist for Project Type {}".format(pt_name))
            save_project_type_to_file(project_type, working_directory)
9704479 | #!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
from ansible.plugins.callback import CallbackBase
from jinja2 import Template
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_native
from tempfile import SpooledTemporaryFile
import json
import sys
import os
import datetime
import time
import requests
import re
from pytz import timezone
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__metaclass__ = type
# Ansible documentation of the module.
DOCUMENTATION = '''
msteam: Can send custom message to MS Team channel using pre-defined MS Team message card as Jinja2 template
short_description: msteam is an Ansible callback plugin intended for use to send message to MS Team channel.
author: <NAME> <<EMAIL>>
'''
class CallbackModule(CallbackBase):
    """Ansible notification callback that posts a run summary to an MS Teams
    channel, rendered from a message-card Jinja2 template.

    The channel URL and template can be overridden per run via extra-vars
    (v_msteam_channel_url, v_success_channel_url, v_failure_channel_url,
    v_message_template, v_disable_msteam_post).
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'msteam'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        super(CallbackModule, self).__init__()
        self.playbook_name = None
        self.tz = timezone('Canada/Eastern')
        self.dt_format = "%Y-%m-%d %H:%M:%S"
        self.start_time = datetime.datetime.now(self.tz)
        self.extra_vars = None
        # Base URL of Ansible Tower/AWX, used to build a link back to the job.
        # FIX: this assignment was commented out but the attribute is read
        # unconditionally in notify_msteam(), which raised AttributeError at
        # the end of every run. Replace with your Ansible Tower/AWX base url.
        self.v_at_base_url = "https://myawx"
        # The following variable is used to drive logic based on whether the playbook is executed from Tower or command line.
        # By default assume it is executed from command line
        self.executed_from_tower = False
        # To record whether the playbook variables are retrieved, so that we retrieve them just once.
        self.pb_vars_retrieved = False
        # default msteam url
        self.v_msteam_channel_url = "<replace with your default MS Team Webhook URL>"
        # default msteam message card template
        self.v_message_template = "templates/msteam_default_msg.json.j2"
        # default job status in the begining
        self.job_status = "successful"
        # If you need to post through proxies, uncomment the following and replace with your proxy URLs.
        # self.proxies = {
        #    "http": "<http-proxy-url>",
        #    "https": "<https-proxy-url>",
        # }
        # by default enable msteam notification
        self.disable_msteam_post = False

    # You can uncomment and customize the set_options method if needed.
    # def set_options(self, task_keys=None, var_options=None, direct=None):
    #     super(CallbackModule, self).set_options(
    #         task_keys=task_keys, var_options=var_options, direct=direct)

    def v2_playbook_on_start(self, playbook):
        """Record the playbook object and file name."""
        display.vvv(u"v2_playbook_on_start method is being called")
        self.playbook = playbook
        self.playbook_name = playbook._file_name

    def v2_playbook_on_play_start(self, play):
        """Capture play/extra/host variables once the first play starts."""
        display.vvv(u"v2_playbook_on_play_start method is being called")
        self.play = play
        # get variable manager and retrieve extra-vars
        vm = play.get_variable_manager()
        self.extra_vars = vm.extra_vars
        self.play_vars = vm.get_vars(self.play)
        # The following is used to retrieve variables defined under group_vars or host_vars.
        # If the same variable is defined under both with the same scope, the one defined under host_vars takes precedence.
        self.host_vars = vm.get_vars()['hostvars']
        if not self.pb_vars_retrieved:
            self.get_pb_vars()

    # def v2_runner_on_failed(self, result, ignore_errors=False):
        #display.vvv(u"v2_runner_on_failed method is being called")

    # def v2_runner_on_unreachable(self, result):
    #     display.vvv(u"v2_runner_on_unreachable method is being called")

    # # Event used when host begins execution of a task from version 2.8
    # def v2_runner_on_start(self, host, task):
    #     display.vvv(u"v2_runner_on_start method is being called")

    # def v2_runner_on_ok(self, result, ignore_errors=False):
    #     display.vvv(u"v2_runner_on_ok method is being called")

    # def v2_runner_on_skipped(self, result):
    #     display.vvv(u"v2_runner_on_skipped is being called"

    def v2_playbook_on_stats(self, stats):
        """End-of-run hook: compute the overall job status and notify MS Teams."""
        display.vvv(u"v2_playbook_on_stats method is being called")
        if not self.pb_vars_retrieved:
            self.get_pb_vars()
        hosts = sorted(stats.processed.keys())
        self.hosts = hosts
        self.summary = {}
        self.end_time = datetime.datetime.now(self.tz)
        self.duration_time = int(
            (self.end_time - self.start_time).total_seconds())
        # Iterate trough all hosts to check for failures
        for host in hosts:
            summary = stats.summarize(host)
            self.summary = summary
            if summary['failures'] > 0:
                self.job_status = "failed"
            if summary['unreachable'] > 0:
                self.job_status = "failed"
            display.vvv(u"summary for host %s :" % host)
            display.vvv(str(summary))
            # Add code here if logging per host
        # Just send a single notification whether it is a failure or success
        # Post message to MS Team
        if(not self.disable_msteam_post):
            self.notify_msteam()
        else:
            display.vvv(u"Posting to MS Team has been disabled.")

    def get_pb_vars(self):
        """Extract Tower job properties and common extra-vars, with defaults.

        Every lookup is best-effort: missing variables fall back to 'na' (or
        the attribute's default) so the callback never breaks the run.
        """
        display.vvv(u"get_pb_vars method is being called")
        self.pb_vars_retrieved = True
        try:
            self.tower_job_id = self.play_vars['tower_job_id']
            self.tower_job_template_name = self.play_vars['tower_job_template_name']
            self.scm_revision = self.play_vars['tower_project_revision']
            self.executed_from_tower = True
        except Exception as e:
            print("WARN: Playbook is not executed from Ansible Tower. Ansible Tower properties will not be available. Details %s" % str(e))
            self.tower_job_id = "na"
            self.tower_job_template_name = "na"
            self.scm_revision = "na"

        display.vvv(u"tower_job_id: %s" % self.tower_job_id)
        display.vvv(u"tower_job_template_name: %s" %
                    self.tower_job_template_name)
        display.vvv(u"scm_revision: %s" % self.scm_revision)
        display.vvv(u"executed_from_tower: %s" % self.executed_from_tower)

        # Extract common extra-vars
        try:
            _disable_msteam_post = self.extra_vars['v_disable_msteam_post']
            if(_disable_msteam_post.lower() == 'yes' or _disable_msteam_post.lower() == 'true'):
                self.disable_msteam_post = True
        except:
            display.vvv(
                u"Could not retrieve v_disable_msteam_post extra-vars from job")
            pass
        display.vvv(u"disable_msteam_post: %s" % self.disable_msteam_post)

        self.v_environment = "na"
        try:
            self.v_environment = self.extra_vars['v_environment']
        except:
            display.vvv(
                u"Could not retrieve v_environment extra-vars from job")
            pass
        display.vvv(u"v_environment: %s" % self.v_environment)

        self.v_app_file = "na"
        try:
            self.v_app_file = self.extra_vars['v_app_file']
        except:
            display.vvv(u"Could not retrieve v_app_file extra-vars from job")
            pass
        display.vvv(u"v_app_file: %s" % self.v_app_file)

        self.v_host_name = None
        try:
            self.v_host_name = self.extra_vars['v_host_name']
            display.vvv("v_host_name: %s" % self.v_host_name)
        except:
            display.vvv("Could not retrieve v_host_name extra-vars from job")
            pass
        display.vvv(u"v_host_name: %s" % self.v_host_name)

        self.v_deployment_action = "na"
        self.v_instance_name = "na"
        try:
            self.v_deployment_action = self.extra_vars['v_deployment_action']
        except:
            display.vvv(
                "Could not retrieve deployment related common extra-vars from job")
            pass
        try:
            self.v_instance_name = self.extra_vars['v_instance_name']
        except:
            display.vvv(
                "Could not retrieve WAS Liberty deployment specific extra-vars v_instance_name from job")
            pass
        display.vvv("v_deployment_action: %s" %
                    self.v_deployment_action)
        display.vvv("v_instance_name: %s" %
                    self.v_instance_name)

    def notify_msteam(self):
        """Render the Jinja2 message card and POST it to the chosen webhook.

        Channel selection: success/failure-specific URLs (if supplied via
        extra-vars) override the default channel URL.
        """
        display.vvv(u"notify_msteam method is being called")
        # check if default v_msteam_channel_url url is provided
        try:
            _v_msteam_channel_url = self.extra_vars['v_msteam_channel_url']
            if (_v_msteam_channel_url != "" and (_v_msteam_channel_url.lower() != "none")):
                self.v_msteam_channel_url = _v_msteam_channel_url
        except:
            display.vvv(
                u"v_msteam_channel_url is not passed as extra-vars. Will use default value: %s" % self.v_msteam_channel_url)
            pass

        # check if success channel url is provided
        v_success_channel_url = ""
        try:
            _v_success_channel_url = self.extra_vars['v_success_channel_url']
            if (_v_success_channel_url != "" and (_v_success_channel_url.lower() != "none")):
                v_success_channel_url = _v_success_channel_url
        except:
            display.vvv(
                u"v_success_channel_url is not passed as extra-vars. Will use default value: %s" % self.v_msteam_channel_url)
            pass

        # check if failure channel url is provided
        v_failure_channel_url = ""
        try:
            _v_failure_channel_url = self.extra_vars['v_failure_channel_url']
            if (_v_failure_channel_url != "" and (_v_failure_channel_url.lower() != "none")):
                v_failure_channel_url = _v_failure_channel_url
        except:
            display.vvv(
                u"v_failure_channel_url is not passed as extra-vars. Will use default value: %s" % self.v_msteam_channel_url)
            pass

        # check if message template is provided as extra-vars
        try:
            _v_message_template = self.extra_vars['v_message_template']
            if (_v_message_template != "" and (_v_message_template.lower() != "none")):
                self.v_message_template = _v_message_template
        except:
            display.vvv(
                u"v_message_template is not passed as extra-vars. Will use the default one")
            pass
        display.vvv("v_message_template: %s" %
                    self.v_message_template)

        # If you are using Ansible Tower/AWX and want to have reference back
        web_url = self.v_at_base_url + \
            "/#/jobs/playbook/"+str(self.tower_job_id)

        try:
            with open(self.v_message_template) as j2_file:
                template_obj = Template(j2_file.read())
        except Exception as e:
            print("ERROR: Exception occurred while reading MS Team message template %s. Exiting... %s" % (
                self.v_message_template, str(e)))
            sys.exit(1)

        rendered_template = template_obj.render(
            v_ansible_job_status=self.job_status,
            v_ansible_job_id=self.tower_job_id,
            v_ansible_scm_revision=self.scm_revision,
            v_ansible_job_name=self.tower_job_template_name,
            v_ansible_job_started=self.start_time.strftime(self.dt_format),
            v_ansible_job_finished=self.end_time.strftime(self.dt_format),
            v_ansible_job_elapsed_time=self.duration_time,
            v_ansible_host_list=self.hosts,
            v_ansible_web_url=web_url,
            v_ansible_app_file=self.v_app_file,
            v_ansible_deployment_action=self.v_deployment_action,
            v_ansible_environment=self.v_environment,
            v_ansible_instance_name=self.v_instance_name,
            v_ansible_executed_from_tower=self.executed_from_tower
        )

        # Round-trip the rendered text through a temp file to validate JSON.
        try:
            with SpooledTemporaryFile(
                    max_size=0, mode='r+w') as tmpfile:
                tmpfile.write(rendered_template)
                tmpfile.seek(0)
                json_payload = json.load(tmpfile)
                display.vvv(json.dumps(json_payload))
        except Exception as e:
            print("ERROR: Exception occurred while reading rendered template or writing rendered MS Team message template. Exiting... %s" % str(e))
            sys.exit(1)

        if self.job_status == "successful":
            print("INFO: Sending success message to MS Team channel")
            if v_success_channel_url != "":
                self.v_msteam_channel_url = v_success_channel_url
        else:
            print("INFO: Sending failure message to MS Team channel")
            if v_failure_channel_url != "":
                self.v_msteam_channel_url = v_failure_channel_url
        display.vvv("v_msteam_channel_url: %s" % self.v_msteam_channel_url)

        try:
            # using proxy
            # response = requests.post(url=self.v_msteam_channel_url,
            #                          data=json.dumps(json_payload), headers={'Content-Type': 'application/json'}, timeout=10, proxies=self.proxies)
            # without proxy
            response = requests.post(url=self.v_msteam_channel_url,
                                     data=json.dumps(json_payload), headers={'Content-Type': 'application/json'}, timeout=10)
            if response.status_code != 200:
                raise ValueError('Request to msteam returned an error %s, the response is:\n%s' % (
                    response.status_code, response.text))
        except Exception as e:
            print(
                "WARN: Exception occurred while sending notification to MS team. %s" % str(e))
| StarcoderdataPython |
3300932 | <gh_stars>1-10
import argparse
import sys
from pathlib import Path
from ruamel.yaml import YAML
from termcolor import cprint
def parse_cli_overides():
    """Parse the command-line arguments.

    Parse args from CLI and override config dictionary entries

    This function implements the command-line interface of the program.
    The interface accepts general command-line arguments as well as
    arguments that are specific to a sub-command. The sub-commands are
    *preprocess*, *train*, *predict*, and *evaluate*. Specifying a
    sub-command is required, as it specifies the task that the program
    should carry out.

    Returns:
        args: The parsed arguments.
    """
    # NOTE(review): the function name has a typo ("overides"); kept because
    # callers elsewhere reference it by this name.
    # Parse the command-line arguments, but separate the `--config_file`
    # option from the other arguments. This way, options can be parsed
    # from the config file(s) first and then overidden by the other
    # command-line arguments later.
    parser = argparse.ArgumentParser(
        description='Event Independent Network for Learning 3D Audio Sources.',
        add_help=False
    )
    parser.add_argument('-c', '--config_file', default='./configs/ein_seld/seld.yaml', help='Specify config file', metavar='FILE')
    subparsers = parser.add_subparsers(dest='mode')
    # One sub-parser per task; 'evaluate' takes no extra options.
    parser_preproc = subparsers.add_parser('preprocess')
    parser_train = subparsers.add_parser('train')
    parser_infer = subparsers.add_parser('infer')
    subparsers.add_parser('evaluate')

    # Require the user to specify a sub-command
    subparsers.required = True

    parser_preproc.add_argument('--preproc_mode', choices=['extract_data', 'extract_scalar', 'extract_frame_label',
        'extract_track_label', 'salsa_extractor'], required=True, help='select preprocessing mode')
    parser_preproc.add_argument('--dataset_type', default='train', choices=['train', 'test'],
                        help='select dataset to preprocess')
    parser_preproc.add_argument('--num_workers', type=int, default=8, metavar='N')
    parser_preproc.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')

    parser_train.add_argument('--seed', type=int, default=2022, metavar='N')
    parser_train.add_argument('--num_workers', type=int, default=8, metavar='N')
    parser_train.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
    parser_train.add_argument('--port', type=int, default=12359, metavar='N')

    parser_infer.add_argument('--num_workers', type=int, default=8, metavar='N')
    parser_infer.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')

    args = parser.parse_args()
    # Echo the parsed arguments for reproducibility of runs.
    args_dict = vars(args)
    cprint("Args:", "green")
    for key, value in args_dict.items():
        print(f"  {key:25s} -> {value}")

    # Load the YAML config and echo it, indented via replace_indent().
    yaml = YAML()
    yaml.indent(mapping=4, sequence=6, offset=3)
    yaml.default_flow_style = False
    with open(args.config_file, 'r') as f:
        cfg = yaml.load(f)
    cprint("Cfg:", "red")
    yaml.dump(cfg, sys.stdout, transform=replace_indent)

    return args, cfg
def replace_indent(stream):
    # Prefix every line of the dumped YAML with a fixed indent so the whole
    # config prints shifted right under the "Cfg:" heading.
    stream = "    " + stream
    return stream.replace("\n", "\n    ")
| StarcoderdataPython |
4879196 | import logging
import os
from pathlib import Path
from dotenv import load_dotenv
# Shared ETL configuration: loads envs/etl.env, configures logging, and
# exposes the PostgreSQL DSN and Elasticsearch connection settings.
env_path = Path(__file__).resolve().parent.parent / "envs/etl.env"
load_dotenv(dotenv_path=env_path)

logging.basicConfig(filename="logs/etl.log", level="INFO")
logger = logging.getLogger()
logger.setLevel(level="INFO")

# PostgreSQL connection parameters (psycopg2-style DSN dict).
dsl = {
    "dbname": os.getenv("DB_NAME"),
    "user": os.getenv("POSTGRES_USER"),
    # FIX: this line was corrupted by credential anonymization
    # ('<PASSWORD>("<PASSWORD>")', which is not valid Python); restored as an
    # environment lookup. NOTE(review): confirm the env var name matches
    # envs/etl.env — POSTGRES_PASSWORD is assumed here.
    "password": os.getenv("POSTGRES_PASSWORD"),
    "host": os.environ.get("DB_HOST"),
    "port": os.environ.get("DB_PORT"),
}

# Elasticsearch hosts list (elasticsearch-py connection config).
es_conf = [
    {
        "host": os.getenv("ES_HOST"),
        "port": os.getenv("ES_PORT"),
    }
]
4933353 | <reponame>LordFitoi/feline<filename>feline/jobposts/migrations/0013_auto_20211108_1026.py
# Generated by Django 3.1.13 on 2021-11-08 14:26
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: switches JobPost.how_to_apply to a
    # CKEditor rich-text field. Do not edit by hand once applied.

    dependencies = [
        ('jobposts', '0012_auto_20211103_0559'),
    ]

    operations = [
        migrations.AlterField(
            model_name='jobpost',
            name='how_to_apply',
            field=ckeditor.fields.RichTextField(blank=True, null=True),
        ),
    ]
| StarcoderdataPython |
1694076 | <reponame>abcdabcd987/acm-compiler-judge
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
import os
import csv
import sys
import time
import json
import shutil
import codecs
import hashlib
import StringIO
from datetime import datetime
from collections import namedtuple
from jinja2 import Environment, PackageLoader, select_autoescape, Template
import utils
import settings
from models import *
from database import db_session, init_db
def initdb():
    """(Re)initialize the judge database, guarded by a time-based token.

    The token is derived from the current minute, so the confirmation command
    printed here is only valid for about sixty seconds.
    """
    token = hashlib.md5(str(int(time.time()) // 60)).hexdigest()[:6]
    if len(sys.argv) != 3 or sys.argv[2] != token:
        print('please run the following command within the current minute')
        print('    ./maintenance.py initdb %s' % token)
        sys.exit(1)
    print('initializing the database')
    init_db()
    print('done!')
def add_compiler():
    """CLI subcommand: register a student's compiler repository."""
    if len(sys.argv) != 4:
        print('usage: ./maintenance.py add_compiler <student> <repo_url>')
        sys.exit(1)
    # argv[2] may contain non-ASCII student names (Python 2 bytes -> unicode).
    student, repo_url = sys.argv[2].decode('utf-8'), sys.argv[3]
    db_session.add(Compiler(student=student, repo_url=repo_url))
    db_session.commit()
    print('done!')
def add_testcase():
    """CLI subcommand: parse a testcase file, store it, and schedule runs.

    Any compiler whose latest version has already progressed past (or is no
    longer pending at) this testcase's phase gets a pending TestRun and is
    moved back to 'running' at this phase.
    """
    if len(sys.argv) not in [3, 4]:
        print('usage: ./maintenance.py add_testcase <path_to_testcase> [-y]')
        sys.exit(1)
    with codecs.open(sys.argv[2], 'r', 'utf-8') as f:
        content = f.read()
    t = utils.parse_testcase(content)
    # '-y' skips the interactive confirmation.
    if len(sys.argv) == 4:
        assert sys.argv[3] == '-y'
    else:
        print(utils.testcase_to_text(t))
        confirm = raw_input('Confirm (y/n)? ')
        assert confirm.strip() == 'y'
    # The full parsed testcase is stored as JSON in the content column.
    testcase = Testcase(enabled=True,
                        phase=t['phase'],
                        is_public=t['is_public'],
                        comment=t['comment'],
                        timeout=t.get('timeout', None),
                        cnt_run=0,
                        cnt_hack=0,
                        content=json.dumps(t))
    db_session.add(testcase)
    db_session.commit()
    tphase = utils.phase_to_index(testcase.phase)
    for compiler in db_session.query(Compiler):
        version = db_session.query(Version).filter(Version.id == compiler.latest_version_id).first()
        if not version:
            continue
        vphase = utils.phase_to_index(version.phase)
        # Only versions that already reached this phase need the new run.
        if vphase > tphase or (vphase == tphase and version.status != 'pending'):
            r = TestRun(version_id=version.id,
                        testcase_id=testcase.id,
                        phase=testcase.phase,
                        status='pending',
                        created_at=datetime.utcnow())
            db_session.add(r)
            version.phase = testcase.phase
            version.status = 'running'
    db_session.commit()
    print('done!', sys.argv[2])
def set_testcase():
    """Enable or disable a single testcase by its id.

    Usage: ./maintenance.py set_testcase <testcase_id> enable/disable
    """
    if len(sys.argv) != 4:
        print('usage: ./maintenance.py set_testcase <testcase_id> enable/disable')
        sys.exit(1)
    target_id, action = int(sys.argv[2]), sys.argv[3]
    assert action in ['enable', 'disable']
    row = db_session.query(Testcase).filter(Testcase.id == target_id).one()
    row.enabled = (action == 'enable')
    db_session.commit()
    print('done!')
def rejudge_version():
    """Clone an existing version and make the clone the compiler's latest.

    Usage: ./maintenance.py rejudge_version <version_id>

    The clone starts back at phase 'build' with status 'pending', so the
    judge pipeline re-runs it from scratch.
    """
    if len(sys.argv) != 3:
        print('usage: ./maintenance.py rejudge_version <version_id>')
        sys.exit(1)
    version_id = int(sys.argv[2])
    old = db_session.query(Version).filter(Version.id == version_id).one()
    compiler = db_session.query(Compiler).filter(Compiler.id == old.compiler_id).one()
    new = Version(compiler_id=old.compiler_id,
                  sha=old.sha,
                  phase='build',
                  status='pending')
    db_session.add(new)
    db_session.commit()
    # Commit first so new.id is assigned before pointing the compiler at it.
    compiler.latest_version_id = new.id
    db_session.commit()
    print('done! the new version_id is', new.id)
def clear_judge_testcase_cache():
    """Delete every cached testcase file on the judge host.

    NOTE(review): shells out via os.system with a path taken from
    settings; assumes a POSIX 'rm' and that the path contains no shell
    metacharacters -- confirm before reusing elsewhere.
    """
    os.system('rm {}/*'.format(settings.JUDGE_TESTCASE_PATH))
def makedirs():
    """Create every directory the core/judge services write into.

    Directories that already exist are left untouched.
    """
    required = [
        settings.CORE_BUILD_LOG_PATH,
        settings.CORE_TESTRUN_STDERR_PATH,
        settings.JUDGE_GIT_REPO_PATH,
        settings.JUDGE_TESTCASE_PATH,
    ]
    for path in required:
        if not os.path.exists(path):
            os.makedirs(path)
    print('done!')
def final_rejudge():
    """Re-submit every version referenced by the final-submission CSV.

    Usage: ./maintenance.py final_rejudge <input_csv> <output_csv>

    The input CSV has columns 'cid', 'name' plus one column per contest
    day containing a version id.  Each referenced version is cloned into
    a fresh pending 'build' version, and the output CSV is the input
    with every version id replaced by its clone's id.
    """
    if len(sys.argv) != 4:
        print('usage: ./maintenance.py final_rejudge <input_csv> <output_csv>')
        sys.exit(1)
    compilers = {c.id: c for c in db_session.query(Compiler)}
    submit_versions = set()
    # First pass: validate rows against the DB and collect version ids.
    with open(sys.argv[2]) as fin:
        reader = csv.DictReader(fin)
        fieldnames = reader.fieldnames
        for row in reader:
            cid = int(row['cid'])
            assert cid in compilers
            assert compilers[cid].student == row['name'].decode('utf-8')
            for col in fieldnames:
                value = row[col]
                # Python 2: CSV cells/headers are byte strings.
                if col.decode('utf-8') in ('cid', 'name') or not value:
                    continue
                submit_versions.add(int(value))
    # Second pass: clone each version once, then rewrite the CSV in place.
    with open(sys.argv[2]) as fin, open(sys.argv[3], 'w') as fout:
        rejudge_versions = {}
        for version_id in submit_versions:
            old = db_session.query(Version).filter(Version.id == version_id).one()
            new = Version(compiler_id=old.compiler_id,
                          sha=old.sha,
                          phase='build',
                          status='pending')
            db_session.add(new)
            db_session.commit()
            rejudge_versions[old.id] = new.id
        reader = csv.DictReader(fin)
        writer = csv.DictWriter(fout, fieldnames)
        writer.writeheader()
        for row in reader:
            for col in fieldnames:
                value = row[col]
                if col.decode('utf-8') in ('cid', 'name') or not value:
                    continue
                row[col] = rejudge_versions[int(value)]
            writer.writerow(row)
    print('done!', len(rejudge_versions), 'submits to run')
def generate_final_result():
    """Render the static HTML pages of the contest's final standings.

    Usage: ./maintenance.py generate_final_result <rejudge_csv> <output_dir>

    Reads the rejudged-version CSV produced by final_rejudge, ranks the
    passing test runs per testcase, per person, per day, and renders
    Jinja2 templates into <output_dir> (which is wiped first).
    Python 2 only: relies on itervalues/iteritems/iterkeys and list-map.
    """
    if len(sys.argv) != 4:
        print('usage: ./maintenance.py generate_final_result <rejudge_csv> <output_dir>')
        sys.exit(1)
    # rank     1   2   3   4   5   6  7  8  9  10 11 12 13 14 15 16 17 18 19 20 21
    SCORE = [0, 15, 14, 13, 11, 11, 9, 9, 7, 7, 7, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 1]
    # Late-submission penalty (percent) per contest day.
    DISCOUNT = {'0531': 0, '0601': 1, '0602': 1+2, '0603': 1+2+3, '0611': 1+2+3+4}
    def set_rank(l, key):
        # Dense competition ranking: equal values (to 3 decimals) share
        # the rank of the first of the group.
        last_value = None
        last_rank = None
        for i, d in enumerate(l, start=1):
            value = float('{:.3f}'.format(d[key]))
            if value != last_value:
                last_value = value
                last_rank = i
            d['rank'] = last_rank
    def gen_testcase_rank(versions, version_ids):
        # Rank passing runs per testcase by running time; attach per-run
        # points (31 - rank) and min/max/avg/median statistics.
        testcases = {}
        for version_id in version_ids:
            for t in versions[version_id]['testcase'].itervalues():
                if t.testcase_id not in testcases:
                    testcases[t.testcase_id] = []
                if t.status == 'passed':
                    testcases[t.testcase_id].append(dict(t.__dict__))
        for t in testcases:
            l = sorted(testcases[t], key=lambda testcase: testcase['running_time'])
            set_rank(l, 'running_time')
            for d in l:
                rank = d['rank']
                d['points'] = 31 - rank
            # Python 2: map() returns a (sorted) list here.
            running_times = map(lambda testcase: testcase['running_time'], l)
            median = running_times[len(running_times) // 2]
            if len(running_times) % 2 == 0:
                median += running_times[len(running_times) // 2 - 1]
                median /= 2
            testcases[t] = {
                'testcase_id': t,
                'list': l,
                'vmap': {x['version_id']: x for x in l},
                'min': running_times[0],
                'max': running_times[-1],
                'avg': sum(running_times) / len(running_times),
                'median': median,
            }
        # Drop trivial testcases where even the slowest run is < 0.1s.
        for t in testcases.keys():
            if testcases[t]['max'] < 0.1:
                del testcases[t]
        return testcases
    def gen_person_rank(rejudge_table, versions, testcase_rank, discount):
        # Sum testcase points per compiler id, rank people, and convert
        # rank to a (discounted) score of 85 + bonus.
        people = {cid: {'points': 0, 'cid': cid} for cid in rejudge_table}
        for t in testcase_rank.itervalues():
            for d in t['list']:
                version_id = d['version_id']
                cid = versions[version_id]['version'].compiler_id
                people[cid]['points'] += d['points']
                people[cid]['version_id'] = version_id
        for cid in people.keys():
            if 'version_id' not in people[cid]:
                del people[cid]
        people = sorted(people.itervalues(), key=lambda x: x['points'], reverse=True)
        set_rank(people, 'points')
        for person in people:
            score = SCORE[person['rank']] if person['rank'] < len(SCORE) else SCORE[-1]
            person['score'] = 85 + score
            person['discounted_score'] = person['score'] * (1.0 - discount / 100.0)
        return people
    def gen_final_rank(person_rank):
        # A person's final score is the best discounted daily score.
        final_dict = {}
        for day in person_rank:
            for person in person_rank[day]:
                cid = person['cid']
                if cid not in final_dict:
                    final_dict[cid] = {}
                final_dict[cid][day] = person['discounted_score']
        for cid, d in final_dict.iteritems():
            max_score, max_day = None, None
            for day, score in d.iteritems():
                if max_score is None or score > max_score:
                    max_score = score
                    max_day = day
            d['max_score'], d['max_day'] = max_score, max_day
            d['cid'] = cid
        final = sorted(final_dict.itervalues(), key=lambda x: x['max_score'], reverse=True)
        set_rank(final, 'max_score')
        return final
    # fetch all kinds of information
    compilers = {c.id: c for c in db_session.query(Compiler)}
    rejudge_versions = set()
    rejudge_table = {}
    with open(sys.argv[2]) as fin:
        reader = csv.DictReader(fin)
        fieldnames = reader.fieldnames
        for row in reader:
            d = {}
            cid = int(row['cid'])
            assert cid in compilers
            assert compilers[cid].student == row['name'].decode('utf-8')
            for col in fieldnames:
                value = row[col]
                if col.decode('utf-8') in ('cid', 'name') or not value:
                    continue
                rejudge_versions.add(int(value))
                d[col] = int(value)
            rejudge_table[cid] = d
    versions = {v.id: {'version': v, 'testcase': {}}
                for v in db_session.query(Version).filter(Version.id.in_(rejudge_versions))}
    query = db_session.query(TestRun) \
        .filter(TestRun.version_id.in_(rejudge_versions)) \
        .filter(TestRun.phase.in_(['optim pretest', 'optim extended']))
    for t in query:
        versions[t.version_id]['testcase'][t.testcase_id] = t
    days = list(fieldnames)
    days.remove('cid')
    days.remove('name')
    # setup output environment
    output_dir = sys.argv[3]
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)
    env = Environment(
        loader=PackageLoader('core', 'templates'),
        autoescape=select_autoescape(['html', 'xml']),
    )
    env.filters['nl2monobr'] = utils.nl2monobr
    env.globals.update(website_name=settings.WEBSITE_NAME)
    env.globals.update(ROOT=settings.WEBROOT)
    env.globals.update(FINAL_ROOT=settings.FINAL_ROOT)
    # generate testcase rank
    testcase_rank = gen_testcase_rank(versions, rejudge_versions)
    template = env.get_template('final_testcase.html')
    os.makedirs(os.path.join(output_dir, 'all'))
    for t, d in testcase_rank.iteritems():
        filename = os.path.join(output_dir, 'all', 'testcase-{}.html'.format(t))
        with codecs.open(filename, 'w', 'utf-8') as f:
            f.write(template.render(compilers=compilers,
                                    versions=versions,
                                    day='All',
                                    show_score=False,
                                    **d))
    # generate daily rank
    testcase_rank = {}
    person_rank = {}
    for day in days:
        # collect version_ids
        version_ids = set()
        for cid in rejudge_table:
            version_id = rejudge_table[cid].get(day, None)
            if version_id and versions[version_id]['version'].phase == 'end':
                version_ids.add(version_id)
        # generate daily testcase rank
        testcase_rank[day] = gen_testcase_rank(versions, version_ids)
        template = env.get_template('final_testcase.html')
        os.makedirs(os.path.join(output_dir, day))
        for t, d in testcase_rank[day].iteritems():
            filename = os.path.join(output_dir, day, 'testcase-{}.html'.format(t))
            with codecs.open(filename, 'w', 'utf-8') as f:
                f.write(template.render(compilers=compilers,
                                        versions=versions,
                                        day=day,
                                        show_score=True,
                                        **d))
        # generate daily person rank
        person_rank[day] = gen_person_rank(rejudge_table, versions, testcase_rank[day], DISCOUNT[day])
        testcase_list = sorted(testcase_rank[day].iterkeys())
        template = env.get_template('final_person.html')
        filename = os.path.join(output_dir, day, 'result.html')
        with codecs.open(filename, 'w', 'utf-8') as f:
            f.write(template.render(compilers=compilers,
                                    versions=versions,
                                    testcase_rank=testcase_rank[day],
                                    testcase_list=testcase_list,
                                    discount=DISCOUNT[day],
                                    day=day,
                                    people=person_rank[day]))
    # generate final rank
    final_rank = gen_final_rank(person_rank)
    template = env.get_template('final_rank.html')
    filename = os.path.join(output_dir, 'result.html')
    with codecs.open(filename, 'w', 'utf-8') as f:
        f.write(template.render(compilers=compilers,
                                versions=versions,
                                final_rank=final_rank,
                                DISCOUNT=DISCOUNT,
                                days=days))
if __name__ == '__main__':
    # Command-line dispatch: the first argument selects one of the
    # maintenance actions below by function name.
    actions = [
        add_compiler,
        add_testcase,
        set_testcase,
        rejudge_version,
        initdb,
        makedirs,
        clear_judge_testcase_cache,
        final_rejudge,
        generate_final_result,
    ]
    action_map = {func.__name__: func for func in actions}
    if len(sys.argv) < 2 or sys.argv[1] not in action_map:
        print('usage: ./maintenance.py <action>')
        print('<action> can be:')
        for k in actions:
            print('    %s' % k.__name__)
        sys.exit(1)
    action_map[sys.argv[1]]()
| StarcoderdataPython |
11331606 | <filename>test/tests/multiprocessing_test.py
import multiprocessing
# from https://docs.python.org/2/library/multiprocessing.html
# Three independent examples pasted in sequence.  Each ``if __name__``
# guard executes immediately after the ``f`` above it, so every example
# uses its own ``f`` even though the name is redefined each time.
# The second and third examples use Python 2 print statements.
def f(x):
    return x*x
if __name__ == '__main__':
    p = multiprocessing.Pool(5)
    print(p.map(f, [1, 2, 3]))
def f(name):
    print 'hello', name
if __name__ == '__main__':
    p = multiprocessing.Process(target=f, args=('bob',))
    p.start()
    p.join()
def f(q):
    q.put([42, None, 'hello'])
if __name__ == '__main__':
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=f, args=(q,))
    p.start()
    print q.get()    # prints "[42, None, 'hello']"
    p.join()
| StarcoderdataPython |
6554682 | # DEPRECATED!!
import torch.nn as nn
import torch
import torch.optim as optimizer
import torch.nn.functional as F
'''
input : (15x15) numpy array
output : realno število ??
'''
class NNFullyConnected(nn.Module):
    """Fully connected network from observation-sized inputs to a softmax
    distribution over action-sized outputs.

    ``sizes`` lists the layer widths; *activation* is inserted between
    consecutive Linear layers (not after the last one).
    NOTE(review): ``output_activation`` is accepted but never used -- the
    forward pass always applies a softmax; confirm before relying on it.
    """

    def __init__(self, sizes, activation=nn.ReLU(inplace=True), output_activation=None):
        super(NNFullyConnected, self).__init__()
        modules = []
        last_hidden = len(sizes) - 2
        for idx, (n_in, n_out) in enumerate(zip(sizes[:-1], sizes[1:])):
            modules.append(nn.Linear(n_in, n_out))
            if idx < last_hidden:
                modules.append(activation)
        self.fwd = nn.Sequential(*modules)

    def forward(self, x):
        logits = self.fwd(x)
        return F.softmax(logits, dim=-1)
## class Mozgani():
## '''
## Overlay
## '''
## def __init__(self, nn) -> None:
## self.nn = nn
## def igrajPotezo()
class EncodingNNPolicy():
    """Encoding + NN policy variant.

    Learning idea: policy gradients for the network, hyperopt +
    hyper_optimisation.py for the hyper-parameters.
    """
    def __init__(self) -> None:
        # Bug fix: the network class defined in this module is
        # NNFullyConnected; ``NNPolicy`` does not exist here.
        self.nn = NNFullyConnected([255, 100, 1])
        # Cache of already calculated potentials.
        self.memory = []

    def train(self):
        # Bug fix: ``self`` was missing from the signature, and the
        # exception class was *returned* instead of raised.
        raise NotImplementedError
if __name__ == '__main__':
    # Module is marked DEPRECATED at the top; running it directly is
    # intentionally unsupported.
    raise NotImplementedError
1682119 | <reponame>bmacphee/sqlalchemy
from typing import List
from typing import TYPE_CHECKING
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import relationship
from sqlalchemy.orm.decl_api import declared_attr
from sqlalchemy.orm.relationships import RelationshipProperty
from . import Base
if TYPE_CHECKING:
from .address import Address
class User(Base):
    """ORM user; primary key / table configuration come from the shared Base."""
    name = Column(String)
    othername = Column(String)
    # One-to-many: mirrored by Address.user via back_populates.
    addresses: Mapped[List["Address"]] = relationship(
        "Address", back_populates="user"
    )
class HasUser:
    """Declarative mixin adding a required FK to User plus the relationship.

    ``declared_attr`` defers column/relationship creation until the mixin
    is composed into a concrete mapped class.
    """
    @declared_attr
    def user_id(self) -> "Column[Integer]":
        # NOT NULL with cascading delete/update tied to the parent User row.
        return Column(
            Integer,
            ForeignKey(User.id, ondelete="CASCADE", onupdate="CASCADE"),
            nullable=False,
        )

    @declared_attr
    def user(self) -> RelationshipProperty[User]:
        return relationship(User)
| StarcoderdataPython |
11239101 | # coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.models.coupled_model import CoupledModel # noqa: E501
from openapi_server.test import BaseTestCase
class TestCoupledModelController(BaseTestCase):
    """CoupledModelController integration test stubs.

    Only the GET/list test is active; the remaining generated stubs are
    kept commented out below the class body.
    """

    def test_coupledmodels_get(self):
        """Test case for coupledmodels_get

        List all instances of CoupledModel
        """
        query_string = [('username', '<EMAIL>')]
        headers = { 
            'Accept': 'application/json',
        }
        response = self.client.open(
            '/v1.7.0/coupledmodels',
            method='GET',
            headers=headers,
            query_string=query_string)
        self.logger.info("Response length {}".format(len(response.json)))
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
# def test_coupledmodels_id_delete(self):
# """Test case for coupledmodels_id_delete
#
# Delete an existing CoupledModel
# """
# headers = {
# 'Authorization': 'Bearer special-key',
# }
# response = self.client.open(
# '/v1.7.0/coupledmodels/{id}'.format(id='id_example', user='user_example'),
# method='DELETE',
# headers=headers)
# self.assert200(response,
# 'Response body is : ' + response.data.decode('utf-8'))
#
# def test_coupledmodels_id_get(self):
# """Test case for coupledmodels_id_get
#
# Get a single CoupledModel by its id
# """
# query_string = [('username', 'username_example')]
# headers = {
# 'Accept': 'application/json',
# }
# response = self.client.open(
# '/v1.7.0/coupledmodels/{id}'.format(id='id_example'),
# method='GET',
# headers=headers,
# query_string=query_string)
# self.assert200(response,
# 'Response body is : ' + response.data.decode('utf-8'))
#
# def test_coupledmodels_id_put(self):
# """Test case for coupledmodels_id_put
#
# Update an existing CoupledModel
# """
# coupled_model = {
# "value" : {
# "id" : "some_id"
# }
#}
# headers = {
# 'Accept': 'application/json',
# 'Content-Type': 'application/json',
# 'Authorization': 'Bearer special-key',
# }
# response = self.client.open(
# '/v1.7.0/coupledmodels/{id}'.format(id='id_example', user='user_example'),
# method='PUT',
# headers=headers,
# data=json.dumps(coupled_model),
# content_type='application/json')
# self.assert200(response,
# 'Response body is : ' + response.data.decode('utf-8'))
#
# def test_coupledmodels_post(self):
# """Test case for coupledmodels_post
#
# Create one CoupledModel
# """
# coupled_model = {
# "value" : {
# "id" : "some_id"
# }
#}
# headers = {
# 'Accept': 'application/json',
# 'Content-Type': 'application/json',
# 'Authorization': 'Bearer special-key',
# }
# response = self.client.open(
# '/v1.7.0/coupledmodels'.format(user='user_example'),
# method='POST',
# headers=headers,
# data=json.dumps(coupled_model),
# content_type='application/json')
# self.assert200(response,
# 'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
    unittest.main()  # Allow running this test module directly.
| StarcoderdataPython |
6452167 | #More guests:
# Invitation script: invite the initial guests, swap in a replacement for
# a no-show, then announce three extra guests once a bigger table is found.
names = ['tony', 'steve', 'thor']
message = ", You are invited!"
attendeemessage = " is not coming."
for guest in names:
    print(guest + message)
# steve cannot make it -- replace him with peter and re-invite everyone.
print(names[1] + attendeemessage)
del names[1]
names.insert(1, 'peter')
for guest in names:
    print(guest + message)
print("Ok, We found new table for booking.")
# Bigger table: add guests at the front, the middle and the end.
names.insert(0, 'jessi')
names.insert(3, 'laura')
names.append('sandy')
for guest in names:
    print(guest + message)
| StarcoderdataPython |
4995709 | import open3d as o3d
import numpy as np
import matplotlib.pyplot as plt
# Grab intensity of items = how much the laser reflects off the object.
# PCD files begin with a text header; lines that do not parse as at least
# four floats (header lines, short rows) are skipped on purpose.
intensity = []
with open('object3dF1.pcd') as f:
    for line in f:
        try:
            values = [float(tok) for tok in line.split()]
            intensity.append(values[3])
        except (ValueError, IndexError):
            # Bug fix: the bare ``except`` also swallowed
            # KeyboardInterrupt/SystemExit; only parse errors are expected.
            pass
# Read in point cloud file and visualize
# Full scan
pcd = o3d.io.read_point_cloud('object3dF1.pcd')
o3d.visualization.draw_geometries([pcd])
#pcd = o3d.io.read_point_cloud('object3dF2.pcd')
#o3d.visualization.draw_geometries([pcd])
#pcd = o3d.io.read_point_cloud('object3d2.pcd')
# Hallway
#pcd = o3d.io.read_point_cloud('object3Y.pcd')
# Lab room more points
#pcd = o3d.io.read_point_cloud('labscene.pcd')
# New room with box
#pcd = o3d.io.read_point_cloud('1inliers2.pcd')
#o3d.visualization.draw_geometries([pcd])
# Max angle is calculated using a simple free body diagram, assuming
# a coefficient of friction on a carpet of 0.016 and assuming
# the robot moves at a constant velocity, making the acceleration = 0
maxangle = 0.92;
# NOTE(review): pcd.points[2] is a single (x, y, z) point, so this takes
# the smallest coordinate of that one point -- probably intended to be
# the minimum over all z values; confirm against the sensor frame.
min_z = min(pcd.points[2])
#print(min_z, max(pcd.points[2]))
max_z_floor = 0.001*maxangle*min_z
# Translate into 2D by removing anything considered "floor"
# Recognizing anything remaining with a z component to be an obstacle
# with open('inlierfilterpcd.xyz','w') as f:
# for ptcld in pcd.points:
# if ptcld[2] > max_z_floor:
# #print("in loop")
# f.write(str(ptcld[0]))
# f.write(" ")
# f.write(str(ptcld[1]))
# f.write(" ")
# f.write(str(ptcld[2]))
# f.write("\n")
# Keep only points with a reflection intensity above 50 and write them
# out as an xyz file, then cluster the remainder and build an octree.
with open('object3dfiltered.xyz','w') as f:
    for i, ptcld in enumerate(pcd.points):
        if int(intensity[i]) > 50:
            f.write(str(ptcld[0]))
            f.write(" ")
            f.write(str(ptcld[1]))
            f.write(" ")
            f.write(str(ptcld[2]))
            f.write("\n")
newpcd = o3d.io.read_point_cloud('object3dfiltered.xyz')
o3d.visualization.draw_geometries([newpcd])
#Cluster
with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:
    # labels is an array of the cluster number for each point cloud
    labels = np.array(newpcd.cluster_dbscan(eps=0.2,min_points=12,print_progress=False))
# Each cluster has its own label number
max_label = labels.max()
colors = plt.get_cmap("summer")(labels / (max_label if max_label > 0 else 1))
colors[labels < 0] = 0
newpcd.colors = o3d.utility.Vector3dVector(colors[:, :3])
# Keep the indices of every non-black point (black == DBSCAN noise).
# Bug fix: the original loop incremented its counter after a ``continue``
# so it stopped advancing on the first black point, desynchronizing the
# indices from the colors array.  enumerate() advances unconditionally.
indices = []
for j, color in enumerate(np.asarray(newpcd.colors)):
    if color[0] == 0 and color[1] == 0 and color[2] == 0:
        continue
    indices.append(j)
points = np.asarray(newpcd.points)
newpcd.points = o3d.utility.Vector3dVector(points[indices,:])
with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:
    labels = np.array(newpcd.cluster_dbscan(eps=0.2,min_points=12,print_progress=True))
max_label = labels.max()
print(f"point cloud has {max_label+1} clusters")
colors = plt.get_cmap("summer")(labels / (max_label if max_label > 0 else 1))
colors[labels < 0] = 0
newpcd.colors = o3d.utility.Vector3dVector(colors[:, :3])
o3d.visualization.draw_geometries([newpcd])
#print(labels)
# Octree
octree = o3d.geometry.Octree(max_depth=6)
octree.convert_from_point_cloud(newpcd,size_expand=0.05)
#octree.traverse(f_traverse)
o3d.visualization.draw_geometries([octree])
x = octree.locate_leaf_node(newpcd.points[10])
string1 = str(x[0])
string2 = str(x[1])
# Bug fix: str.strip(s) removes *characters from the set s* at both ends,
# not the prefix string; remove the leading description explicitly.
prefix1 = 'OctreePointColorLeafNode with color '
prefix2 = 'OctreeNodeInfo with origin '
new1 = string1[len(prefix1):] if string1.startswith(prefix1) else string1
new2 = string2[len(prefix2):] if string2.startswith(prefix2) else string2
#print(new1)
#print(new2)
#print(octree.locate_leaf_node(newpcd.points[10]))
#print(octree.locate_leaf_node(newpcd.points[11]))
#print(octree.locate_leaf_node(newpcd.points[15]))
if __name__ == "__main__":
    pass
| StarcoderdataPython |
9650556 | from construct import *
from construct.lib import *
def enum_int_range_u__constants(subcon):
    """Wrap *subcon* in the enum labels used by enum_int_range_u."""
    labels = {'zero': 0, 'int_max': 4294967295}
    return Enum(subcon, **labels)
# Two consecutive big-endian u32 fields, both decoded through the same
# zero/int_max enum labels.
enum_int_range_u = Struct(
    'f1' / enum_int_range_u__constants(Int32ub),
    'f2' / enum_int_range_u__constants(Int32ub),
)
# Exported entry point expected by the test harness.
_schema = enum_int_range_u
| StarcoderdataPython |
1791572 | #!/usr/bin/env python
import requests

# Bug fix: the line below was bare prose (a SyntaxError); it is a comment.
# HTTP POST message to cloud platform.
# Most times the gateway communicates with the cloud via MQTT (hbmqtt
# framework); at some special times it needs to use HTTP (requests
# framework) to POST messages to the cloud.
| StarcoderdataPython |
1746538 | #Import accel module
from rstem import accel
from time import sleep
#Initialze accelerometer on i2c bus 1, on early Pi's this may be 0 instead
accel.init(1)
# Poll the accelerometer forever at 4 Hz and print raw forces and derived
# orientation angles on one formatted line.
while True:
    force = accel.read() #Returns a tuple of the form (x, y, z) acceleration
    angles = accel.angles() #Returns a tuple of the form (roll, pitch, elevation) force
    # Print values in a nicely formatted way
    print("X: {0[0]: < 8.4f} Y: {0[1]: < 8.4f} Z: {0[2]: < 8.4f} Roll: {1[0]: < 8.4f} Pitch: {1[1]: < 8.4f} Elevation: {1[2]: < 8.4f}".format(force, angles))
    sleep(0.25)
1954949 |
class URLValidator:
def validate_url(self, url: str) -> bool:
pass
| StarcoderdataPython |
9753068 | <reponame>kids-first/kf-api-release-coordinator
import json
import pytest
from coordinator.api.models import Release, Event
from coordinator.api.models.release import next_version
def test_version_bumping(db):
    """A first release starts at 0.0.0 and next_version bumps each part."""
    release = Release()
    release.save()
    assert str(release.version) == '0.0.0'
    expected = [
        ((), '0.0.1'),
        (('major',), '1.0.0'),
        (('minor',), '0.1.0'),
        (('patch',), '0.0.1'),
    ]
    for flags, version in expected:
        assert str(next_version(**{flag: True for flag in flags})) == version
def test_no_releases(client, transactional_db):
    """The releases endpoint responds 200 even with an empty table."""
    assert Release.objects.count() == 0
    response = client.get('http://testserver/releases')
    assert response.status_code == 200
def test_new_release(admin_client, transactional_db, studies):
    """ Test that new releases may be made """
    assert Release.objects.count() == 0
    release = {
        'name': 'My Release',
        'studies': ['SD_00000001'],
        'author': 'bob'
    }
    resp = admin_client.post('http://testserver/releases', data=release)
    assert resp.status_code == 201
    assert Release.objects.count() == 1
    res = resp.json()
    # A fresh release gets a generated RE_-prefixed id, no tags, and
    # starts at version 0.0.0 regardless of input.
    assert res['kf_id'].startswith('RE_')
    assert len(res['kf_id']) == 11
    assert res['author'] == 'bob'
    assert res['tags'] == []
    assert res['studies'] == ['SD_00000001']
    assert res['version'] == '0.0.0'
def test_release_filters(client, db, releases):
    """ Test that releases can be filtered by state """
    resp = client.get('http://testserver/releases?state=waiting')
    assert resp.json()['count'] == 5
    # Move four of the five fixture releases into other states.
    r = list(releases.values())
    r[0].state = 'staged'
    r[0].save()
    r[1].state = 'published'
    r[1].save()
    r[2].state = 'published'
    r[2].save()
    r[3].state = 'running'
    r[3].save()
    resp = client.get('http://testserver/releases?state=published')
    assert resp.json()['count'] == 2
    resp = client.get('http://testserver/releases?state=staged')
    assert resp.json()['count'] == 1
    assert resp.json()['results'][0]['kf_id'] == r[0].kf_id
    # An unknown state value matches nothing rather than erroring.
    resp = client.get('http://testserver/releases?state=None')
    assert resp.json()['count'] == 0
def test_patch_bump(admin_client, transactional_db, studies):
    """Creating a second release bumps only the patch version number."""
    assert Release.objects.count() == 0
    release = {
        'name': 'First Release',
        'studies': ['SD_00000001'],
        'author': 'bob'
    }
    resp = admin_client.post('http://testserver/releases', data=release)
    assert Release.objects.count() == 1
    res = resp.json()
    assert res['version'] == '0.0.0'
    release = {
        'name': 'Second Release',
        'studies': ['SD_00000001'],
        'author': 'bob'
    }
    resp = admin_client.post('http://testserver/releases', data=release)
    assert Release.objects.count() == 2
    res = resp.json()
    assert res['version'] == '0.0.1'
    # Listing is newest-first.
    resp = admin_client.get('http://testserver/releases')
    res = resp.json()
    assert len(res['results']) == 2
    assert res['results'][0]['version'] == '0.0.1'
    assert res['results'][1]['version'] == '0.0.0'
# NOTE(review): this test is shadowed by a second ``def test_minor_bump``
# later in the module, so pytest never collects it unless the duplicate
# is renamed.
def test_minor_bump(admin_client, transactional_db, studies, worker):
    """ Test that the minor version number is bumped upon publish """
    release = {
        'name': 'First Release',
        'studies': ['SD_00000001'],
        'author': 'bob'
    }
    resp = admin_client.post('http://testserver/releases', data=release)
    assert Release.objects.count() == 1
    res = resp.json()
    assert res['version'] == '0.0.0'
    # Drain the task queue so the release reaches a publishable state.
    worker.work(burst=True)
    resp = admin_client.get('http://testserver/releases/'+res['kf_id'])
    res = resp.json()
    resp = admin_client.post('http://testserver/releases/' +
                             res['kf_id']+'/publish')
    worker.work(burst=True)
    resp = admin_client.get('http://testserver/releases/'+res['kf_id'])
    res = resp.json()
    assert res['version'] == '0.1.0'
    assert str(Release.objects.first().version) == '0.1.0'
def test_major_bump(admin_client, transactional_db, studies, worker):
    """Test that the major version number is bumped upon publish.

    Bug fix: this was previously also named ``test_minor_bump``, which
    shadowed the real minor-bump test above so only one of the two ever
    ran.  The docstring and ``is_major`` flag show this is the major case.
    """
    release = {
        'name': 'First Release',
        'studies': ['SD_00000001'],
        'author': 'bob',
        'is_major': True,
    }
    resp = admin_client.post('http://testserver/releases', data=release)
    assert Release.objects.count() == 1
    res = resp.json()
    assert res['version'] == '0.0.0'
    # Drain the task queue so the release reaches a publishable state.
    worker.work(burst=True)
    resp = admin_client.get('http://testserver/releases/'+res['kf_id'])
    res = resp.json()
    resp = admin_client.post('http://testserver/releases/' +
                             res['kf_id']+'/publish')
    worker.work(burst=True)
    resp = admin_client.get('http://testserver/releases/'+res['kf_id'])
    res = resp.json()
    assert res['version'] == '1.0.0'
    assert str(Release.objects.first().version) == '1.0.0'
def test_version_readonly(admin_client, studies):
    """ Test that the user may not assign the version """
    release = {
        'name': 'First Release',
        'studies': ['SD_00000001'],
        'version': '1.1.1',
    }
    resp = admin_client.post('http://testserver/releases', data=release)
    res = resp.json()
    # The supplied version is silently ignored, not rejected.
    assert res['version'] == '0.0.0'
def test_new_tag(admin_client, transactional_db, study):
    """ Test that tags are updated correctly """
    assert Release.objects.count() == 0
    release = {
        'name': 'My Release',
        'studies': ['SD_00000001']
    }
    resp = admin_client.post('http://testserver/releases', data=release)
    assert resp.status_code == 201
    assert Release.objects.count() == 1
    assert resp.json()['tags'] == []
    kf_id = resp.json()['kf_id']
    # PATCH must also re-send studies alongside the new tags.
    tags = {'tags': ['Needs Review', 'Data Fix'], 'studies': ['SD_00000001']}
    resp = admin_client.patch('http://testserver/releases/'+kf_id,
                              data=json.dumps(tags),
                              content_type='application/json')
    assert resp.status_code == 200
    assert resp.json()['tags'] == tags['tags']
def test_get_release_by_id(client, transactional_db, release):
    """ Test that releases may be retrieved by id """
    assert release['kf_id'].startswith('RE_')
    assert len(release['kf_id']) == 11
    resp = client.get('http://testserver/releases/'+release['kf_id'])
    assert resp.status_code == 200
    assert Release.objects.count() == 1
    # The detail response echoes the same well-formed kf_id.
    assert resp.json()['kf_id'].startswith('RE_')
    assert len(resp.json()['kf_id']) == 11
def test_cancel_release(admin_client, transactional_db, release, worker):
    """ Test that a release is canceled and not deleted """
    kf_id = release['kf_id']
    assert Release.objects.count() == 1
    # DELETE transitions to 'canceling'; the worker completes the cancel.
    resp = admin_client.delete('http://testserver/releases/'+kf_id)
    worker.work(burst=True)
    assert Release.objects.count() == 1
    res = resp.json()
    assert res['state'] == 'canceling'
    resp = admin_client.get('http://testserver/releases/'+kf_id)
    res = resp.json()
    assert res['state'] == 'canceled'
    # Make sure that we don't re-cancel the release
    assert Event.objects.count() == 2
    resp = admin_client.delete('http://testserver/releases/'+kf_id)
    assert Event.objects.count() == 2
def test_cancel_release_404(admin_client, transactional_db, release):
    """Deleting an unknown release id leaves existing releases untouched."""
    kf_id = release['kf_id']
    assert Release.objects.count() == 1
    resp = admin_client.delete('http://testserver/releases/RE_00000000')
    assert Release.objects.count() == 1
    res = resp.json()
    resp = admin_client.get('http://testserver/releases/'+kf_id)
    res = resp.json()
    # The real release never left its initial state.
    assert res['state'] == 'waiting'
def test_study_validator(admin_client, transactional_db):
    """ Test that only correctly formatted study ids are accepted """
    # Unknown study id is rejected with a per-field error message.
    release = {
        'name': 'My Release',
        'studies': ['SD_000', 'SD_00000000'],
    }
    resp = admin_client.post('http://testserver/releases', data=release)
    assert resp.status_code == 400
    res = resp.json()
    assert 'studies' in res
    assert len(res['studies']) == 1
    assert res['studies'][0] == 'Invalid pk "SD_000" - object does not exist.'
    # An empty study list is also rejected.
    release = {
        'name': 'Release 1',
        'studies': [],
    }
    resp = admin_client.post('http://testserver/releases', data=release)
    assert resp.status_code == 400
    res = resp.json()
    assert 'studies' in res
    assert len(res['studies']) == 1
    assert 'Must have at least one study' in res['studies'][0]
def test_release_relations(client, transactional_db, task):
    """The release listing embeds its related tasks inline."""
    resp = client.get('http://testserver/releases')
    res = resp.json()['results'][0]
    assert 'tasks' in res
    assert len(res['tasks']) == 1
    assert 'kf_id' in res['tasks'][0]
    assert res['tasks'][0]['progress'] == 0
| StarcoderdataPython |
226078 | <reponame>klassen-software-solutions/BuildSystem
#!/usr/bin/env python3
"""Program to run a static analysis on a directory.
Note that at present this simply runs pylint.
"""
import os
import subprocess
import sys
def _main(directory):
if not os.path.isdir(directory):
print("'%s' does not exist or is not a directory" % directory)
sys.exit(-1)
subprocess.run("pylint %s" % directory, shell=True, check=True)
if __name__ == '__main__':
    # Exactly one argument: the directory to analyze.
    if len(sys.argv) != 2:
        print("usage: python_analyzer.py <directory>", file=sys.stderr)
        sys.exit(-1)
    _main(sys.argv[1])
| StarcoderdataPython |
4904471 | <reponame>TianXie1999/selective-inference
import numpy as np
import sys
from scipy.stats import norm
import regreg.api as rr
from .credible_intervals import projected_langevin
from .lasso_reduced import nonnegative_softmax_scaled, neg_log_cube_probability
class selection_probability_objective_ms_lasso(rr.smooth_atom):
    def __init__(self,
                 X,
                 feasible_point, #in R^{|E|_1 + |E|_2}
                 active_1, #the active set chosen by randomized marginal screening
                 active_2, #the active set chosen by randomized lasso
                 active_signs_1, #the set of signs of active coordinates chosen by ms
                 active_signs_2, #the set of signs of active coordinates chosen by lasso
                 lagrange, #in R^p
                 threshold, #in R^p
                 mean_parameter, # in R^n
                 noise_variance,
                 randomizer,
                 epsilon, #ridge penalty for randomized lasso
                 coef=1.,
                 offset=None,
                 quadratic=None,
                 nstep=10):
        """Build the selection-probability objective for the two-step
        marginal-screening (step 1) + lasso (step 2) selection event.

        The optimization variable lives in R^{n + |E_1| + |E_2|}: the
        first n coordinates are the data/response part, the remaining
        coordinates are the (nonnegative) active optimization variables
        of the two randomized selection steps.  The total objective is
        the sum of two active CGF-conjugate terms, two inactive "cube"
        barrier terms, the Gaussian likelihood and a nonnegativity
        softmax barrier.
        """
        n, p = X.shape
        self._X = X
        # Sizes of the two active sets (boolean masks summed).
        E_1 = active_1.sum()
        E_2 = active_2.sum()
        sigma = np.sqrt(noise_variance)
        self.active_1 = active_1
        self.active_2 = active_2
        self.noise_variance = noise_variance
        self.randomization = randomizer
        self.inactive_conjugate = self.active_conjugate = randomizer.CGF_conjugate
        if self.active_conjugate is None:
            raise ValueError(
                'randomization must know its CGF_conjugate -- currently only isotropic_gaussian and laplace are implemented and are assumed to be randomization with IID coordinates')
        # Starting point: response coordinates at 0, optimization
        # coordinates at the supplied feasible point.
        initial = np.zeros(n + E_1 + E_2, )
        initial[n:] = feasible_point
        self.n = n
        rr.smooth_atom.__init__(self,
                                (n + E_1 + E_2,),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)
        self.coefs[:] = initial
        # Nonnegativity barrier on the optimization coordinates only.
        nonnegative = nonnegative_softmax_scaled(E_1 + E_2)
        opt_vars = np.zeros(n + E_1 + E_2, bool)
        opt_vars[n:] = 1
        self._opt_selector = rr.selector(opt_vars, (n + E_1 + E_2,))
        self.nonnegative_barrier = nonnegative.linear(self._opt_selector)
        self._response_selector = rr.selector(~opt_vars, (n + E_1 + E_2,))
        self.set_parameter(mean_parameter, noise_variance)
        # Coordinate masks: marginal screening sees (response, E_1 block);
        # the lasso sees (response, E_2 block).
        arg_ms = np.zeros(self.n + E_1 + E_2, bool)
        arg_ms[:self.n + E_1] = 1
        arg_lasso = np.zeros(self.n + E_1, bool)
        arg_lasso[:self.n] = 1
        arg_lasso = np.append(arg_lasso, np.ones(E_2, bool))
        # --- Step 1: randomized marginal screening affine maps ---
        self.A_active_1 = np.hstack([np.true_divide(-X[:, active_1].T, sigma), np.identity(E_1)
                                     * active_signs_1[None, :]])
        self.A_inactive_1 = np.hstack([np.true_divide(-X[:, ~active_1].T, sigma), np.zeros((p - E_1, E_1))])
        self.offset_active_1 = active_signs_1 * threshold[active_1]
        self.offset_inactive_1 = np.zeros(p - E_1)
        self._active_ms = rr.selector(arg_ms, (self.n + E_1 + E_2,),
                                      rr.affine_transform(self.A_active_1, self.offset_active_1))
        self._inactive_ms = rr.selector(arg_ms, (self.n + E_1 + E_2,),
                                        rr.affine_transform(self.A_inactive_1, self.offset_inactive_1))
        self.active_conj_loss_1 = rr.affine_smooth(self.active_conjugate, self._active_ms)
        self.q_1 = p - E_1
        cube_obj_1 = neg_log_cube_probability(self.q_1, threshold[~active_1], randomization_scale=1.)
        self.cube_loss_1 = rr.affine_smooth(cube_obj_1, self._inactive_ms)
        # --- Step 2: randomized lasso on the screened columns ---
        X_step2 = X[:, active_1]
        X_E_2 = X_step2[:, active_2]
        B = X_step2.T.dot(X_E_2)
        B_E = B[active_2]
        B_mE = B[~active_2]
        self.A_active_2 = np.hstack(
            [-X_step2[:, active_2].T, (B_E + epsilon * np.identity(E_2)) * active_signs_2[None, :]])
        self.A_inactive_2 = np.hstack([-X_step2[:, ~active_2].T, (B_mE * active_signs_2[None, :])])
        self.offset_active_2 = active_signs_2 * lagrange[active_2]
        self.offset_inactive_2 = np.zeros(E_1 - E_2)
        self._active_lasso = rr.selector(arg_lasso, (self.n + E_1 + E_2,),
                                         rr.affine_transform(self.A_active_2, self.offset_active_2))
        self._inactive_lasso = rr.selector(arg_lasso, (self.n + E_1 + E_2,),
                                           rr.affine_transform(self.A_inactive_2, self.offset_inactive_2))
        self.active_conj_loss_2 = rr.affine_smooth(self.active_conjugate, self._active_lasso)
        self.q_2 = E_1 - E_2
        cube_obj_2 = neg_log_cube_probability(self.q_2, lagrange[~active_2], randomization_scale=1.)
        self.cube_loss_2 = rr.affine_smooth(cube_obj_2, self._inactive_lasso)
        # Total objective minimized by the sampler/optimizer.
        self.total_loss = rr.smooth_sum([self.active_conj_loss_1,
                                         self.active_conj_loss_2,
                                         self.cube_loss_1,
                                         self.cube_loss_2,
                                         self.likelihood_loss,
                                         self.nonnegative_barrier])
    def set_parameter(self, mean_parameter, noise_variance):
        r"""
        Set :math:`\beta_E^*`: rebuild the Gaussian likelihood term centered at
        ``mean_parameter`` with variance ``noise_variance``.

        NOTE(review): this rebinds ``self.likelihood_loss`` but does not rebuild
        ``self.total_loss`` (assembled in ``__init__``) — confirm that
        ``rr.smooth_sum`` picks up the new object, otherwise the composite
        objective keeps using the old likelihood.
        """
        # Drop singleton dimensions so the signal approximator sees a 1-D mean.
        mean_parameter = np.squeeze(mean_parameter)
        likelihood_loss = rr.signal_approximator(mean_parameter, coef=1. / noise_variance)
        # Compose with the selector that extracts the response coordinates of
        # the stacked (response, optimization) variable.
        self.likelihood_loss = rr.affine_smooth(likelihood_loss, self._response_selector)
def smooth_objective(self, param, mode='both', check_feasibility=False):
"""
Evaluate the smooth objective, computing its value, gradient or both.
Parameters
----------
mean_param : ndarray
The current parameter values.
mode : str
One of ['func', 'grad', 'both'].
check_feasibility : bool
If True, return `np.inf` when
point is not feasible, i.e. when `mean_param` is not
in the domain.
Returns
-------
If `mode` is 'func' returns just the objective value
at `mean_param`, else if `mode` is 'grad' returns the gradient
else returns both.
"""
param = self.apply_offset(param)
if mode == 'func':
f = self.total_loss.smooth_objective(param, 'func')
return self.scale(f)
elif mode == 'grad':
g = self.total_loss.smooth_objective(param, 'grad')
return self.scale(g)
elif mode == 'both':
f = self.total_loss.smooth_objective(param, 'func')
g = self.total_loss.smooth_objective(param, 'grad')
return self.scale(f), self.scale(g)
else:
raise ValueError("mode incorrectly specified")
    def minimize2(self, step=1, nstep=30, tol=1.e-8):
        """
        Minimize the composite objective by damped gradient steps with
        backtracking, keeping the optimization coordinates feasible.

        Parameters
        ----------
        step : float
            Initial step size; halved while infeasible / non-descent,
            doubled every 4 outer iterations.
        nstep : int
            Number of outer iterations.
        tol : float
            Relative-decrease threshold used to stop the backtracking loop.

        Returns
        -------
        (current, value) : the final iterate and its objective value.

        NOTE(review): the outer loop always runs all ``nstep`` iterations —
        the ``tol`` test only breaks the inner backtracking loop; confirm
        whether an outer convergence check was intended.
        """
        n, p = self._X.shape  # assumes self._X is the (n, p) design matrix — TODO confirm
        current = self.coefs
        current_value = np.inf
        objective = lambda u: self.smooth_objective(u, 'func')
        grad = lambda u: self.smooth_objective(u, 'grad')
        for itercount in range(nstep):
            # Gradient step scaled by the noise variance.
            newton_step = grad(current) * self.noise_variance
            # Backtrack until the optimization coordinates (entries past the
            # first n response coordinates) stay strictly positive.
            count = 0
            while True:
                count += 1
                proposal = current - step * newton_step
                if np.all(proposal[n:] > 0):
                    break
                step *= 0.5
                if count >= 40:
                    raise ValueError('not finding a feasible point')
            # Backtrack further until the proposal does not increase the objective.
            count = 0
            while True:
                proposal = current - step * newton_step
                proposed_value = objective(proposal)
                if proposed_value <= current_value:
                    break
                step *= 0.5
                # Stop backtracking once the relative change is negligible.
                if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value):
                    current = proposal
                    current_value = proposed_value
                    break
            current = proposal
            current_value = proposed_value
            # Periodically grow the step to recover from aggressive halving.
            if itercount % 4 == 0:
                step *= 2
        value = objective(current)
        return current, value
class sel_prob_gradient_map_ms_lasso(rr.smooth_atom):
    """
    Smooth map from the generative parameter to the (negative log) selection
    probability of the marginal-screening + lasso two-stage procedure, exposing
    value and gradient for use inside a larger rr objective.
    """

    def __init__(self,
                 X,
                 feasible_point,  # in R^{|E|_1 + |E|_2}
                 active_1,  # the active set chosen by randomized marginal screening
                 active_2,  # the active set chosen by randomized lasso
                 active_signs_1,  # the set of signs of active coordinates chosen by ms
                 active_signs_2,  # the set of signs of active coordinates chosen by lasso
                 lagrange,  # in R^p
                 threshold,  # in R^p
                 generative_X,  # in R^{p}\times R^{n}
                 noise_variance,
                 randomizer,
                 epsilon,  # ridge penalty for randomized lasso
                 coef=1.,
                 offset=None,
                 quadratic=None):
        # Sizes of the two stage-wise active sets (boolean masks summed).
        self.E_1 = active_1.sum()
        self.E_2 = active_2.sum()
        self.n, self.p = X.shape
        # Dimension of the generative parameter this atom is a function of.
        self.dim = generative_X.shape[1]
        self.noise_variance = noise_variance
        # Stash every constructor argument; they are forwarded verbatim to the
        # primal selection-probability objective at each evaluation.
        (self.X, self.feasible_point, self.active_1, self.active_2, self.active_signs_1, self.active_signs_2,
         self.lagrange, self.threshold, self.generative_X, self.noise_variance, self.randomizer, self.epsilon) \
            = (X, feasible_point, active_1, active_2, active_signs_1, active_signs_2, lagrange,
               threshold, generative_X, noise_variance, randomizer, epsilon)
        rr.smooth_atom.__init__(self,
                                (self.dim,),
                                offset=offset,
                                quadratic=quadratic,
                                coef=coef)

    def smooth_objective(self, true_param, mode='both', check_feasibility=False, tol=1.e-6):
        """
        Solve the primal selection-probability problem at the mean implied by
        ``true_param`` and return its optimal value and/or the gradient with
        respect to ``true_param``.
        """
        true_param = self.apply_offset(true_param)
        mean_parameter = np.squeeze(self.generative_X.dot(true_param))
        # Fresh primal problem at this mean; solved from scratch every call.
        primal_sol = selection_probability_objective_ms_lasso(self.X,
                                                              self.feasible_point,
                                                              self.active_1,
                                                              self.active_2,
                                                              self.active_signs_1,
                                                              self.active_signs_2,
                                                              self.lagrange,
                                                              self.threshold,
                                                              mean_parameter,
                                                              self.noise_variance,
                                                              self.randomizer,
                                                              self.epsilon)
        # minimize2 returns (argmin, value); reversed here to (value, argmin).
        sel_prob_primal = primal_sol.minimize2(nstep=100)[::-1]
        # First n coordinates of the argmin are the optimal response variables.
        optimal_primal = (sel_prob_primal[1])[:self.n]
        sel_prob_val = -sel_prob_primal[0]
        # Chain rule: d(value)/d(true_param) through mean_parameter = generative_X @ true_param.
        optimizer = self.generative_X.T.dot(np.true_divide(optimal_primal - mean_parameter, self.noise_variance))
        if mode == 'func':
            return sel_prob_val
        elif mode == 'grad':
            return optimizer
        elif mode == 'both':
            return sel_prob_val, optimizer
        else:
            raise ValueError('mode incorrectly specified')
class selective_map_credible_ms_lasso(rr.smooth_atom):
    """
    Selection-adjusted posterior for the ms + lasso procedure: combines a
    Gaussian likelihood, a Gaussian prior and the selection-probability map
    into one objective, providing a MAP solver and Langevin posterior sampling.
    """

    def __init__(self,
                 y,
                 grad_map,
                 prior_variance,
                 coef=1.,
                 offset=None,
                 quadratic=None,
                 nstep=10):
        generative_X = grad_map.generative_X
        self.param_shape = generative_X.shape[1]
        y = np.squeeze(y)
        E_1 = grad_map.E_1
        E_2 = grad_map.E_2
        self.E = E_2
        self.generative_X = grad_map.generative_X
        initial = np.zeros(self.E)
        #initial[:E_1] = np.squeeze(grad_map.feasible_point[:E_1]* grad_map.active_signs_1[None,:])
        # Start from the sign-adjusted lasso block of the feasible point.
        # NOTE(review): this rebinding discards the zeros array above.
        initial = np.squeeze(grad_map.feasible_point[E_1:]* grad_map.active_signs_2[None,:])
        rr.smooth_atom.__init__(self,
                                (self.param_shape,),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)
        self.coefs[:] = initial
        noise_variance = grad_map.noise_variance
        self.set_likelihood(y, noise_variance, generative_X)
        self.set_prior(prior_variance)
        self.initial_state = initial
        # Posterior = likelihood + prior + (negative log) selection probability.
        self.total_loss = rr.smooth_sum([self.likelihood_loss,
                                         self.log_prior_loss,
                                         grad_map])

    def set_likelihood(self, y, noise_variance, generative_X):
        """Gaussian likelihood of ``y`` under mean ``generative_X @ param``."""
        likelihood_loss = rr.signal_approximator(y, coef=1. / noise_variance)
        self.likelihood_loss = rr.affine_smooth(likelihood_loss, generative_X)

    def set_prior(self, prior_variance):
        """Mean-zero Gaussian prior with variance ``prior_variance``."""
        self.log_prior_loss = rr.signal_approximator(np.zeros(self.param_shape), coef=1. / prior_variance)

    def smooth_objective(self, true_param, mode='both', check_feasibility=False):
        """Delegate value/gradient evaluation to the composite ``total_loss``."""
        true_param = self.apply_offset(true_param)
        if mode == 'func':
            f = self.total_loss.smooth_objective(true_param, 'func')
            return self.scale(f)
        elif mode == 'grad':
            g = self.total_loss.smooth_objective(true_param, 'grad')
            return self.scale(g)
        elif mode == 'both':
            f, g = self.total_loss.smooth_objective(true_param, 'both')
            return self.scale(f), self.scale(g)
        else:
            raise ValueError("mode incorrectly specified")

    def map_solve(self, step=1, nstep=100, tol=1.e-8):
        """
        MAP estimate by damped gradient descent with backtracking.

        Returns (current, value): the final iterate and its objective value.
        NOTE(review): as in ``minimize2``, the ``tol`` break only exits the
        inner backtracking loop, not the outer iteration.
        """
        current = self.coefs[:]
        current_value = np.inf
        objective = lambda u: self.smooth_objective(u, 'func')
        grad = lambda u: self.smooth_objective(u, 'grad')
        for itercount in range(nstep):
            newton_step = grad(current)
            # * self.noise_variance
            # Backtrack until the proposal does not increase the objective.
            count = 0
            while True:
                proposal = current - step * newton_step
                proposed_value = objective(proposal)
                if proposed_value <= current_value:
                    break
                step *= 0.5
                # Stop backtracking once the relative change is negligible.
                if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value):
                    current = proposal
                    current_value = proposed_value
                    break
            current = proposal
            current_value = proposed_value
            # Periodically grow the step to recover from aggressive halving.
            if itercount % 4 == 0:
                step *= 2
        value = objective(current)
        return current, value

    def posterior_samples(self, ndraw=1000, burnin=100):
        """
        Draw posterior samples via projected Langevin dynamics; the first
        ``burnin`` draws are discarded.
        """
        state = self.initial_state
        gradient_map = lambda x: -self.smooth_objective(x, 'grad')
        projection_map = lambda x: x  # unconstrained: identity projection
        stepsize = 1. / self.E
        sampler = projected_langevin(state, gradient_map, projection_map, stepsize)
        samples = []
        for i in range(ndraw + burnin):
            # NOTE(review): Python 2 style iteration — assumes projected_langevin
            # defines a .next() method; under Python 3 iterators this would need
            # next(sampler). TODO confirm.
            sampler.next()
            if i >= burnin:
                samples.append(sampler.state.copy())
        samples = np.array(samples)
        return samples

    def posterior_risk(self, estimator_1, estimator_2, ndraw=1000, burnin=0):
        """
        Monte-Carlo posterior risk (sum of squared errors) of two estimators,
        averaged over ``ndraw`` Langevin draws after ``burnin``.
        """
        state = self.initial_state
        print("here", state.shape)
        gradient_map = lambda x: -self.smooth_objective(x, 'grad')
        projection_map = lambda x: x
        stepsize = 1. / self.E
        sampler = projected_langevin(state, gradient_map, projection_map, stepsize)
        post_risk_1 = 0.
        post_risk_2 = 0.
        for i in range(ndraw + burnin):
            sampler.next()  # NOTE(review): see posterior_samples re .next()
            if i >= burnin:
                sample = sampler.state.copy()
                risk_1 = ((estimator_1-sample)**2).sum()
                print("adjusted risk", risk_1)
                post_risk_1 += risk_1
                risk_2 = ((estimator_2-sample) ** 2).sum()
                print("unadjusted risk", risk_2)
                post_risk_2 += risk_2
        return post_risk_1/ndraw, post_risk_2/ndraw
| StarcoderdataPython |
153611 | <gh_stars>0
"""
Stores a Boolean indicating if the app should run in debug mode or not.
This option can be set with -d or --debug on start.
"""
debug = False
"""
Variable storing a reference to the container backend implementation the API should use.
This option can be set with --container-backend CONTAINER_BACKEND on start.
"""
container_backend = None
| StarcoderdataPython |
import struct

import GLWindow
import ModernGL

wnd = GLWindow.create_window()
ctx = ModernGL.create_context()

# FIX: ctx.program(...) was closed with `])` but opened without `[` — the old
# ModernGL API expects a *list* of shaders, so the opening bracket is restored.
prog = ctx.program([
    ctx.vertex_shader('''
        #version 330
        in vec2 vert;
        in vec2 pos;
        in float scale;
        in vec3 color;
        out vec3 v_color;
        void main() {
            v_color = color;
            gl_Position = vec4(pos + vert * scale, 0.0, 1.0);
        }
    '''),
    ctx.fragment_shader('''
        #version 330
        in vec3 v_color;
        out vec4 f_color;
        void main() {
            f_color = vec4(v_color, 1.0);
        }
    '''),
])

# Vertex coordinates stored in vbo1
#
#     B------D
#     |      |
#     A------C
vbo1 = ctx.buffer(struct.pack(
    '8f',
    -0.5, -0.5,
    -0.5, 0.5,
    0.5, -0.5,
    0.5, 0.5,
))

# Vertex colors stored in vbo2
#
# A, B are green
# C, D are blue
vbo2 = ctx.buffer(struct.pack(
    '12f',
    0.0, 1.0, 0.0,
    0.0, 1.0, 0.0,
    0.0, 0.0, 1.0,
    0.0, 0.0, 1.0,
))

# (Per instance) positions and scales stored in vbo3
# There are 8 (position, scale) pairs
vbo3 = ctx.buffer(struct.pack(
    '24f',
    0.5, 0.0, 0.3,
    0.35, 0.35, 0.2,
    0.0, 0.5, 0.3,
    -0.35, 0.35, 0.2,
    -0.5, 0.0, 0.3,
    -0.35, -0.35, 0.2,
    0.0, -0.5, 0.3,
    0.35, -0.35, 0.2,
))

# Index buffer (also called element buffer)
# There are 2 trianges to render
#
# A, B, C
# B, C, D
ibo = ctx.buffer(struct.pack('6i', 0, 1, 2, 1, 2, 3))

# The vao_content is a list of 3-tuples (buffer, format, attribs)
# the format can have an empty or '/v', '/i', '/r' ending.
# '/v' attributes are the default
# '/i` attributes are per instance attributes
# '/r' attributes are default values for the attributes (per render attributes)
vao_content = [
    (vbo1, '2f', ['vert']),
    (vbo2, '3f', ['color']),
    (vbo3, '2f1f/i', ['pos', 'scale']),
]

vao = ctx.vertex_array(prog, vao_content, ibo)

while wnd.update():
    ctx.viewport = wnd.viewport
    ctx.clear(0.9, 0.9, 0.9)
    vao.render(instances=8)
3398294 | <gh_stars>0
# this Python snippet is stored as src/py/api.py
def calculate(body):
    """API handler: estimate pi with ``body['niter']`` iterations.

    Returns a JSON-serializable dict of the form ``{'pi': <float>}``.
    """
    iterations = body['niter']
    # Imported lazily so the heavy dependency loads only when the endpoint is hit.
    from calculatepipy import PiCalculate
    return {'pi': PiCalculate(iterations).calculate()}
12855683 | import requests
def ok(event, context):
    """Lambda-style handler: GET the ``ok`` service and return its response body."""
    endpoint = "http://ok:8080/"
    reply = requests.request("GET", endpoint)
    return reply.text
| StarcoderdataPython |
5008574 | import asyncio
from typing import List
from .api import ApiProvider, ApiError
from .structs import (
Pair, OrderSide, OrderStatus, OrderType, Period, Candle, Trade,
OrderInBook, OrderBook
)
class Exchange:
    """Interface to base (public, unauthenticated) exchange methods."""

    def __init__(self, api: ApiProvider = None):
        # Public endpoints only, so an unauthenticated provider is the default.
        self.api = api or ApiProvider(auth_required=False)

    async def get_pairs(self):
        """List all available market pairs, keyed by Pair enum."""
        data = await self.api.get('public/get-instruments')
        return {Pair(i.pop('instrument_name')): i for i in data['instruments']}

    async def get_tickers(self, pair: Pair = None):
        """Get tickers in all available markets (or one market if ``pair`` given)."""
        params = {'instrument_name': pair.value} if pair else None
        data = await self.api.get('public/get-ticker', params)
        if pair:
            # Drop the redundant instrument-name field from a single ticker.
            data.pop('i')
            return data
        return {Pair(ticker.pop('i')): ticker for ticker in data}

    async def get_trades(self, pair: Pair):
        """Get last 200 trades in a specified market."""
        data = await self.api.get(
            'public/get-trades', {'instrument_name': pair.value})
        for trade in data:
            # Strip instrument name and human-readable time duplicates.
            trade.pop('i')
            trade.pop('dataTime')
        return data

    async def get_price(self, pair: Pair):
        """Get latest (ask) price of pair as a float."""
        data = await self.api.get('public/get-ticker', {
            'instrument_name': pair.value
        })
        return float(data['a'])

    async def get_orderbook(self, pair: Pair, depth: int = 150):
        """Get the order book for a particular market."""
        data = await self.api.get('public/get-book', {
            'instrument_name': pair.value,
            'depth': depth
        })
        return data[0]

    async def listen_candles(self, period: Period, *pairs: List[Pair]):
        """Yield new, de-duplicated candles for the given pairs via websocket."""
        if not isinstance(period, Period):
            raise ValueError(f'Provide Period enum not {period}')
        channels = [
            f'candlestick.{period.value}.{pair.value}'
            for pair in pairs
        ]
        # Last emitted candle timestamp per pair, to suppress duplicates.
        prev_time = {}
        async for data in self.api.listen('market', *channels):
            pair = Pair(data['instrument_name'])
            for candle in data['data']:
                current_time = int(candle['t'] / 1000)  # ms -> s
                if pair not in prev_time or current_time > prev_time[pair]:
                    yield Candle(
                        current_time,
                        candle['o'], candle['h'], candle['l'],
                        candle['c'], candle['v'],
                        Pair(data['instrument_name'])
                    )
                    prev_time[pair] = current_time

    async def listen_trades(self, *pairs: List[Pair]):
        """Yield live trades for the given pairs via websocket."""
        channels = [f'trade.{pair}' for pair in pairs]
        async for data in self.api.listen('market', *channels):
            for trade in data['data']:
                trade.pop('dataTime')
                # NOTE(review): timestamp divided by 100 here while every other
                # path divides by 1000 (ms -> s) — looks like a typo; confirm.
                yield Trade(
                    trade['d'], int(trade['t'] / 100),
                    trade['p'], trade['q'],
                    OrderSide(trade['s'].upper()),
                    Pair(data['instrument_name'])
                )

    async def listen_orderbook(
            self, *pairs: List[Pair], depth: int = 150) -> OrderBook:
        """Yield order-book snapshots (bids descending, asks ascending reversed)."""
        channels = [f'book.{pair}.{depth}' for pair in pairs]
        async for data in self.api.listen('market', *channels):
            pair = Pair(data['instrument_name'])
            buys = [
                OrderInBook(*order, OrderSide.BUY)
                for order in data['data'][0]['bids']
            ]
            sells = [
                OrderInBook(*order, OrderSide.SELL)
                for order in reversed(data['data'][0]['asks'])
            ]
            yield OrderBook(buys, sells, pair)
class Account:
    """Provides access to account actions and data. Balance, trades, orders."""

    def __init__(
            self, *, api_key: str = '', api_secret: str = '',
            from_env: bool = False, api: ApiProvider = None):
        # Must have some way to authenticate: an explicit provider, a key pair,
        # or credentials taken from environment variables.
        if not api and not (api_key and api_secret) and not from_env:
            raise ValueError(
                'Pass ApiProvider or api_key with api_secret or from_env')
        self.api = api or ApiProvider(
            api_key=api_key, api_secret=api_secret, from_env=from_env)

    async def get_balance(self):
        """Return balance, keyed by currency code."""
        data = await self.api.post(
            'private/get-account-summary', {'params': {}})
        return {acc['currency']: acc for acc in data['accounts']}

    async def get_orders(
            self, pair: Pair, page: int = 0, page_size: int = 200):
        """Return all (historical) orders for ``pair``, one page at a time."""
        data = await self.api.post('private/get-order-history', {
            'params': {
                'instrument_name': pair.value,
                'page_size': page_size,
                'page': page
            }
        })
        orders = data.get('order_list') or []
        for order in orders:
            # Normalize: expose the exchange's order_id as an integer 'id'.
            order['id'] = int(order.pop('order_id'))
        return orders

    async def get_open_orders(
            self, pair: Pair, page: int = 0, page_size: int = 200):
        """Return open orders for ``pair``, one page at a time."""
        data = await self.api.post('private/get-open-orders', {
            'params': {
                'instrument_name': pair.value,
                'page_size': page_size,
                'page': page
            }
        })
        orders = data.get('order_list') or []
        for order in orders:
            order['id'] = int(order.pop('order_id'))
        return orders

    async def get_trades(
            self, pair: Pair, page: int = 0, page_size: int = 200):
        """Return executed trades for ``pair``, one page at a time."""
        data = await self.api.post('private/get-trades', {
            'params': {
                'instrument_name': pair.value,
                'page_size': page_size,
                'page': page
            }
        })
        trades = data.get('trade_list') or []
        for trade in trades:
            trade['order_id'] = int(trade['order_id'])
            trade['id'] = int(trade.pop('trade_id'))
        return trades

    async def create_order(
            self, pair: Pair, side: OrderSide, type_: OrderType,
            quantity: float, price: float = 0, client_id: int = None) -> int:
        """Create raw order with buy or sell side. Returns the new order id."""
        data = {
            'instrument_name': pair.value, 'side': side.value,
            'type': type_.value
        }
        # Market buys are specified by notional (amount to spend), everything
        # else by base-currency quantity.
        if type_ == OrderType.MARKET and side == OrderSide.BUY:
            data['notional'] = quantity
        else:
            data['quantity'] = quantity
        if client_id:
            data['client_oid'] = str(client_id)
        if price:
            if type_ == OrderType.MARKET:
                raise ValueError(
                    "Error, MARKET execution do not support price value")
            data['price'] = price
        resp = await self.api.post('private/create-order', {'params': data})
        return int(resp['order_id'])

    async def buy_limit(self, pair: Pair, quantity: float, price: float):
        """Buy limit order."""
        return await self.create_order(
            pair, OrderSide.BUY, OrderType.LIMIT, quantity, price
        )

    async def sell_limit(self, pair: Pair, quantity: float, price: float):
        """Sell limit order."""
        return await self.create_order(
            pair, OrderSide.SELL, OrderType.LIMIT, quantity, price
        )

    async def wait_for_status(
            self, order_id: int, pair: Pair, statuses, delay: float = 0.5):
        """Poll the order until its status is in ``statuses`` or retries run out.

        Raises ApiError if the status never reaches one of ``statuses``.
        """
        order = await self.get_order(order_id)
        for _ in range(self.api.retries):
            if OrderStatus(order['status']) in statuses:
                break
            await asyncio.sleep(delay)
            order = await self.get_order(order_id)
        if OrderStatus(order['status']) not in statuses:
            raise ApiError(
                f"Status not changed for: {order}, must be in: {statuses}")

    async def buy_market(
            self, pair: Pair, spend: float, wait_for_fill=False):
        """Buy market order, spending ``spend`` of the quote currency."""
        order_id = await self.create_order(
            pair, OrderSide.BUY, OrderType.MARKET, spend
        )
        if wait_for_fill:
            # Any terminal state counts as "done waiting", not only FILLED.
            await self.wait_for_status(order_id, pair, (
                OrderStatus.FILLED, OrderStatus.CANCELED, OrderStatus.EXPIRED,
                OrderStatus.REJECTED
            ))
        return order_id

    async def sell_market(
            self, pair: Pair, quantity: float, wait_for_fill=False):
        """Sell market order of ``quantity`` base currency."""
        order_id = await self.create_order(
            pair, OrderSide.SELL, OrderType.MARKET, quantity
        )
        if wait_for_fill:
            await self.wait_for_status(order_id, pair, (
                OrderStatus.FILLED, OrderStatus.CANCELED, OrderStatus.EXPIRED,
                OrderStatus.REJECTED
            ))
        return order_id

    async def get_order(self, order_id: int):
        """Get order info, with trade list folded in and integer 'id'."""
        data = await self.api.post('private/get-order-detail', {
            'params': {'order_id': str(order_id)}
        })
        data['order_info']['trade_list'] = data.pop('trade_list', [])
        data['order_info']['id'] = int(data['order_info'].pop('order_id'))
        return data['order_info']

    async def cancel_order(
            self, order_id: int, pair: Pair, wait_for_cancel=False):
        """Cancel order, optionally waiting for a terminal state."""
        await self.api.post('private/cancel-order', {
            'params': {'order_id': order_id, 'instrument_name': pair.value}
        })
        if not wait_for_cancel:
            return
        await self.wait_for_status(order_id, pair, (
            OrderStatus.CANCELED, OrderStatus.EXPIRED, OrderStatus.REJECTED
        ))

    async def cancel_open_orders(self, pair: Pair):
        """Cancel all open orders for ``pair``."""
        return await self.api.post('private/cancel-all-orders', {
            'params': {'instrument_name': pair.value}
        })

    async def listen_balance(self):
        """Yield balance updates from the signed user websocket channel."""
        async for data in self.api.listen(
                'user', 'user.balance', sign=True):
            for balance in data.get('data', []):
                yield balance

    async def listen_orders(self, pair: Pair):
        """Yield order updates for ``pair`` from the signed user channel."""
        async for data in self.api.listen(
                'user', f'user.order.{pair.value}', sign=True):
            for order in data.get('data', []):
                order['id'] = int(order.pop('order_id'))
                yield order
5115216 | <filename>Ch02_Pattern_SlidingWindow/P1_MaximumSumSubarrayOfSizeK/Python/solution.py
# Copyright (c) 2021 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
class Solution:
    """Fixed-size sliding-window problems."""

    def max_sub_array_of_size_k(self, k, arr):
        """Return the maximum sum over all contiguous windows of length ``k``.

        Runs in O(len(arr)) time and O(1) extra space by maintaining a
        running window sum. Returns 0 if no full window of size ``k`` exists.
        """
        best = 0
        running = 0
        left = 0
        for right, value in enumerate(arr):
            running += value  # grow the window on the right
            # Once the window reaches size k, record it and slide the left edge.
            if right >= k - 1:
                best = max(best, running)
                running -= arr[left]
                left += 1
        return best
def main():
    """Exercise the solver on the two sample inputs, printing each result."""
    solver = Solution()
    for k, arr in ((3, [2, 1, 5, 1, 3, 2]), (2, [2, 3, 4, 1, 5])):
        print(solver.max_sub_array_of_size_k(k, arr))


if __name__ == '__main__':
    main()
4963629 |
import binascii
import sys

import Adafruit_PN532 as PN532
# FIX: mcpi_data (block/subtype tables) was referenced throughout but never
# imported — the import was evidently lost; restored here.
import mcpi_data

# Hack to make code compatible with both Python 2 and 3 (since 3 moved
# raw_input from a builtin to a different function, ugh).
try:
    input = raw_input
except NameError:
    pass

# PN532 configuration for a Raspberry Pi (BCM pin numbers):
CS = 18
MOSI = 23
MISO = 24
SCLK = 25

# Configure the key to use for writing to the MiFare card.  You probably don't
# need to change this from the default below unless you know your card has a
# different key associated with it.
# FIX: the key literal was corrupted in the source; restored to the MiFare
# Classic factory-default key (six 0xFF bytes) that the comment describes.
CARD_KEY = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]

# Create and initialize an instance of the PN532 class.
pn532 = PN532.PN532(cs=CS, sclk=SCLK, mosi=MOSI, miso=MISO)
pn532.begin()
pn532.SAM_configuration()

# Step 1, wait for card to be present.
print('Minecraft Block NFC Writer')
print('')
print('== STEP 1 =========================')
print('Place the card to be written on the PN532...')
uid = pn532.read_passive_target()
while uid is None:
    uid = pn532.read_passive_target()
print('')
print('Found card with UID: 0x{0}'.format(binascii.hexlify(uid)))
print('')
print('==============================================================')
print('WARNING: DO NOT REMOVE CARD FROM PN532 UNTIL FINISHED WRITING!')
print('==============================================================')
print('')

# Step 2, pick a block type.
print('== STEP 2 =========================')
print('Now pick a block type to write to the card.')
block_choice = None
while block_choice is None:
    print('')
    print('Type either L to list block types, or type the number of the desired block.')
    print('')
    choice = input('Enter choice (L or block #): ')
    print('')
    if choice.lower() == 'l':
        # Print block numbers and names.
        print('Number\tBlock name')
        print('------\t----------')
        for i, b in enumerate(mcpi_data.BLOCKS):
            block_name, block_id = b
            print('{0:>6}\t{1}'.format(i, block_name))
    else:
        # Assume a number must have been entered.
        try:
            block_choice = int(choice)
        except ValueError:
            # Something other than a number was entered. Try again.
            print('Error! Unrecognized option.')
            continue
        # Check choice is within bounds of block numbers.
        if not (0 <= block_choice < len(mcpi_data.BLOCKS)):
            print('Error! Block number must be within 0 to {0}.'.format(len(mcpi_data.BLOCKS)-1))
            # FIX: reset the choice so the loop actually re-prompts instead of
            # falling through with an out-of-range index.
            block_choice = None
            continue
# Block was chosen, look up its name and ID.
block_name, block_id = mcpi_data.BLOCKS[block_choice]
print('You chose the block type: {0}'.format(block_name))
print('')

# Get the block subtype if it has any available.
subtype_choice = None
if block_name in mcpi_data.SUBTYPES:
    print('Now pick a subtype for the block.')
    print('')
    print('Number\tSubtype')
    print('------\t-------')
    # Print all the subtypes for this block.
    block_subtypes = mcpi_data.SUBTYPES[block_name]
    for subtype_id, subtype_name in block_subtypes.items():
        print('{0:>6}\t{1}'.format(subtype_id, subtype_name))
    # Get a subtype id from the user.
    while subtype_choice is None:
        print('')
        try:
            subtype_choice = int(input('Enter subtype number: '))
        except ValueError:
            # Something other than a number was entered. Try again.
            print('Error! Unrecognized subtype number.')
            continue
        # FIX: the original tested the stale loop variable ``subtype_id`` and
        # never reset ``subtype_choice``, so invalid entries escaped the loop.
        if subtype_choice not in block_subtypes:
            print('Error! Subtype number must be one shown above!')
            subtype_choice = None
            continue
if subtype_choice is not None:
    print('You also chose the subtype: {0}'.format(block_subtypes[subtype_choice]))
    print('')

# Confirm writing the block type.
print('== STEP 3 =========================')
print('Confirm you are ready to write to the card:')
print('Block: {0}'.format(block_name))
if subtype_choice is not None:
    print('Subtype: {0}'.format(block_subtypes[subtype_choice]))
print('')
choice = input('Confirm card write (Y or N)? ')
if choice.lower() != 'y' and choice.lower() != 'yes':
    print('Aborted!')
    sys.exit(0)
print('Writing card (DO NOT REMOVE CARD FROM PN532)...')

# Write the card!
# First authenticate block 4.
if not pn532.mifare_classic_authenticate_block(uid, 4, PN532.MIFARE_CMD_AUTH_B,
                                               CARD_KEY):
    print('Error! Failed to authenticate block 4 with the card.')
    sys.exit(-1)
# Next build the data to write to the card.
# Format is as follows:
# - Bytes 0-3 are a header with ASCII value 'MCPI'
# - Byte 4 is the block ID byte
# - Byte 5 is 0 if block has no subtype or 1 if block has a subtype
# - Byte 6 is the subtype byte (optional, only if byte 5 is 1)
data = bytearray(16)
data[0:4] = b'MCPI'  # Header 'MCPI'
data[4] = block_id & 0xFF
if subtype_choice is not None:
    data[5] = 1
    data[6] = subtype_choice & 0xFF
# Finally write the card.
if not pn532.mifare_classic_write_block(4, data):
    print('Error! Failed to write to the card.')
    sys.exit(-1)
print('Wrote card successfully! You may now remove the card from the PN532.')
1910283 | """
Application Config
"""
# Base URL of the fund store service (dev environment).
FUND_STORE = (
    "https://funding-service-design-fund-store-dev.london.cloudapps.digital"
)
# Base URL of the application store service (dev environment).
APPLICATION_STORE = "https://funding-service-design-application-store-dev.london.cloudapps.digital"  # noqa
8144102 | from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import json
from registrar.models import Course
from registrar.models import Teacher
from registrar.models import Announcement
from teacher.views import announcement
TEST_USER_EMAIL = "<EMAIL>"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "<PASSWORD>"
TEST_USER_EMAIL2 = "<EMAIL>"
TEST_USER_USERNAME2 = "whalesquid"
TEST_USER_PASSWORD2 = "<PASSWORD>"
class AnnouncementTestCase(TestCase):
    """Tests for the teacher announcement pages and AJAX endpoints.

    FIX: the anonymized ``password=<PASSWORD>`` placeholders were bare (unquoted)
    and therefore syntax errors; they are restored to the module-level
    TEST_USER_PASSWORD / TEST_USER_PASSWORD2 constants they clearly referenced.
    """

    def tearDown(self):
        courses = Course.objects.all()
        for course in courses:
            course.delete()
        User.objects.all().delete()

    def setUp(self):
        # Create our Trudy user (used to test unauthorized access).
        User.objects.create_user(
            email=TEST_USER_EMAIL2,
            username=TEST_USER_USERNAME2,
            password=TEST_USER_PASSWORD2
        )
        user = User.objects.get(email=TEST_USER_EMAIL2)
        teacher = Teacher.objects.create(user=user)

        # Create a test course owned by the primary user.
        user = User.objects.create_user(
            email=TEST_USER_EMAIL,
            username=TEST_USER_USERNAME,
            password=TEST_USER_PASSWORD
        )
        teacher = Teacher.objects.create(user=user)
        course = Course.objects.create(
            id=1,
            title="Comics Book Course",
            sub_title="The definitive course on comics!",
            category="",
            teacher=teacher,
        )
        Announcement.objects.create(
            announcement_id=1,
            course=course,
            title='Hello world!',
            body='This is the body of the message.',
        )

    def get_logged_in_client(self):
        client = Client()
        client.login(
            username=TEST_USER_USERNAME,
            password=TEST_USER_PASSWORD
        )
        return client

    def get_logged_in_trudy_client(self):
        client = Client()
        client.login(
            username=TEST_USER_USERNAME2,
            password=TEST_USER_PASSWORD2
        )
        return client

    def test_url_resolves_to_announcements_page_view(self):
        found = resolve('/teacher/course/1/announcement')
        self.assertEqual(found.func, announcement.announcements_page)

    def test_announcements_page_without_submissions(self):
        try:
            Announcement.objects.get(announcement_id=1).delete()
        except Announcement.DoesNotExist:
            pass
        client = self.get_logged_in_client()
        response = client.post('/teacher/course/1/announcement')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Comics Book Course', response.content)
        self.assertIn(b'ajax_announcement_modal(0);', response.content)

    def test_announcements_page_with_submissions(self):
        client = self.get_logged_in_client()
        response = client.post('/teacher/course/1/announcement')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Comics Book Course', response.content)
        self.assertIn(b'Hello world!', response.content)
        self.assertIn(b'This is the body of the message.', response.content)

    def test_announcements_table_without_submissions(self):
        try:
            Announcement.objects.get(announcement_id=1).delete()
        except Announcement.DoesNotExist:
            pass
        client = self.get_logged_in_client()
        response = client.post('/teacher/course/1/announcements_table')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'ajax_announcement_modal(0);', response.content)

    def test_announcements_table_with_submissions(self):
        client = self.get_logged_in_client()
        response = client.post('/teacher/course/1/announcements_table')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Hello world!', response.content)
        self.assertIn(b'This is the body of the message.', response.content)

    def test_announcement_modal_without_submissions(self):
        try:
            Announcement.objects.get(announcement_id=1).delete()
        except Announcement.DoesNotExist:
            pass
        client = self.get_logged_in_client()
        response = client.post('/teacher/course/1/announcement_modal', {
            'announcement_id': 0,
        })
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'announcement_modal', response.content)

    def test_announcement_modal_with_submissions(self):
        client = self.get_logged_in_client()
        response = client.post('/teacher/course/1/announcement_modal', {
            'announcement_id': 1,
        })
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'announcement_modal', response.content)

    def test_save_announcement_with_insert(self):
        try:
            Announcement.objects.get(announcement_id=1).delete()
        except Announcement.DoesNotExist:
            pass
        # Mark the request as AJAX so the view returns JSON.
        kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/teacher/course/1/save_announcement', {
            'announcement_id': 0,
            'title': 'test',
            'body': 'test',
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['message'], 'saved')
        self.assertEqual(array['status'], 'success')

    def test_save_announcement_with_update(self):
        kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/teacher/course/1/save_announcement', {
            'announcement_id': 1,
            'title': 'test',
            'body': 'test',
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['message'], 'saved')
        self.assertEqual(array['status'], 'success')

    def test_delete_announcement_without_record(self):
        try:
            Announcement.objects.get(announcement_id=1).delete()
        except Announcement.DoesNotExist:
            pass
        kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/teacher/course/1/delete_announcement', {
            'announcement_id': 1,
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['message'], 'cannot find record')
        self.assertEqual(array['status'], 'failed')

    def test_delete_announcement_with_record_and_correct_user(self):
        kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/teacher/course/1/delete_announcement', {
            'announcement_id': 1,
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['message'], 'deleted')
        self.assertEqual(array['status'], 'success')

    def test_delete_announcement_with_record_and_incorrect_user(self):
        kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
        client = self.get_logged_in_trudy_client()
        response = client.post('/teacher/course/1/delete_announcement', {
            'announcement_id': 1,
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['message'], 'unauthorized deletion')
        self.assertEqual(array['status'], 'failed')
| StarcoderdataPython |
9737374 | import logging
# We don't catch this one internallyclass OptFail(Exception):
class OptError(Exception):
def __init__(self, mesg='None given', err_type='Not specified'):
optimize_log = logging.getLogger(__name__)
optimize_log.critical('OptError: Optimization has failed.')
self.mesg = mesg
self.err_type = err_type
# Exception.__init__(self, mesg)
class AlgError(Exception):
# maybe generalize later def __init__(self, *args, **kwargs):
def __init__(self, mesg='None given', newLinearBends=None):
optimize_log = logging.getLogger(__name__)
optimize_log.error('AlgError: Exception created.\n')
if newLinearBends:
optimize_log.error('AlgError: New bends detected.\n')
self.linearBends = newLinearBends
self.mesg = mesg
class IRCendReached(Exception):
"""Quit when we have found a minimum or completed the requested steps."""
def __init__(self, mesg='None given'):
optimize_log = logging.getLogger(__name__)
| StarcoderdataPython |
5190724 | from DB2Gams_l2 import *
import ShockFunction
class gams_model:
    """
    Databases: Dictionary with databases. Keys = name of database, value∈{path to gdx file, GamsDatabase, or a DataBase.py_db}.
    work_folder: Point to folder where the model should be run from.
    opt_file: Add options file. If None, a default options file is written (see default_opt).
    """
    def __init__(self,work_folder=None,pickle_path=None,opt_file=None,execute_name='CollectAndRun.gms',settings=None,pickle_settings=None):
        # Either build a fresh workspace (pickle_path is None) or restore a
        # previously exported instance from a pickle on disk.
        # NOTE(review): mutable default args are avoided here, but several
        # methods below use options_add={} / options_run={}; they are only
        # read, never mutated, so the shared-default pitfall does not bite.
        if pickle_path is None:
            self.work_folder = work_folder if work_folder is not None else os.getcwd()
            self.execute_name = execute_name
            self.dbs = {}
            self.ws = GamsWorkspace(working_directory = self.work_folder)
            if opt_file is None:
                self.opt = default_opt(self.ws,name='temp.opt')
                opt_file = 'temp.opt'
            else:
                self.opt = self.ws.add_options(opt_file=opt_file)
            self.settings = settings
            self.export_settings = {'settings': 'settings_'+self.settings.name if pickle_settings is None else pickle_settings,
                                    'out_db': None, 'opt': opt_file}
        else:
            self.import_from_pickle(os.path.split(pickle_path)[0],os.path.split(pickle_path)[1],work_folder)
    def import_from_pickle(self,repo,pickle_name,work_folder):
        """
        Import gams_model object from pickle:
        (1) Add simple attributes (that are not special classes),
        (2) Add workspace, opt files, databases (that are not pickleable) from settings.
        """
        with open(repo+'\\'+end_w_pkl(pickle_name),"rb") as file:
            self.__dict__.update(pickle.load(file).__dict__)
        if work_folder is not None:
            self.work_folder = work_folder
        self.ws = GamsWorkspace(working_directory=self.work_folder)
        self.opt = self.ws.add_options(opt_file=repo+'\\'+self.export_settings['opt'])
        self.settings = gams_settings(pickle_path=repo+'\\'+self.export_settings['settings'])
        self.out_db = DataBase.py_db(file_path=self.export_settings['out_db'],default_db='db_Gdx')
        return self
    def export(self,repo,pickle_name,inplace_db = False,**kwargs):
        """
        Export gams_model instance. Note that only a subset of information are stored (not gams-objects in general).
        """
        self.settings.export(repo,self.export_settings['settings'],inplace_db=inplace_db)
        self.export_settings['out_db'] = self.out_db.export(repo,name=self.settings.name+'_out_db',**kwargs)
        shutil.copy(self.work_folder+'\\'+self.export_settings['opt'],repo+'\\'+self.export_settings['opt'])
        # Temporarily strip the unpicklable GAMS objects, pickle self, restore.
        # (fixed: 'out_db' was listed twice in this tuple — harmless but unintended)
        temp_empty_attrs = ('dbs','ws','out_db','opt','settings','job')
        temp = {attr: getattr(self,attr) for attr in temp_empty_attrs}
        [setattr(self,attr,None) for attr in temp_empty_attrs]
        with open(repo+'\\'+end_w_pkl(pickle_name),"wb") as file:
            pickle.dump(self,file)
        [setattr(self,attr,temp[attr]) for attr in temp_empty_attrs];
    def upd_databases(self,merge_internal=True,from_gdx=False):
        """
        Read in databases, export to work_folder, and add to GamsWorkspace.
        """
        for database in self.settings.databases:
            if merge_internal is True:
                self.settings.databases[database].merge_internal()
            if from_gdx is True:
                self.settings.databases[database].db_Gdx.export(self.work_folder+'\\'+end_w_gdx(database))
                self.dbs[database] = self.ws.add_database_from_gdx(self.work_folder+'\\'+end_w_gdx(database))
            else:
                self.dbs[database] = self.ws.add_database(source_database=self.settings.databases[database].db_Gdx.database)
    def run(self,model=None,run_from_job=False,options_add={},options_run={}):
        """
        Create Model instance and run.

        options_add is forwarded to add_job (keyword args of ws.add_job_from_file,
        e.g. {'checkpoint': cp}); options_run is forwarded to GamsJob.run.
        """
        if run_from_job is False:
            self.model_instance(model)
            self.compile_collect_file()
            self.add_job(options_add)
        self.run_job(options_run)
        self.out_db = DataBase.py_db(database_gdx=self.job.out_db,default_db='db_Gdx')
        if self.settings.solvestat is True:
            self.modelstat = self.out_db[self.settings.name+'_modelstat']
            self.solvestat = self.out_db[self.settings.name+'_solvestat']
    def solve_sneakily(self,db_star=None,from_cp=False,cp_init=None,run_from_job=False,shock_db=None,options_run={},kwargs_shock={}):
        """Solve towards target database db_star by looping over intermediate shocks from a checkpoint."""
        if from_cp is False:
            cp = self.ws.add_checkpoint() if cp_init is None else cp_init
            # BUGFIX: run() has no **kwargs, so the original call
            # self.run(..., **{'checkpoint': cp}) always raised TypeError.
            # The checkpoint is routed through options_add, which add_job()
            # forwards to ws.add_job_from_file (same pattern as
            # execute_shock_from_cp below).
            self.run(model=self.settings,run_from_job=run_from_job,options_add={'checkpoint': cp})
        if shock_db is None:
            shock_db = ShockFunction.solve_sneaky_db(self.out_db,db_star,**kwargs_shock)
        shock_db.db_other.export(self.work_folder+'\\'+shock_db.name+'.gdx')
        shock = self.std_UEVAS_from_db(shock_db,**kwargs_shock)
        # NOTE(review): when from_cp is False and cp_init is None, the freshly
        # created cp is not forwarded here (cp_init is) — confirm intended.
        self.execute_shock_from_cp(shock,shock_db.name,cp_init,options_run=options_run)
    def std_UEVAS_from_db(self,shock_db,loop_name='l1',update_vars='all',shock_name='shock',**kwargs):
        """
        Creates a ShockFunction that loops over values in shock_db, for variables in update_vars.
        The shock_db needs to be arranged with variable names as var+'_loopval', and subsets var+'_subset' for the method to work.
        """
        shock = ShockFunction.AddShocks('shock_'+self.settings.name if shock_name is None else shock_name,shock_db,loop_name)
        shock.UpdateExoVarsAndSolve(self)
        if update_vars=='all':
            update_vars = [par.split('_loopval')[0] for par in shock_db.parameters['parameters']]
        for var in update_vars:
            shock.UEVAS_adjVar(var,var+'_loopval',conditions=shock_db.get(var+'_subset').to_str)
        shock.UEVAS_2gmy(self.work_folder+'\\'+shock.name)
        return shock
    def execute_shock_from_cp(self,shock,shock_db_name,cp,options_run={}):
        """Run a compiled shock (.gms) file from checkpoint cp, exposing its gdx to GAMS via an opt define."""
        self.opt.defines[shock_db_name] = shock_db_name+'.gdx'
        self.job = self.ws.add_job_from_file(shock.gms,**{'checkpoint': cp})
        self.run(run_from_job=True,options_run=options_run)
    def model_instance(self,gams_settings):
        """
        Create instance of model using gams_settings (See the class gams_settings below).
        (1) Adds settings to the .model attribute.
        (2) Writes 'placeholders' used in the gams code to the opt.file; places where %PLACEHOLDER% is used.
        (3) If a run_file is included (part where statement of fixing and solve statement is included), the
            attribute self.model.run_file = NAMEOFFILE. If a run_file is not included, a default run_file
            is created from a number of settings in *gams_settings* as well. See write_run_file() for more.
        (4) If a collect_file is included (part where $IMPORT of components are called), the attribute
            self.model.collect_file = NAMEOFFILE. If a collect_file is not included, a default file is created.
            See write_collect_file() for more.
        (5) The relevant files for running the model are copied to the work_folder, ready to execute.
        """
        self.settings = gams_settings
        self.upd_databases()
        self.update_placeholders()
        if self.settings.run_file is None:
            self.write_run_file()
        if self.settings.collect_file is None:
            self.write_collect_file()
        for file in self.settings.files:
            if not os.path.isfile(self.work_folder+'\\'+end_w_gms(file)):
                shutil.copy(self.settings.files[file]+'\\'+end_w_gms(file),self.work_folder+'\\'+end_w_gms(file))
    def compile_collect_file(self):
        """Precompile the collect file (.gms -> .gmy) with the dreamtools Precompiler; returns the .gmy path."""
        with open(self.work_folder+'\\'+end_w_gms(self.settings.collect_file).replace('.gms','.gmy'), "w") as file:
            file.write(Precompiler(self.work_folder+'\\'+end_w_gms(self.settings.collect_file))())
        return self.work_folder+'\\'+end_w_gms(self.settings.collect_file).replace('.gms','.gmy')
    def add_job(self,options={}):
        """
        Given a model_instance is created, this creates a GamsJob by compiling the self.model.collect_file
        using Precompiler from the dreamtools package. The GamsJob is added as an attribute self.job.
        """
        self.compile_collect_file()
        self.job = self.ws.add_job_from_file(self.work_folder+'\\'+end_w_gms(self.settings.collect_file).replace('.gms','.gmy'),**options)
        return self.job
    def run_job(self,options={}):
        """
        Add options using dict with key = option_name, value = option.
        """
        self.job.run(self.opt,databases=list(self.dbs.values()),**options)
    def update_placeholders(self):
        """
        Add placeholders to the options-file.
        """
        [self.add_placeholder(placeholder,self.settings.placeholders[placeholder]) for placeholder in self.settings.placeholders];
    def add_placeholder(self,placeholder,db):
        """
        Placeholder is a dict with keys 'name' and 'db'. The value for 'name' is the placeholder used in the Gams code.
        The value for 'db' is the name of the database used when initializing the 'databases' attribute in the gams_model.
        NB: Currently the placeholders only include names of databases. Straightforward to extend this to more general case.
        """
        self.opt.defines[placeholder] = self.dbs[db].name
    def write_run_file(self):
        """
        Writes a run_file for the code. This includes:
        (1) If a list of exogenous groups are included in the list self.model.g_exo, these are included in a $FIX statement.
        (2) If a list of endogenous groups are included in the list self.model.g_endo, these are included in an $UNFIX statement.
        (3) If a list of block names are included in the list self.model.blocks, these are used to define a model with name self.model.name.
        (4) If a specific solve statement is included in self.model.solve, this is used; otherwise a default solve statement is included.
        (5) Once the run_file has been written, the attribute is set to the new file name, and added to the dictionary of model instance files.
        """
        with open(self.work_folder+'\\'+'RunFile.gms', "w") as file:
            if self.settings.g_exo is not None:
                file.write("$FIX {gnames};\n\n".format(gnames=', '.join(self.settings.g_exo)))
            if self.settings.g_endo is not None:
                file.write("$UNFIX {gnames};\n\n".format(gnames=', '.join(self.settings.g_endo)))
            if self.settings.blocks is not None:
                file.write("$Model {mname} {blocks};\n\n".format(mname=self.settings.name, blocks=', '.join(self.settings.blocks)))
            if self.settings.solvestat is True:
                file.write(add_solvestat(self.settings.name))
            if self.settings.solve is None:
                file.write(default_solve(self.settings.name))
            else:
                file.write(self.settings.solve)
            if self.settings.solvestat is True:
                file.write(update_solvestat(self.settings.name))
        self.settings.run_file = 'RunFile.gms'
        self.settings.files['RunFile.gms'] = self.work_folder
    def write_collect_file(self):
        """
        Writes a collect_file for the code. This includes:
        (1) The start of the code (root_file) can either be default (see read_root()), or the user can
            supply its own string in self.model.root_file (NB: A file option should be included here as well).
        (2) Then $IMPORT statements are included for all files in self.model.files (in the sequence they appear).
        (3) If the run_file is not included in the self.model.files, it is added in the end.
        (4) The attribute self.model.collect_file is updated to point to the collect_file.
        """
        with open(self.work_folder+'\\'+self.execute_name, "w") as file:
            file.write(self.settings.write_collect_and_run_file(self.execute_name))
class condition_tree:
    """Nesting tree used to assemble a nested condition string.

    ``tree`` maps each aggregate name to ``{'vals': [children], 'cond': sep}``;
    the single aggregate that is never anyone's child is the root/output.
    ``write_condition`` resolves the tree bottom-up into one bracketed string.
    """
    def __init__(self, tree=None, max_=10):
        self.tree = tree
        # Derive index structures from the tree definition.
        self.mapping_from_tree()
        self.aggregates()
        self.inputs()
        self.all_elements()
        self.outputs()
        self.max_depth = max_
        # One flag per (in, agg) edge; edges from leaf inputs start resolved.
        self.check_map = pd.Series(False, index=self.map_)
        self.check_map[self.check_map.index.get_level_values('in').isin(self.inp)] = True
        # One flag per aggregate; resolved once all of its children are.
        self.check_agg = pd.Series(False, index=self.agg)
        self.write_element = {name: '' for name in self.all}
        for name in self.inp:
            self.write_element[name] = name
        self.incomplete = True
    def mapping_from_tree(self):
        """Build the (in, agg) MultiIndex of child/parent edges."""
        edges = [(child, parent) for parent in self.tree for child in self.tree[parent]['vals']]
        self.map_ = pd.MultiIndex.from_tuples(edges, names=['in', 'agg'])
    def aggregates(self):
        """Unique aggregate (parent) names, in order of first appearance."""
        self.agg = self.map_.get_level_values('agg').unique()
    def inputs(self):
        """Leaf names: children that never act as parents."""
        self.inp = pd.Index(set(self.map_.get_level_values('in')) - set(self.agg), name='in')
    def all_elements(self):
        """All node names: aggregates plus leaves."""
        self.all = pd.Index(self.agg.union(self.inp), name='all')
    def outputs(self):
        """The root: the one aggregate that is never anyone's child."""
        self.out = list(set(self.map_.get_level_values('agg')) - set(self.map_.get_level_values('in')))[0]
    def write_condition(self):
        """Resolve the tree bottom-up; return the root's condition string."""
        rounds = 0
        while self.incomplete:
            rounds += 1
            before = self.check_agg.copy()
            self.update_aggs()
            # Any aggregate that just became resolvable gets written out.
            for name in self.check_agg.index:
                if before[name] != self.check_agg[name]:
                    self.write_nest(name)
                    self.update_map(name)
            if rounds == self.max_depth:
                raise RuntimeError("Iterations exceeded max_depth.")
            if self.check_agg[self.out]:
                self.incomplete = False
        return self.write_element[self.out]
    def write_nest(self, agg):
        """Write agg's string by joining its children with agg's 'cond' separator."""
        parts = [self.write_element[child] for child in self.tree[agg]['vals']]
        self.write_element[agg] = '({x})'.format(x=self.tree[agg]['cond'].join(parts))
    def update_aggs(self):
        for agg in self.agg:
            self.update_agg(agg)
    def update_agg(self, agg):
        """Mark agg resolved when every incoming edge is resolved."""
        edges = self.check_map[self.check_map.index.get_level_values('agg') == agg]
        self.check_agg[agg] = (edges == True).all()
    def update_map(self, inp):
        """Mark every edge whose child is inp as resolved."""
        self.check_map[self.check_map.index.get_level_values('in') == inp] = True
    def agg_from_inp(self, inp):
        """Return the parent aggregate of node inp."""
        return self.map_[self.map_.get_level_values('in') == inp].get_level_values('agg')[0]
1603389 | # -*- coding: utf-8 -*-
"""
Created on 201906
Author : GJ
Python version:3.7
"""
import pandas as pd
from WindPy import *
import numpy as np
import xlsxwriter as xls
import datetime
import calendar
import copy
import os
from decimal import Decimal
import time
import seaborn as sns
import openpyxl
sns.set_style("white")
# File path: no need to set manually — taken from the folder this script runs in.
path = os.getcwd() + '\\'
# database file name
database_name = 'CN_Stock_SHHKconnect_Northbound_PyInput.xlsm'
old_output_name='CN_Stock_SHHKconnect_Northbound_PyOutput.xlsx'
# Locate the position of a given date within a date sequence.
# sele_date: the selected date
# date_range: the date sequence
# flag: 1 = treat as start date, 2 = treat as end date
def anchor_se_date(sele_date, date_range, flag):
    """Locate the position of sele_date within date_range.

    flag == 1 treats sele_date as a period start, flag == 2 as a period end.
    Dates outside the range clamp to the first/last position. Returns an
    integer position into date_range (0 if no bracket matches).
    """
    date_range = date_range.reset_index(drop=True)
    total = len(date_range)
    # Normalise datetime-like entries to yyyymmdd integers when the types differ.
    if type(sele_date) != type(date_range.iloc[0]):
        for pos in range(total):
            digits = str(date_range.iloc[pos]).replace("-", "")
            date_range.iloc[pos] = int(digits[0:8])
    if sele_date <= date_range.iloc[0]:
        return 0
    if sele_date >= date_range.iloc[total - 1]:
        return total - 1
    located = 0
    for pos in range(total - 1):
        lower = date_range.iloc[pos]
        upper = date_range.iloc[pos + 1]
        if flag == 1 and lower < sele_date <= upper:
            located = pos + 1
            break
        if flag == 2 and lower <= sele_date < upper:
            located = pos
            break
    return located
# Helper that builds the indicator-name header frames.
def result_col_str(row_no):
    """Build the three label frames used on the left of every output table.

    Returns (header, blank, col_row):
      header  -- row_no x 3 frame whose first row holds the column labels;
      blank   -- row_no x 3 empty frame with the same columns;
      col_row -- 1 x 3 frame indexed by 'c' holding the column labels.
    """
    labels = ['indicator_name', 'unit', 'eng_name']
    header = pd.DataFrame(data=None, index=range(0, row_no), columns=labels)
    for j, lab in enumerate(labels):
        header.iloc[0, j] = lab
    blank = pd.DataFrame(data=None, index=range(0, row_no), columns=labels)
    col_row = pd.DataFrame(data=None, index=['c'], columns=labels)
    for j, lab in enumerate(labels):
        col_row.iloc[0, j] = lab
    return header, blank, col_row
# Convert a date-formatted value into integer components.
def date_to_int_m(date_range, col):
    """Split date_range[col] into integers (yyyymmdd, year, month, day).

    Works on anything whose str() starts with yyyy-mm-dd (dashes optional).
    """
    digits = str(date_range.iloc[col]).replace("-", "")
    full_date = int(digits[0:8])
    year = int(digits[0:4])
    month = int(digits[4:6])
    day = int(digits[6:8])
    return full_date, year, month, day
def clear_df(df_data, row_no, col_no):
    """Drop the first row_no rows and col_no columns, renumbering both axes from 0."""
    trimmed = df_data.iloc[row_no:df_data.shape[0], col_no:df_data.shape[1]]
    trimmed = trimmed.reset_index(drop=True)
    # Transposing twice lets reset_index renumber the columns as well.
    return trimmed.T.reset_index(drop=True).T
def copy_df(v_data, col_count):
    """Return a DataFrame with col_count identical columns, each a copy of v_data."""
    stacked = pd.DataFrame(data=None, index=range(0, len(v_data)), columns=range(0, col_count))
    for pos in range(0, col_count):
        stacked.iloc[:, pos] = v_data
    return stacked
def cal_simple_index(holdshare,close,adj,ind,cs_i,eight_sector_dict,main_index_dict,date_range,mom_range,anchor_date,csi_index,index_chn_str,index_eng_str,index_unit_str):
    """Build the northbound holding-return index, chained and re-based.

    Carries the last reported holdings forward onto the denser close-price
    calendar, forms a day-over-day chain-link ratio (adjusted value of
    yesterday's holdings over yesterday's value, restricted to stocks held
    on both days), aggregates numerator/denominator via cal_vertical_archi,
    cumulates from 100, re-bases the level so it matches the old index at
    anchor_date[0] (old levels supplied in anchor_date[1]), and slices the
    result to the dates in mom_range via slice_fmkt.
    NOTE(review): the date_range parameter is immediately overwritten below.
    """
    hs_data = clear_df(holdshare, 2, 1)
    # hs_data.fillna(0, inplace=True)
    date_range = holdshare.iloc[0, 1:holdshare.shape[1]]
    date_range = date_range.reset_index(drop=True)
    long_range = close.iloc[0, 1:close.shape[1]]
    long_range = long_range.reset_index(drop=True)
    vertical_range = close.iloc[0, 2:close.shape[1]]
    vertical_range = vertical_range.reset_index(drop=True)
    close_data = clear_df(close, 2, 1)
    close_data.fillna(0, inplace=True)
    # Carry the last reported holdings forward onto the close-price calendar.
    hs_long = pd.DataFrame(data=None,index=range(0,close_data.shape[0]),columns=range(0,close_data.shape[1]))
    n=0
    for m in range (0,close_data.shape[1]):
        mx=date_range[n]-long_range[m]
        if mx==0:
            hs_long.iloc[:,m]=hs_data.iloc[:,n]
            n=n+1
        elif mx>0:
            hs_long.iloc[:,m]=hs_data.iloc[:,n-1]
    hs_now_bool=clear_df(hs_long,0,1)
    # hs_long_data.fillna(0, inplace=True)
    hs_now_bool[hs_now_bool.notna()] = 1
    hs_now_bool.fillna(0, inplace=True)
    hs_yestoday_data = hs_long.iloc[0:hs_long.shape[0], 0:hs_long.shape[1] - 1]
    hs_yestoday_data = hs_yestoday_data.reset_index(drop=True)
    hs_yestoday_data = hs_yestoday_data.T.reset_index(drop=True).T
    hs_yestoday_bool = hs_yestoday_data.copy()
    hs_yestoday_data.fillna(0, inplace=True)
    hs_yestoday_bool[hs_yestoday_bool.notna()] = 1
    hs_yestoday_bool.fillna(0, inplace=True)
    close_now_data=clear_df(close,2,2)
    close_now_data.fillna(0, inplace=True)
    close_yestoday_data = close.iloc[2:close.shape[0], 1:close.shape[1] - 1]
    close_yestoday_data = close_yestoday_data.reset_index(drop=True)
    close_yestoday_data = close_yestoday_data.T.reset_index(drop=True).T
    adj_now_data=clear_df(adj,2,2)
    adj_now_data.fillna(0, inplace=True)
    adj_yestoday_data = adj.iloc[2:adj.shape[0], 1:adj.shape[1] - 1]
    adj_yestoday_data = adj_yestoday_data.reset_index(drop=True)
    adj_yestoday_data = adj_yestoday_data.T.reset_index(drop=True).T
    # Chain-link ratio components, restricted to stocks held on both days.
    numerator=hs_yestoday_bool*hs_now_bool*adj_now_data*close_now_data*hs_yestoday_data/adj_yestoday_data
    denominator=hs_yestoday_bool*hs_now_bool*hs_yestoday_data*close_yestoday_data
    numerator.fillna(0,inplace=True)
    denominator.fillna(0,inplace=True)
    num_all=cal_vertical_archi(numerator, ind, cs_i, eight_sector_dict, main_index_dict, vertical_range, vertical_range, index_chn_str,index_eng_str,index_unit_str,csi_index)
    denom_all=cal_vertical_archi(denominator, ind, cs_i, eight_sector_dict, main_index_dict, vertical_range, vertical_range, "北上资金周度指数_",'North_Index_W_','点',csi_index)
    num_data=clear_df(num_all,1,3)
    denom_data=clear_df(denom_all,1,3)
    denom_data [denom_data==0]=np.nan
    dod=num_data/denom_data
    # Flat (no data) days get a ratio of 1 so the chained level is unchanged.
    dod.fillna(1, inplace=True)
    # dod= numerator.sum()/denominator.sum()
    # Cumulate the day-over-day ratios starting from a base level of 100.
    index_all=pd.DataFrame(data=None,index=range(0,dod.shape[0]),columns=range(0,dod.shape[1]+1))
    index_all.iloc[:,0]=100
    for p in range(1, dod.shape[1] + 1):
        index_all.iloc[:, p] = index_all.iloc[:, p - 1] * dod.iloc[:, p - 1]
    for anchor_no in range(1,len(long_range)):
        if (long_range[anchor_no-1])<=anchor_date[0] and (long_range[anchor_no])>anchor_date[0]:
            anchor_index=anchor_no-1
            break
    anchor_df=index_all.iloc[:,anchor_index]
    code_df = pd.concat([anchor_df] * (index_all.shape[1]), axis=1)
    code_df = code_df.T.reset_index(drop=True).T
    code_df = code_df.reset_index(drop=True)
    # Splice onto the old index base so levels are continuous at the anchor date.
    old_index_df=pd.concat([anchor_date[1]]* (index_all.shape[1]), axis=1)
    old_index_df=old_index_df.T.reset_index(drop=True).T
    old_index_df = old_index_df.reset_index(drop=True)
    anchor_index_df=index_all/code_df*old_index_df
    right_col = pd.DataFrame(data=long_range).T
    right_col = right_col.rename({right_col.index[0]: 'c'}, axis='index')
    right_data = pd.concat([right_col,anchor_index_df])
    mon_left=num_all.iloc[:,0:3]
    # week_left=denom_all.iloc[:,0:3]
    mon_all=pd.concat([mon_left,right_data],axis=1)
    # week_all=pd.concat([week_left,right_data],axis=1)
    mon_result=slice_fmkt(mon_all,mom_range)
    # week_result=slice_fmkt(week_all,wow_range)
    return mon_result
def cal_idex(holdshare,vwap,div_df,close):
    """Daily per-stock northbound flow: (today's shares - yesterday's shares
    adjusted by div_df) * vwap, restricted to stocks present on both days.

    NOTE(review): close_data / close_yestoday_data are computed but never
    used here (cal_ind_flow is the same function without the close argument).
    """
    hs_data = clear_df(holdshare, 2, 2)
    hs_data.fillna(0, inplace=True)
    close_data=clear_df(close,2,2)
    close_data.fillna(0,inplace=True)
    hs_data_bool = clear_df(holdshare, 2, 2)
    hs_data_bool[hs_data_bool.notna()] = 1
    hs_data_bool.fillna(0, inplace=True)
    vwap_data = clear_df(vwap, 2, 2)
    vwap_data.fillna(0, inplace=True)
    hs_yestoday_data = holdshare.iloc[2:holdshare.shape[0], 1:holdshare.shape[1] - 1]
    hs_yestoday_data = hs_yestoday_data.reset_index(drop=True)
    hs_yestoday_data = hs_yestoday_data.T.reset_index(drop=True).T
    hs_yestoday_bool = hs_yestoday_data.copy()
    hs_yestoday_data.fillna(0, inplace=True)
    close_yestoday_data=close.iloc[2:close.shape[0], 1:close.shape[1] - 1]
    close_yestoday_data=close_yestoday_data.reset_index(drop=True)
    close_yestoday_data = close_yestoday_data.T.reset_index(drop=True).T
    hs_yestoday_bool[hs_yestoday_bool.notna()] = 1
    hs_yestoday_bool.fillna(0, inplace=True)
    # The two boolean masks keep today/yesterday on a comparable basis and
    # guard against bad/missing Wind data.
    delta_data = (hs_data - hs_yestoday_data * div_df) * vwap_data * hs_data_bool * hs_yestoday_bool
    return delta_data
def cal_ind_flow(holdshare,vwap,div_df):
    """Daily per-stock northbound flow (share change * vwap), dividend-adjusted.

    Same computation as cal_idex minus the unused close argument.
    """
    hs_data=clear_df(holdshare,2,2)
    hs_data.fillna(0, inplace=True)
    hs_data_bool=clear_df(holdshare,2,2)
    hs_data_bool[hs_data_bool.notna()]=1
    hs_data_bool.fillna(0, inplace=True)
    vwap_data=clear_df(vwap,2,2)
    vwap_data.fillna(0, inplace=True)
    hs_yestoday_data = holdshare.iloc[2:holdshare.shape[0], 1:holdshare.shape[1]-1]
    hs_yestoday_data = hs_yestoday_data.reset_index(drop=True)
    hs_yestoday_data = hs_yestoday_data.T.reset_index(drop=True).T
    hs_yestoday_bool = hs_yestoday_data.copy()
    hs_yestoday_data.fillna(0, inplace=True)
    hs_yestoday_bool[hs_yestoday_bool.notna()] = 1
    hs_yestoday_bool.fillna(0, inplace=True)
    # The two boolean masks keep today/yesterday on a comparable basis and
    # guard against bad/missing Wind data.
    delta_data = (hs_data-hs_yestoday_data*div_df)*vwap_data*hs_data_bool*hs_yestoday_bool
    return delta_data
def cal_vertical_archi(delta_data,ind,dict,eight_sector_dict,main_index_dict,week_range,write_date_range,chn_str,eng_str,unitstr,csi_index):
    """Aggregate per-stock values into main-index / eight-sector / industry rows.

    delta_data: per-stock per-date values (rows aligned with ind's stock codes).
    dict: industry dictionary frame (shadows the builtin name — kept as-is
    since it is part of the call signature). Returns a DataFrame with three
    label columns (name/unit/english name) followed by write_date_range
    columns; all sums are scaled by 1e8.
    """
    # NOTE(review): code_data is never used below.
    code_data = pd.DataFrame(data=None, index=range(0, delta_data.shape[0]), columns=range(0, len(week_range)))
    ind_col_select = select_data(ind,week_range)
    ind_data_clear= clear_df(ind,2,1)
    ind_data = ind_data_clear.iloc[:,ind_col_select[0]]
    ind_data = clear_df(ind_data,0,0)
    left_df = result_col_str(dict.shape[0])
    left_df_main_index=result_col_str(main_index_dict.shape[0])
    left_df_eight = result_col_str(eight_sector_dict.shape[0])
    right_col = pd.DataFrame(data=write_date_range).T
    right_col = right_col.rename({right_col.index[0]: 'c'}, axis='index')
    right_data = pd.DataFrame(data=0, index=range(0, dict.shape[0]), columns=range(0, len(week_range)))
    right_data_main_index = pd.DataFrame(data=0, index=range(0, main_index_dict.shape[0]), columns=range(0, len(week_range)))
    right_data_eight = pd.DataFrame(data=0, index=range(0, eight_sector_dict.shape[0]), columns=range(0, len(week_range)))
    code_list=ind.iloc[2:ind.shape[0],0]
    # Row 0 of the main-index table is the all-market total.
    left_df_main_index[1].iloc[0, 0] = chn_str + main_index_dict.iloc[0, 0]
    left_df_main_index[1].iloc[0, 1] = unitstr
    left_df_main_index[1].iloc[0, 2] = eng_str + main_index_dict.iloc[0, 1]
    right_data_main_index.iloc[0,:]= delta_data.sum()/100000000
    # Regex prefixes selecting boards by stock-code pattern; order is assumed
    # to match main_index_dict rows — TODO confirm against the input workbook.
    r_style=['','^6','^(0|3)','^00(0|1)','^00(2|3)','^3']
    csi_index_no=0
    for main_no in range(1,main_index_dict.shape[0]):
        if main_index_dict.iloc[main_no,1][0:3]=='CSI':
            # CSI membership flags come in as "是" (yes); turn into a 0/1 mask.
            csi_col_select = select_data(csi_index[csi_index_no], week_range)
            csi_clear_now=clear_df(csi_index[csi_index_no],2,1)
            csi_data = csi_clear_now.iloc[:, csi_col_select[0]]
            csi_data = clear_df(csi_data, 0, 0)
            csi_data[csi_data!="是"]=0
            csi_data[csi_data == "是"] = 1
            code_df=csi_data
            csi_index_no=csi_index_no+1
        else:
            code_list_now = code_list.copy()
            code_list_now[code_list_now.str.contains(r_style[main_no])]=1
            code_df = pd.concat([code_list_now] * (delta_data.shape[1]), axis=1)
            code_df = code_df.T.reset_index(drop=True).T
            code_df = code_df.reset_index(drop=True)
            code_df[code_df != 1] = 0
        left_df_main_index[1].iloc[main_no, 0] = chn_str + main_index_dict.iloc[main_no, 0]
        left_df_main_index[1].iloc[main_no, 1] = unitstr
        left_df_main_index[1].iloc[main_no, 2] = eng_str + main_index_dict.iloc[main_no, 1]
        main_data=delta_data*code_df
        right_data_main_index.iloc[main_no, :] = main_data.sum() / 100000000
    for cs_no in range(0, dict.shape[0]):
        # 0/1 mask of stocks belonging to this CS industry.
        now_ind_df = ind_data.copy()
        now_ind_df[now_ind_df == dict.iloc[cs_no, 2]] = 1
        now_ind_df[now_ind_df != 1] = 0
        # testx=now_ind_df*delta_data/1000000
        # amx=testx[testx!=0]
        # bmx=amx.dropna(how='all')
        right_data.iloc[cs_no, :] = (now_ind_df * delta_data).sum() / 100000000
        left_df[1].iloc[cs_no, 0] = chn_str + dict.iloc[cs_no, 1]
        left_df[1].iloc[cs_no, 1] = unitstr
        left_df[1].iloc[cs_no, 2] = eng_str + dict.iloc[cs_no, 3]
        # Roll the industry into its parent sector (eight-sector grouping).
        am = eight_sector_dict[eight_sector_dict['FOUR_SECTOR'] == dict.iloc[cs_no, 0]].index.tolist()
        if am!=[]:
            right_data_eight.iloc[am[0],:]=right_data_eight.iloc[am[0],:]+right_data.iloc[cs_no,:]
            left_df_eight[1].iloc[am[0], 0] = chn_str + eight_sector_dict.iloc[am[0], 0]
            left_df_eight[1].iloc[am[0], 1] = unitstr
            left_df_eight[1].iloc[am[0], 2] = eng_str + eight_sector_dict.iloc[am[0], 1]
    main_index_result=pd.concat([left_df_main_index[1],right_data_main_index],axis=1)
    eight_result=pd.concat([left_df_eight[1],right_data_eight],axis=1)
    cs_result=pd.concat([left_df[1],right_data],axis=1)
    col_result=pd.concat([left_df[2],right_col],axis=1)
    result=pd.concat([main_index_result,eight_result,cs_result])
    result=result.reset_index(drop=True)
    f_result=pd.concat([col_result,result])
    return f_result
# Merge/compress the daily data into coarser period buckets.
def cal_push_data(delta_data,ind,dict,eight_sector_dict,main_index_dict,week_range,date_range,chn_str,eng_str,edb,csi_index):
    """Aggregate daily sector flows into the period buckets defined by week_range.

    week_range is assumed to be a triple (positions, boundary dates, label
    dates) as produced by the period-selection helpers — TODO confirm against
    the caller. Rows coming from the EDB sheet overwrite the first rows of
    the daily sector data before bucketing.
    """
    #sector_day_data = pd.DataFrame(data=0, index=range(0, dict.shape[0]), columns=range(0, delta_data.shape[1]))
    day_data=cal_vertical_archi(delta_data,ind,dict,eight_sector_dict,main_index_dict,date_range,date_range,chn_str,eng_str,'亿元',csi_index)
    sector_day_data=clear_df(day_data,1,3)
    right_col = pd.DataFrame(data=None, index=['c'], columns=range(0, len(week_range[0])))
    right_data = pd.DataFrame(data=0, index=range(0, sector_day_data.shape[0]), columns=range(0, len(week_range[0])))
    edb_col_select=select_data(edb,date_range)
    edb_clear_data=clear_df(edb,2,1)
    edb_clear_select = edb_clear_data.iloc[:,edb_col_select[0]]
    edb_clear = clear_df(edb_clear_select,0,0)
    sector_day_data.iloc[0:edb_clear.shape[0],0:edb_clear.shape[1]]=edb_clear
    for week_no in range(0, len(week_range[0])):
        right_col.iloc[0, week_no] = str(week_range[2][week_no])
        # Sum days falling inside (week_range[1][k], week_range[1][k+1]].
        for day_no in range(0, len(date_range)):
            if week_range[1][week_no] < date_range[day_no] and week_range[1][week_no + 1] >= date_range[day_no]:
                right_data.iloc[:, week_no] = right_data.iloc[:, week_no] + sector_day_data.iloc[:, day_no]
            elif week_range[1][week_no + 1] < date_range[day_no]:
                break
    result_left = day_data.iloc[0:day_data.shape[0],0:3]
    result_right = pd.concat([right_col, right_data])
    result_df = pd.concat([result_left, result_right], axis=1)
    return result_df
# Build the dividend / stock-split adjustment DataFrame.
def cal_div_df(divcap,exdate,date_range):
    """Per-stock dividend/split adjustment factors over date_range.

    Starts from 1 everywhere; for each dividend event the capitalization
    change is added on the first date in date_range that is >= the ex-date.
    """
    divcap_data=divcap.iloc[2:divcap.shape[0],1:divcap.shape[1]]
    divcap_data=divcap_data.reset_index(drop=True)
    divcap_data = divcap_data.T.reset_index(drop=True).T
    exdate_data = exdate.iloc[2:exdate.shape[0], 1:exdate.shape[1]]
    exdate_data = exdate_data.reset_index(drop=True)
    exdate_data = exdate_data.T.reset_index(drop=True).T
    divcap_data.fillna(0,inplace=True)
    exdate_data.fillna(0,inplace=True)
    div_df= pd.DataFrame(data=1,index=range(0,divcap_data.shape[0]),columns=range(0,len(date_range)))
    date_df=copy_df(date_range,divcap_data.shape[0]).T
    for div_col in range(0,divcap_data.shape[1]):
        cap_df=copy_df(divcap_data.iloc[:,div_col],len(date_range))
        exdate_df=copy_df(exdate_data.iloc[:,div_col],len(date_range))
        delta_date_df=date_df - exdate_df
        product_df= pd.DataFrame(data=0,index=range(0,divcap_data.shape[0]),columns=range(0,len(date_range)))
        for m in range(0,delta_date_df.shape[0]):
            # Only stocks whose ex-date falls inside the date window.
            if delta_date_df.iloc[m,0]<=0 and delta_date_df.iloc[m,delta_date_df.shape[1]-1]>=0 :
                for n in range(0,delta_date_df.shape[1]):
                    if n==0:
                        if delta_date_df.iloc[m,n]==0:
                            product_df.iloc[m,n]=cap_df.iloc[m,n]
                            break
                    else:
                        if delta_date_df.iloc[m, n] == 0:
                            product_df.iloc[m, n] = cap_df.iloc[m, n]
                            break
                        elif delta_date_df.iloc[m, n-1] < 0 and delta_date_df.iloc[m, n] > 0 :
                            # Ex-date fell between two trading days: apply on the next one.
                            product_df.iloc[m, n] = cap_df.iloc[m, n]
                            break
        div_df=div_df+product_df
    return div_df
# Slice weekly / monthly observations out of daily data.
def select_data(holdshare, mom_range):
    """Map each target date in mom_range to a column position in holdshare.

    Row 0 of holdshare (from column 1 on) is the available date axis. For
    each target, the exact match is picked; otherwise the last available
    date before the target. Targets before the first date yield nothing.
    Returns (positions, dates at those positions).
    """
    avail = holdshare.iloc[0, 1:holdshare.shape[1]].reset_index(drop=True)
    length = len(avail)
    picked = []
    for target in mom_range:
        gap = avail - target
        for n in range(length):
            if gap[n] == 0:
                # Exact match.
                picked.append(n)
                break
            if n > 0:
                if gap[n - 1] < 0 and gap[n] > 0:
                    # Target falls between two dates: take the earlier one.
                    picked.append(n - 1)
                    break
                if n == length - 1 and gap[length - 1] < 0:
                    # Target is beyond the last available date: take the last.
                    picked.append(n)
    return picked, avail[picked]
# Compute holdings value (shares * close price).
def cal_hold(holdshare, close, mom_range):
    """Holding value (shares * close) at the dates selected by mom_range[0]."""
    share_cols = select_data(holdshare, mom_range[0])
    price_cols = select_data(close, mom_range[0])
    shares = clear_df(holdshare, 2, 1)
    shares.fillna(0, inplace=True)
    prices = clear_df(close, 2, 1)
    prices.fillna(0, inplace=True)
    # Keep only the selected date columns, renumbered so the frames align.
    shares_sel = clear_df(shares.iloc[:, share_cols[0]], 0, 0)
    prices_sel = clear_df(prices.iloc[:, price_cols[0]], 0, 0)
    return shares_sel * prices_sel
def cal_float_mkt(fmk,cs_i,eight_sector_dict,main_index):
    """Sum floating market cap by main index, eight sectors and CS industries.

    Returns (labelled frame with header row, raw numeric frame); values are
    scaled by 1e8 (亿元).
    """
    fmk_data=clear_df(fmk,2,1)
    fmk_include_code=clear_df(fmk,2,0)
    # NOTE(review): 46 is a static row count (the original comment said 43
    # while the code uses 46) — update manually if the set of tracked series
    # changes.
    fmk_result=pd.DataFrame(data=None, index=range(0,46), columns=range(0, fmk_data.shape[1]))
    fmk_result.iloc[0:9,:]= fmk_data.iloc[0:9,:]/100000000
    left_df = result_col_str(46)
    left_df[1].iloc[0:9,0]= '流通市值_' + main_index.iloc[:,0]
    left_df[1].iloc[0:9, 1] = '亿元'
    left_df[1].iloc[0:9, 2] = 'Floating_MarktCap_' + main_index.iloc[:, 1]
    right_col = pd.DataFrame(data=fmk.iloc[0,1:fmk.shape[1]]).T
    right_col = right_col.T.reset_index(drop=True).T
    right_col = right_col.rename({right_col.index[0]: 'c'}, axis='index')
    # Rows 9..16: the eight sectors (sum of their member CS industries).
    for m in range(0,eight_sector_dict.shape[0]):
        now_ind = eight_sector_dict.iloc[m, 0]
        cs_now = cs_i[cs_i.CS_I_FS==now_ind]
        sector_merge=pd.merge(cs_now, fmk_include_code, how='left', left_on='CS_I_SECTOR', right_on=0)
        sector_merge_clear=clear_df(sector_merge,0,5)
        left_df[1].iloc[9 + m,0]='流通市值_' + eight_sector_dict.iloc[m,0]
        left_df[1].iloc[9 + m, 1] = '亿元'
        left_df[1].iloc[9 + m, 2]='Floating_MarktCap_' + eight_sector_dict.iloc[m, 1]
        fmk_result.iloc[9 + m, :] = sector_merge_clear.sum()/100000000
    # Rows 17..: individual CS industries.
    for n in range(0,cs_i.shape[0]):
        now_ind = cs_i.iloc[n, 2]
        cs_now = pd.DataFrame(data=now_ind,index=[0],columns=['CS_I_SECTOR'])
        sector_merge = pd.merge(cs_now, fmk_include_code, how='left', left_on='CS_I_SECTOR', right_on=0)
        sector_merge_clear = clear_df(sector_merge, 0, 2)
        left_df[1].iloc[17 + n, 0] = '流通市值_' + cs_i.iloc[n, 1]
        left_df[1].iloc[17 + n, 1] = '亿元'
        left_df[1].iloc[17 + n, 2] = 'Floating_MarktCap_' + cs_i.iloc[n, 3]
        fmk_result.iloc[17 + n, :] = sector_merge_clear.sum()/100000000
    cs_result = pd.concat([left_df[1], fmk_result], axis=1)
    col_result = pd.concat([left_df[2], right_col], axis=1)
    f_result = pd.concat([col_result, cs_result])
    return f_result,fmk_result
def cal_fund_index(edb,date_range,mom_range,chnstr,engstr,unitstr):
    """Align an EDB series onto date_range, attach labels, then cut to mom_range dates."""
    edb_col_select = select_data(edb, date_range)
    edb_clear_data = clear_df(edb, 2, 1)
    edb_clear_select = edb_clear_data.iloc[:, edb_col_select[0]]
    edb_clear = clear_df(edb_clear_select, 0, 0)
    left_df = result_col_str(1)
    left_df[1].iloc[0, 0] = chnstr
    left_df[1].iloc[0, 1] = unitstr
    left_df[1].iloc[0, 2] = engstr
    right_col = pd.DataFrame(data=date_range).T
    right_col = right_col.rename({right_col.index[0]: 'c'}, axis='index')
    right=pd.concat([right_col,edb_clear])
    # Drop the first two date columns so the frame matches slice_fmkt's layout.
    right=right.iloc[:,2:right.shape[1]]
    right=right.T.reset_index(drop=True).T
    left=pd.concat([left_df[2],left_df[1]])
    full_result=pd.concat([left,right],axis=1)
    slice_result=slice_fmkt(full_result,mom_range)
    return slice_result
def slice_fmkt(fmkt,mom_range):
    """Cut a labelled frame (3 label cols + 'c' date row) down to mom_range's dates.

    mom_range is the (positions, boundary dates, label dates) triple used
    throughout this module; the output columns are relabelled with mom_range[2].
    """
    fmkt_bridge=clear_df(fmkt,0,2)
    sele_col_list=select_data(fmkt_bridge,mom_range[0])
    fmkt_data=clear_df(fmkt,1,3)
    mkt_sele_data=fmkt_data.iloc[:,sele_col_list[0]]
    mkt_sele_data_clear=clear_df(mkt_sele_data,0,0)
    right_col = pd.DataFrame(data=mom_range[2]).T
    right_col = right_col.rename({right_col.index[0]: 'c'}, axis='index')
    right_data = pd.DataFrame(data=mkt_sele_data_clear, index=range(0, mkt_sele_data_clear.shape[0]), columns=range(0, len(mom_range[2])))
    left=fmkt.iloc[:,0:3]
    right = pd.concat([right_col,right_data])
    return pd.concat([left,right],axis=1)
def slice_index(index_data,mom_range,chnstr,engstr):
    """Crop a single-row index series to the period grid and attach
    Northbound ('北上资金'/'North') labels.

    NOTE(review): this helper appears unused in the visible part of the
    script -- confirm before removing.
    """
    hs_col_list = select_data(index_data, mom_range[0])
    index_clear=clear_df(index_data,1,1)
    index_sele_data=index_clear.iloc[:,hs_col_list[0]]
    index_sele_data_clear=clear_df(index_sele_data,0,0)
    left_df = result_col_str(1)
    # Header row 'c' carries the clipped period dates.
    right_col = pd.DataFrame(data=mom_range[2]).T
    right_col = right_col.rename({right_col.index[0]: 'c'}, axis='index')
    right_data = pd.DataFrame(data=index_sele_data_clear, index=range(0, 1), columns=range(0, len(mom_range[2])))
    left_df[1].iloc[0,0]='北上资金' + chnstr
    left_df[1].iloc[0, 1] = '点'
    left_df[1].iloc[0, 2] = 'North' + engstr
    up_result=pd.concat([left_df[2],right_col],axis=1)
    donw_result=pd.concat([left_df[1],right_data],axis=1)
    result=pd.concat([up_result,donw_result])
    return result
# Index series are rebased to 2017/1/31.
# Write one result table into the output Excel workbook.
def write_result_excel(pys_result, workbook, worksheetname):
    """Write pys_result into a new worksheet of an XlsxWriter workbook.

    Row 0 (columns >= 3) holds dates written with a date format; rows 1+
    are written column-by-column: the first three columns as plain text
    labels, the rest as one-decimal numbers.
    """
    worksheet = workbook.add_worksheet(worksheetname)
    worksheet.set_column('A:A', 28)
    worksheet.set_column('B:B', 8)
    worksheet.set_column('C:C', 20)
    # Cell formats: labels, one-decimal numbers, yyyymmdd dates.
    format_column = workbook.add_format(
        {'text_wrap': False, 'font_name': 'Times New Roman', 'font_size': 10, 'align': 'vcenter'})
    format_decimal = workbook.add_format(
        {'text_wrap': False, 'font_name': 'Times New Roman', 'font_size': 10, 'num_format': '#,##0.0_ ',
         'align': 'vcenter'})
    format_date = workbook.add_format(
        {'text_wrap': False, 'font_name': 'Times New Roman', 'font_size': 10, 'num_format': 'yyyymmdd',
         'align': 'vcenter'})
    # Blank out NaN cells so XlsxWriter writes empty strings.
    pys_result = pys_result.where(pys_result.notnull(), '')
    # Write the date header row (columns 3+); headers may arrive either as
    # int yyyymmdd or as datetime-like objects.
    for date_col in range(3, pys_result.shape[1]):
        this_date=pys_result.iloc[0, date_col]
        if isinstance(this_date,int)==False :
            this_date=int(this_date.strftime('%Y%m%d'))
        date_str =datetime.datetime.strptime(str(this_date), '%Y%m%d')
        worksheet.write_datetime(0, date_col, date_str, format_date)
    # Write the data columns below the header row.
    for r_col in range(0, pys_result.shape[1]):
        if r_col < 3:
            worksheet.write_column(1, r_col, pys_result.iloc[1:pys_result.shape[0], r_col], format_column)
        else:
            worksheet.write_column(1, r_col, pys_result.iloc[1:pys_result.shape[0], r_col], format_decimal)
    # Freeze the three label columns and the header row.
    worksheet.freeze_panes('D2')
# Sections are stacked into one sheet, so every section after the first
# must lose its duplicated header row.
def del_colname(data_result):
    """Return data_result without its first (header) row."""
    return data_result.iloc[1:, :]
def crop_week(week_range, date_range):
    """Crop a period-end date series to the window spanned by date_range.

    Returns a 3-tuple of Series (each with a fresh 0..n index):
      cropped   -- periods from the first one >= date_range[0] through the
                   first one >= date_range[-1]
      with_lead -- same span plus one extra leading period
      clipped   -- cropped, with its final entry replaced by date_range[-1]
    """
    first_target = date_range[0]
    last_target = date_range[len(date_range) - 1]
    # Position of the first period covering the start of the window.
    for start in range(0, len(week_range)):
        if week_range[start] >= first_target:
            break
    # Position of the first period covering the end of the window.
    for stop in range(0, len(week_range)):
        if week_range[stop] >= last_target:
            break
    cropped = week_range[start:stop + 1].reset_index(drop=True)
    clipped = cropped.copy()
    clipped[len(clipped) - 1] = last_target
    with_lead = week_range[start - 1:stop + 1].reset_index(drop=True)
    return cropped, with_lead, clipped
def find_last_col(last_date, result_df_b):
    """Return the index of the column right after the one whose header row
    (row 0, data columns start at 3) equals last_date as an int yyyymmdd.

    :param last_date: datetime/date-like object (must support strftime)
    :param result_df_b: DataFrame whose row 0 holds int yyyymmdd headers
    :raises ValueError: when last_date is not found in the header row
        (the original code crashed with a NameError on `new_col` instead).
    """
    last_date_int = int(last_date.strftime('%Y%m%d'))
    for m in range(3, result_df_b.shape[1]):
        if result_df_b.iloc[0, m] == last_date_int:
            return m + 1
    raise ValueError('date {0} not found in header row'.format(last_date_int))
# Create the Excel workbook that will hold the results, saved in the same
# directory as the script (a timestamped name 'Result_<time>.xlsx' was used
# previously -- see the commented-out fragment below).
workbook = xls.Workbook(
    path + 'CN_Stock_SHHKconnect_Northbound_PyOutput' + '.xlsx',
    {'nan_inf_to_errors': True}) #time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) +
# Read the previous run's output so the new periods can be appended to it.
old_week_df=pd.read_excel(path+old_output_name,'Python_Week',header=None)
old_month_df=pd.read_excel(path+old_output_name,'Python_Month',header=None)
# Rows whose English label (column 2) contains '_Index_' are chained index
# series; their last stored values anchor the new index calculation.
eng_array=old_week_df.iloc[:,2]
eng_list=[]
eng_logi=eng_array.str.contains('_Index_')
for m in range(1,len(eng_array)):
    if eng_logi[m]==True :
        eng_list.append(m)
month_anchor_index=old_month_df.iloc[eng_list,old_month_df.shape[1]-2]
week_anchor_index=old_week_df.iloc[eng_list,old_week_df.shape[1]-2]
month_anchor_date=old_month_df.iloc[0,old_month_df.shape[1]-2].strftime('%Y%m%d')
week_anchor_date=old_week_df.iloc[0,old_week_df.shape[1]-2].strftime('%Y%m%d')
anchor_data_month=(int(month_anchor_date),month_anchor_index)
anchor_data_week=(int(week_anchor_date),week_anchor_index)
# Keep everything up to (excluding) the last stored column; it is
# recomputed below and the fresh columns are appended after it.
slice_old_mom=old_month_df.iloc[:,0:old_month_df.shape[1]-1]
slice_old_wow=old_week_df.iloc[:,0:old_week_df.shape[1]-1]
# Read the raw database sheets: holdings, prices, sector maps, dates, etc.
holdshare = pd.read_excel(path + database_name, 'holdshare', header=None)
close = pd.read_excel(path + database_name, 'close', header=None)
stock_adj = pd.read_excel(path + database_name, 'adjfactor', header=None)
vwap = pd.read_excel(path + database_name, 'vwap', header=None)
ind = pd.read_excel(path + database_name, 'ind', header=None)
fmk = pd.read_excel(path + database_name, 'Floating_MktCap', header=None)
dict = pd.read_excel(path + database_name, 'sector_list', header=0)  # NOTE(review): shadows the builtin dict
date_sheet=pd.read_excel(path + database_name, 'date', header=None)
fund=pd.read_excel(path + database_name, 'fund_index', header=None)
csi300=pd.read_excel(path + database_name, 'csi300', header=None)
csi500=pd.read_excel(path + database_name, 'csi500', header=None)
csi1000=pd.read_excel(path + database_name, 'csi1000', header=None)
csi_index=(csi300,csi500,csi1000)
# Weekly and monthly period-end date axes from the 'date' sheet.
week_range=date_sheet.iloc[2:date_sheet.shape[0],0]
week_range=week_range.reset_index(drop=True)
mon_range=date_sheet.iloc[2:date_sheet.shape[0],3]
mon_range=mon_range.reset_index(drop=True)
divcap = pd.read_excel(path + database_name, 'div_cap', header=None)
divexdate = pd.read_excel(path + database_name, 'div_exdate', header=None)
edb_data = pd.read_excel(path + database_name, 'edb', header=None)
# Trading-date axes taken from the data sheets themselves.
date_range=holdshare.iloc[0,2:holdshare.shape[1]]
date_range=date_range.reset_index(drop=True)
date_range_long=close.iloc[0,2:close.shape[1]]
date_range_long=date_range_long.reset_index(drop=True)
# Sector dictionaries: CITIC level-1, four-sector grouping, main indexes.
cs_i = dict.loc[:, ['CS_I_FS', 'CS_I','CS_I_SECTOR', 'CS_I_ENG']]
eight_sector_dict = dict.loc[:,['FOUR_SECTOR','FS_ENG']]
main_index_dict = dict.loc[:,['Main_Index','Main_Index_Eng']]
cs_i=cs_i.dropna(how='all')
eight_sector_dict =eight_sector_dict.dropna(how='all')
main_index_dict=main_index_dict.dropna(how='all')
# Derived tables: floating market cap, cropped period grids, dividends.
float_mkt = cal_float_mkt(fmk,cs_i,eight_sector_dict,main_index_dict)
wow_range=crop_week(week_range,date_range)
mom_range=crop_week(mon_range,date_range)
fmkt_month=slice_fmkt(float_mkt[0],mom_range)
fmkt_week=slice_fmkt(float_mkt[0],wow_range)
div_df=cal_div_df(divcap,divexdate,date_range)
# Fund indexes, Northbound indexes, positions and net inflows, weekly and
# monthly variants of each.
fund_mindex=cal_fund_index(fund,date_range_long,mom_range,'中证股票基金月度指数','CSI_Stock_Fund_M_Index','点')
fund_windex=cal_fund_index(fund,date_range_long,wow_range,'中证股票基金周度指数','CSI_Stock_Fund_W_Index','点')
simple_index_month = cal_simple_index(holdshare,close,stock_adj,ind,cs_i,eight_sector_dict,main_index_dict,date_range,mom_range,anchor_data_month,csi_index,'北上资金月度指数_','North_Index_M','点')
simple_index_week = cal_simple_index(holdshare,close,stock_adj,ind,cs_i,eight_sector_dict,main_index_dict,date_range,wow_range,anchor_data_week,csi_index,'北上资金周度指数_','North_Index_W','点')
hold_data_month = cal_hold(holdshare,close,mom_range)
result_hold_month = cal_vertical_archi(hold_data_month,ind,cs_i,eight_sector_dict,main_index_dict,mom_range[0],mom_range[2],'月度持仓_','North_Position_M_','亿元',csi_index)
hold_data_week = cal_hold(holdshare,close,wow_range)
result_hold_week = cal_vertical_archi(hold_data_week,ind,cs_i,eight_sector_dict,main_index_dict,wow_range[0],wow_range[2],'周度持仓_','North_Position_W_','亿元',csi_index)
delta_data = cal_ind_flow(holdshare, vwap, div_df)
ind_flow_wow=cal_push_data(delta_data,ind,cs_i,eight_sector_dict,main_index_dict,wow_range,date_range,'周度资金净流入_','North_Netinflow_W_',edb_data,csi_index)
ind_flow_mom=cal_push_data(delta_data,ind,cs_i,eight_sector_dict,main_index_dict,mom_range,date_range,'月度资金净流入_','North_Netinflow_M_',edb_data,csi_index)
# Stack all sections, dropping the duplicated header row from every
# section after the first.
wow_result=pd.concat([fund_windex,del_colname(simple_index_week),del_colname(ind_flow_wow) ,del_colname(result_hold_week),del_colname(fmkt_week)])
mom_result=pd.concat([fund_mindex,del_colname(simple_index_month),del_colname(ind_flow_mom),del_colname(result_hold_month),del_colname(fmkt_month)])
# Keep only the columns newer than the last date already stored, then
# append them to the previous output.
new_wow_result=clear_df(wow_result,0,find_last_col(old_week_df.iloc[0,old_week_df.shape[1]-2],wow_result))
new_mom_result=clear_df(mom_result,0,find_last_col(old_month_df.iloc[0,old_month_df.shape[1]-2],mom_result))
final_mom=pd.concat([slice_old_mom,new_mom_result],axis=1)
final_mom=clear_df(final_mom,0,0)
final_wow=pd.concat([slice_old_wow,new_wow_result],axis=1)
final_wow=clear_df(final_wow,0,0)
write_result_excel(final_mom, workbook, 'Python_Month')
write_result_excel(final_wow, workbook, 'Python_Week')
# write_result_excel(result_hold_month, workbook, 'Python_Month')
# write_result_excel(result_hold_week, workbook, 'Python_Week')
workbook.close()
print("Done!")
| StarcoderdataPython |
4985098 | <filename>0642 Number of K-Divisible Sublists.py
class Solution:
    def solve(self, nums, k):
        """Count contiguous sublists of nums whose sum is divisible by k.

        Uses the classic prefix-sum-mod trick: two prefixes with equal
        residue mod k bound a divisible sublist. The original version
        mutated the caller's list in place (insert + prefix rewrite) and
        used an un-imported `defaultdict`; this version is side-effect
        free and needs only a plain dict.

        :param nums: list of integers (not modified)
        :param k: positive divisor
        :return: number of sublists with sum divisible by k
        """
        counts = {0: 1}  # residue -> how many prefixes seen with it
        prefix = 0
        ans = 0
        for value in nums:
            # Python's % yields a non-negative residue for positive k,
            # so negative elements are handled correctly.
            prefix = (prefix + value) % k
            ans += counts.get(prefix, 0)
            counts[prefix] = counts.get(prefix, 0) + 1
        return ans
| StarcoderdataPython |
1715709 | <reponame>py-graphit/py-graphit
# -*- coding: utf-8 -*-
"""
file: graph_arraystorage_driver.py
Classes that store nodes, edges and their attributes as numpy arrays using the
Pandas package
"""
import weakref
import logging
from collections import MutableMapping
from numpy import nan as Nan
from pandas import DataFrame, Series
from graphit import __module__
from graphit.graph_storage_drivers.graph_driver_baseclass import GraphDriverBaseClass
from graphit.graph_storage_drivers.graph_dictstorage_driver import DictStorage
from graphit.graph_storage_drivers.graph_storage_views import AdjacencyView
__all__ = ['ArrayStorage', 'init_arraystorage_driver']
logger = logging.getLogger(__module__)
def init_arraystorage_driver(nodes, edges, data):
    """
    ArrayStorage specific driver initiation method

    Returns a ArrayStorage instance for nodes and edges and a AdjacencyView
    for adjacency based on the initiated nodes and edges stores.

    :param nodes: Nodes to initiate nodes ArrayStorage instance
    :type nodes:  :py:list, :py:dict,
                  :graphit:graph_arraystorage_driver:ArrayStorage
    :param edges: Edges to initiate edges ArrayStorage instance
    :type edges:  :py:list, :py:dict,
                  :graphit:graph_arraystorage_driver:ArrayStorage
    :param data:  graph data attributes to initiate data DictStorage instance
    :type data:   :py:list, :py:dict,
                  :graphit:graph_dictstorage_driver:DictStorage

    :return:      Nodes, edges and data storage instances and Adjacency view.
    """

    node_storage = ArrayStorage(nodes)
    edge_storage = ArrayStorage(edges)
    data_storage = DictStorage(data)
    adjacency_storage = AdjacencyView(node_storage, edge_storage)

    return node_storage, edge_storage, adjacency_storage, data_storage
class SeriesStorage(MutableMapping):
    """
    SeriesStorage class

    Wrapper around the pandas Series object making it fully compliant with the
    native Python dict API by using the `collections.MutableMapping` abstract
    base class.

    Access to the native pandas Series methods is preserved.

    NOTE(review): MutableMapping is imported from `collections`, which was
    removed in Python 3.10 -- `collections.abc` is the modern location.
    """
    __slots__ = ('_storage', '_dropna')

    def __init__(self, series):
        """
        Implement class __init__

        Registers the Pandas series object in the _storage attribute.
        The '_dropna' attribute controls if rows with Nan values are
        removed before returning elements from the series

        :param series: Pandas Series instance
        :type series: :pandas:Series
        """
        self._storage = series
        self._dropna = True

    def __getattr__(self, attr):
        """
        Implement class __getattr__

        Exposes data by key as class attributes with support for calling
        methods on the pandas Series instance.
        If the attribute is not present on the Series, pass along to the
        default __getattribute__ method.

        :param attr: attribute name
        :return: attribute value
        """
        if hasattr(self._storage, attr):
            return getattr(self._storage, attr)

        return object.__getattribute__(self, attr)

    def __getitem__(self, key):
        """
        Implement class __getitem__

        Rows holding Nan are hidden first when _dropna is set.

        :param key: key name
        :return: key value
        """
        if self._dropna:
            return self._storage.dropna()[key]
        return self._storage[key]

    def __setitem__(self, key, value):
        """
        Implement class __setitem__

        :param key: key name
        :param value: value to set
        """
        self._storage.loc[key] = value

    def __delitem__(self, key):
        """
        Implement class __delitem__

        Inplace removal (drop) of the key in the Series object.
        The Series is a view on the origin DataFrame where the key will thus be
        removed.

        :param key: key name
        """
        self._storage.drop(labels=key, inplace=True)

    def __iter__(self):
        """
        Implement class __iter__

        Iterate over the keys of a Series after dropping rows with Nan values.

        :return: Series keys
        """
        if self._dropna:
            return iter(self._storage.dropna().index)
        return iter(self._storage.index)

    def __len__(self):
        """
        Implement class __len__

        Return the number of key,value pairs in the Series after dropping Nan
        values.
        """
        if self._dropna:
            return len(self._storage.dropna())
        return len(self._storage)

    @property
    def series(self):
        """
        :return: the original pandas Series object
        :rtype: :pandas:Series
        """
        return self._storage

    def to_dict(self, dropna=True):
        """
        Return a shallow copy of the Series as dictionary.

        :param dropna: drop Series rows having Nan values
        :type dropna: :py:bool

        :rtype: :py:dict
        """
        if self._dropna or dropna:
            return self._storage.dropna().to_dict()
        return self._storage.to_dict()
class ArrayStorage(GraphDriverBaseClass):
    """
    ArrayStorage class

    Provides a Pandas DataFrame based storage for nodes and edges.

    The class supports weak referencing of the internal DataFrame (_storage)
    using the weakref module to reduce memory footprint and enable true
    synchronized views across different instances of the DictStorage class.
    """
    __slots__ = ('_storage', '_view')

    def __init__(self, *args, **kwargs):
        """
        Implement class __init__

        Initiate the internal _storage DataFrame.
        If an ArrayStorage instance is provided, a _storage dictionary has been
        created and we will setup a weak reference to it. Otherwise init
        a new DataFrame using args and/or kwargs as input.
        """
        self._storage = DataFrame()
        self._view = None

        if len(args):
            if not len(args) == 1:
                raise TypeError('update expected at most 1 arguments, got {0}'.format(len(args)))
            mappable = args[0]

            # mappable is ArrayStorage instance, setup weakref to _storage
            if isinstance(mappable, ArrayStorage):
                self._storage = weakref.ref(mappable._storage)()

            # mappable is any type accepted by the DataFrame constructor
            elif mappable is not None:
                mappable = dict(mappable)
                try:
                    self._storage = DataFrame(mappable)
                except Exception:
                    # A dict of scalars is rejected by the DataFrame
                    # constructor; wrap each value in a one-element list.
                    # (Was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    mappable = dict([(k, [v]) for k, v in mappable.items()])
                    self._storage = DataFrame(mappable)

            # no mappable, setup default DataFrame with optional kwargs
            else:
                self._storage = DataFrame(kwargs)
        else:
            self._storage = DataFrame(kwargs)

    def __delitem__(self, key):
        """Drop column `key`, also removing it from the active view."""
        if key not in self:
            raise KeyError(key)

        if self.is_view:
            self._view.remove(key)

        self._storage.drop([key], axis=1, inplace=True)

    def __getitem__(self, key):
        """
        Implement class __getitem__

        :param key: key name
        :return: key value (single columns wrapped in SeriesStorage)
        """
        view = self._view_select()
        result = view.loc[:, key]

        if isinstance(result, Series):
            return SeriesStorage(result)
        return result

    def __getattr__(self, key):
        """
        Implement class __getattr__

        Exposes data by key as class attributes with support for calling
        methods on the DataFrame instance.
        If the key is not present, pass along to the default __getattribute__
        method.

        :param key: attribute name
        :return: attribute value
        """
        if hasattr(self._storage, key):
            view_selection = self._view_select()
            result = getattr(view_selection, key)
            if isinstance(result, Series):
                return SeriesStorage(result)
            return result

        return object.__getattribute__(self, key)

    def __iter__(self):
        """
        Implement class __iter__

        Iterate over keys in _storage
        """
        view = self._view_select()
        for key in view.keys():
            yield key

    def __len__(self):
        """Number of keys, honouring the active view when set."""
        if self.is_view:
            return len(self._view)
        return len(self.keys())

    def __setitem__(self, key, value):
        self.set(key, value)

    @property
    def dataframe(self):
        """
        :return: the original pandas DataFrame object
        :rtype: :pandas:DataFrame
        """
        return self._storage

    def _view_select(self):
        """Return the storage restricted to the active view (if any)."""
        view = self._storage
        if self.is_view:
            view = self._storage.loc[:, self._storage.columns.intersection(self._view)]

        if isinstance(view, Series):
            return SeriesStorage(view)
        return view

    def copy(self):
        """
        Return a deep copy of the storage class with the same view as
        the parent instance.

        :return: deep copy of storage instance
        :rtype: ArrayStorage
        """
        deepcopy = ArrayStorage(self._storage.copy(deep=True))
        if self.is_view:
            deepcopy.set_view(self._view)

        return deepcopy

    def del_data_reference(self, target):
        """
        Implements GraphDriverBaseClass abstract method.
        The array storage does not support reference pointers
        """
        logging.warning('ArrayStorage does not support reference pointers')
        return

    def get_data_reference(self, target, default=None):
        """
        Implements GraphDriverBaseClass abstract method.
        The array storage does not support reference pointers
        """
        logging.warning('ArrayStorage does not support reference pointers')
        return None

    def get(self, key, default=None):
        """Dict-style get, honouring the active view."""
        view_selection = self._view_select()
        result = view_selection.get(key, default=default)
        if isinstance(result, Series):
            return SeriesStorage(result)
        return result

    def set(self, key, value):
        """Store `value` (scalar or mapping) as column `key`."""
        try:
            value = dict(value)
        except (ValueError, TypeError):
            logging.debug('Unable to convert value to dictionary: {0}'.format(type(value)))

        index = ['key']
        if isinstance(value, dict):
            index = value.keys()

        # Convert input dict to Series
        s = Series(value, index=index)

        # Add new series keys as row to DataFrame
        for name in s.index:
            if name not in self._storage.index:
                self._storage.loc[name, :] = Nan

        # Update DataFrame
        self._storage[key] = s

        # If new key and is_view, add to view
        if self.is_view:
            self._view.append(key)

    def set_data_reference(self, source, target):
        """
        The array storage class does not support data referencing and will
        simply copy the data stored in source to target

        :param source: source key having the data
        :param target: target key referring to data of source
        """
        if source in self:
            self[target] = self.get(source)
        else:
            # Bug fix: the original logged the raw '{0}'/'{1}' template
            # without ever calling .format on it.
            logging.error('Unable to set reference from source {0} to target {1}. '
                          'Source does not exist.'.format(source, target))

    def to_dict(self, return_full=False):
        """
        Return a shallow copy of the full dictionary.

        If the current ArrayStorage represent a selective view on the parent
        dictionary then only return a dictionary with a shallow copy of the
        keys in the selective view.

        :param return_full: ignores is_view and return the full dictionary
        :type return_full:  bool

        :rtype: :py:dict
        """
        view = self._storage
        if not return_full:
            view = self._view_select()

        # Single row then single value per column index
        if len(view.index) == 1:
            row = view.loc[view.index[0], :]
            return dict(zip(view.columns, row.values))

        return_dict = {}
        for k, v in view.items():
            return_dict[k] = v.dropna().to_dict()

        return return_dict

    def keys(self):
        """
        Implements a Python dict style 'keys' method

        Returns the keys of the DataFrame which equal the columns returned as
        pandas Index object. This is subsequently returned as plain list.

        :return: dataframe column indexes (keys)
        :rtype: :py:list
        """
        view = self._view_select()
        return list(view.keys())

    iterkeys = keys
    viewkeys = keys

    def items(self):
        """Yield (key, SeriesStorage) pairs, honouring the active view."""
        view = self._view_select()
        for k, v in view.items():
            yield (k, SeriesStorage(v))

    iteritems = items
    viewitems = items

    def values(self):
        """Yield SeriesStorage values, honouring the active view."""
        for item in self.items():
            yield item[1]

    itervalues = values
    viewvalues = values
| StarcoderdataPython |
4926290 | <filename>uploadserver/__main__.py<gh_stars>10-100
import uploadserver
if __name__ == '__main__':
    # Delegate to the package CLI when run as `python -m uploadserver`.
    uploadserver.main()
| StarcoderdataPython |
266202 | from gym import GoalEnv, spaces
from inspect import getargspec
from mujoco_py import MjViewer
import numpy as np
from .environment import Environment
class GymWrapper(GoalEnv):
    """Wraps HAC environment in gym environment.

    Assumes hac_env is an instance of Environment as defined in the
    original Hierarchical Actor-Critic implementation at
    https://github.com/andrew-j-levy/Hierarchical-Actor-Critc-HAC-
    """
    def __init__(self, hac_env, observation_space_bounds=None):
        """Build gym action/observation/goal spaces from the HAC env.

        :param hac_env: wrapped HAC Environment instance
        :param observation_space_bounds: optional (state_dim, 2) array of
            [low, high] per state dimension; unbounded when omitted
        """
        self.hac_env = hac_env
        self.max_episode_length = hac_env.max_actions
        self.viewer = None  # MuJoCo viewer, created lazily in render()

        # action space
        action_low = hac_env.action_offset - hac_env.action_bounds
        action_high = hac_env.action_offset + hac_env.action_bounds
        self.action_space = spaces.Box(low=action_low, high=action_high, dtype=np.float32)

        # partial observation space
        if observation_space_bounds is None:
            # appropriate for UR5 and Pendulum
            partial_obs_space = spaces.Box(
                low=-np.inf,
                high=np.inf,
                shape=(hac_env.state_dim,),
                dtype=np.float32
            )
        else:
            partial_obs_space = spaces.Box(
                low=observation_space_bounds[:, 0],
                high=observation_space_bounds[:, 1],
                dtype=np.float32
            )

        # goal spaces (Use goal space used for training in original paper)
        goal_low = np.array(hac_env.goal_space_train)[:, 0]
        goal_high = np.array(hac_env.goal_space_train)[:, 1]
        desired_goal_space = spaces.Box(low=goal_low, high=goal_high, dtype=np.float32)
        achieved_goal_space = desired_goal_space

        # observation space, including desired and achieved goal
        self.observation_space = spaces.Dict({
            "observation": partial_obs_space,
            "desired_goal": desired_goal_space,
            "achieved_goal": achieved_goal_space
        })

        self.reset()

    def _get_obs(self, state):
        """Build the GoalEnv observation dict from a raw HAC state."""
        achieved_goal = self.hac_env.project_state_to_end_goal(self.hac_env.sim, state)
        obs = {
            "observation": state,
            "desired_goal": self.desired_goal,
            "achieved_goal": achieved_goal
        }
        return obs

    def compute_reward(self, achieved_goal, desired_goal, info):
        """Sparse reward: 0. when every goal dimension is within its
        threshold, otherwise -1."""
        tolerance = self.hac_env.end_goal_thresholds
        reward = 0.
        for a_goal, d_goal, tol in zip(achieved_goal, desired_goal, tolerance):
            if np.absolute(a_goal - d_goal) > tol:
                reward = -1.
                break
        return reward

    def step(self, action):
        """Execute one action; the episode ends when the goal is reached
        (reward 0.) or max_episode_length steps have elapsed."""
        state = self.hac_env.execute_action(action)
        self.n_steps += 1
        obs = self._get_obs(state)
        info = {}
        reward = self.compute_reward(obs["achieved_goal"], obs["desired_goal"], info)
        done = True if reward == 0. or self.n_steps >= self.max_episode_length else False
        return obs, reward, done, info

    def reset(self, **kwargs):
        """Sample a new desired goal and reset the underlying simulation."""
        self.desired_goal = self.hac_env.get_next_goal(test=False)
        # Some HAC envs accept the goal in reset_sim, others take no args.
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # getfullargspec is the modern replacement -- confirm target version.
        spec = getargspec(self.hac_env.reset_sim)
        if "next_goal" in spec.args:
            state = self.hac_env.reset_sim(self.desired_goal)
        else:
            state = self.hac_env.reset_sim()
        obs = self._get_obs(state)
        self.n_steps = 0
        return obs

    def update_subgoals(self, subgoals):
        """Forward subgoal markers to the HAC env for display."""
        self.hac_env.display_subgoals(subgoals + [None])

    def update_timed_subgoals(self, timed_subgoals, tolerances):
        """Display only the goal component of each timed subgoal."""
        subgoals = [tg.goal for tg in timed_subgoals if tg is not None]
        # NOTE: Visualization of time component of timed subgoals is not supported
        # by HAC environments.
        self.update_subgoals(subgoals)

    def render(self, mode):
        # Lazily create the MuJoCo viewer on the first render call.
        if self.viewer is None:
            self.viewer = MjViewer(self.hac_env.sim)
        self.viewer.render()
| StarcoderdataPython |
6478230 | #!/usr/bin/python
#################################################################################
# EC2 server backup daily retenttion job
# removes backups older than day specified in local cache
#
# ec2_local_backup_retention.py --days 10 --dir /var/backup --prefix db_bk.tgz
#################################################################################
import sys
import os
import pytz
from datetime import datetime, timezone, timedelta
import argparse
def app_run():
    """Parse CLI arguments for the EC2 local backup retention job and run it.

    Exits with status 4 when --dir is missing. Default retention is 10 days;
    a missing prefix/suffix is replaced by a marker that matches nothing.
    """
    parser = argparse.ArgumentParser(description='EC2 local Backup retention')
    parser.add_argument('--days', help='days to retain ')
    parser.add_argument('--dir', help='Linux EC2 server dir')
    parser.add_argument(
        '--backup_prefix', help="daily backup file prefix - use \'myback\' not \'myback*\'")
    parser.add_argument(
        '--suffix', help="daily backup file suffix - use 'xls' not '*.xls'")
    parser.add_argument('--dry_run', action="store_true",
                        help='dry-run for testing')
    args = parser.parse_args()

    def check_args():
        """Normalise parsed arguments, applying the defaults above."""
        days_specified = int(args.days) if args.days else 10
        dry_run = bool(args.dry_run)
        file_prefix = args.backup_prefix
        file_suffix = args.suffix
        if file_prefix is None:
            file_prefix = " "
        if file_suffix is None:
            file_suffix = "... not specified"
        my_dir = args.dir
        return days_specified, file_prefix, my_dir, dry_run, file_suffix

    days_specified, file_prefix, my_dir, dry_run, file_suffix = check_args()

    if my_dir is None:  # was `== None` -- identity check is the idiom
        print("No dir specified - see -h for commands")
        sys.exit(4)

    today = datetime.now(timezone.utc)
    retention_period = today - timedelta(days=days_specified)

    # main entry point
    process_ec2_dir(days_specified, file_prefix, file_suffix, my_dir,
                    dry_run, today, retention_period)
    return
def process_ec2_dir(days_specifed, file_prefix, suffix, my_dir, dry_run, today, retention_period):
    """Scan my_dir for backup files matching file_prefix or suffix and
    delete those created before retention_period (skipped when dry_run).

    :param days_specifed: retention window in days (reporting only here)
    :param file_prefix: filename prefix to match (' ' means unspecified)
    :param suffix: filename suffix to match (marker string when unspecified)
    :param my_dir: directory to scan; becomes the process CWD
    :param dry_run: when True, list candidates without deleting
    :param today: current UTC datetime (reporting only)
    :param retention_period: cutoff datetime; older files are deleted
    """

    def print_parms(file_prefix, suffix, my_dir, today, retention_period):
        # Echo the effective run parameters.
        print("today's date is ", today)
        print("Start of retention period (days) ", retention_period)
        print("EC2 server dir: ", my_dir)
        print("backup prefix: ", file_prefix)
        print("backup suffix: ", suffix)
        return

    def delete_files(dry_run, delete_candidate_list):
        # Remove each candidate; in dry-run mode only print it.
        for obj in delete_candidate_list:
            print("Deleting: ", obj)
            if (dry_run == False):
                os.remove(obj)
        return

    def deletion_summary(delete_candidate_list):
        if (len(delete_candidate_list) > 0):
            print("Number of files to delete: ", len(delete_candidate_list))
            print("deleting older files")
        return

    def get_dir(my_dir):
        # List the directory and chdir into it so bare names work with
        # os.stat / os.remove below (process-wide side effect).
        objects = os.listdir(my_dir)
        os.chdir(my_dir)
        return objects

    def get_file_timestamp(utc, o):
        # NOTE(review): st_ctime is interpreted via fromtimestamp (local
        # time) and then labelled UTC -- confirm this offset is intended.
        o_time = datetime.fromtimestamp(os.stat(o).st_ctime)
        o_time = utc.localize(o_time)
        return o_time

    def filter_dir_obj(days_specifed, file_prefix, suffix, my_dir, retention_period, filter_lists):
        # filter_lists is [delete_candidates, found_candidates]; both are
        # mutated in place.
        found_candidate_list = filter_lists[1]
        delete_candidate_list = filter_lists[0]
        objects = get_dir(my_dir)
        utc = pytz.UTC
        for o in objects:
            o_time = get_file_timestamp(utc, o)
            # print("file: ", o, "time: ", o_time )
            if o.startswith(file_prefix) or (o.endswith(suffix)):
                found_candidate_list.append(o)
                if o_time < retention_period:
                    print("older than " , days_specifed, " ", end='')
                    delete_candidate_list.append(o)
                    print("file: ", o, "time: ", o_time)
        return

    def list_summary(found_candidate_list):
        print("***************Summary***************")
        print("Num of objects found: ", len(found_candidate_list))
        return

    delete_candidate_list = []
    found_candidate_list = []
    filter_lists = [delete_candidate_list, found_candidate_list]

    # main processing loop ec2 files
    print_parms(file_prefix, suffix, my_dir, today, retention_period)
    filter_dir_obj(days_specifed, file_prefix, suffix, my_dir,
                   retention_period, filter_lists)
    list_summary(found_candidate_list)
    deletion_summary(delete_candidate_list)
    delete_files(dry_run, delete_candidate_list)
    return
if __name__ == "__main__":
    # CLI entry point.
    app_run()
| StarcoderdataPython |
1723085 | #!/usr/bin/python3
''' FILE NAME
relay_demo_gpiod.py
1. WHAT IT DOES
This is a very simple script that shows how to turn on
and off a single relay on the Keyestudio 4 Channel relay
HAT, or any other relay HAT that can be controled directly
through the Raspberry Pi GPIOs. In this example, the
GPIO is controlled using the pigpio Python module with
the pigpiod (deamon).
2. REQUIRES
* Any Raspberry Pi with a 40-pin header.
* Keystudio 4 Channel relay HAT or similar
Optional:
* -
3. ORIGINAL WORK
Make A Raspberry Pi Bench Computer, <NAME>
4. HARDWARE
Connect the required hardware to the Raspberry Pi: Relay HAT.
5. SOFTWARE
* Command line terminal
* Simple text editor
* SSH and SCP
'''
import pigpio
from time import sleep
# Connect to the local pigpiod daemon.
pi = pigpio.pi()

relay_a = 22 # GPIO driving the relay channel
pi.set_mode(relay_a,pigpio.OUTPUT) # Make output

# Toggle the relay forever: one second on, one second off.
# (Runs until interrupted; there is deliberately no exit condition.)
while (True):
    pi.write(relay_a,0) # Set to LOW
    sleep(1)
    pi.write(relay_a,1) # Set to HIGH
    sleep(1)
5070653 | <reponame>RajivBiswal/myprofile-rest-api
from django.urls import path, include
from profile_api import views
from rest_framework.routers import DefaultRouter
# DRF router exposes the viewsets under auto-generated routes.
router = DefaultRouter()
# NOTE(review): 'base_name' was renamed to 'basename' in DRF 3.9+ -- confirm
# the pinned djangorestframework version still accepts it.
router.register('hello-viewset', views.HelloVeiwset, base_name='hello-viewset')
router.register('profile', views.UserProfileViewset)
router.register('feed', views.UserProfileFeedViewset)

urlpatterns = [
    path('hello-view/', views.MyApiView.as_view()),
    path('', include(router.urls)),
    path('login/', views.UserLoginApiView.as_view()),
]
| StarcoderdataPython |
3532627 | <gh_stars>1-10
from typing import List, Optional, Tuple
from mikan.combine import NumberCombine, StandardCombine, TsuCombine
from mikan.compound import Compound
from mikan.number import Number
from mikan.word import Word
from mikan.writing import Writing
# Public API: only the counter word classes are exported; the *Compound
# pairings of Number + Counter defined below are internal.
__all__ = [
    'Counter',
    'DayHourCounter',
    'MonthDayCounter',
    'MonthCounter',
    'PersonCounter',
    'TsuCounter',
]
class Counter(Word):
    """Base marker class for counter words; adds no behavior over Word."""
    pass
class CounterCompound(Compound):
    """Compound of exactly (Number, Counter), joined with standard readings."""
    def __init__(self, words: List[Word], writings: Optional[List[Writing]]=None) -> None:
        # Only the exact pair (Number, Counter) is accepted.
        if (
            (len(words) != 2) or
            not (isinstance(words[0], Number) and isinstance(words[1], Counter))
        ):
            raise ValueError

        super().__init__(words, writings=writings, combine=StandardCombine())
class DayHourCounter(Word):
    """Hour-of-day counter 時 (じ)."""
    def __init__(self) -> None:
        super().__init__('時', 'じ')
class DayHourCounterCompound(Compound):
    """Number + 時 compound with the irregular readings for 4時 and 9時."""

    # Number -> irregular whole-compound reading.
    __EXCEPTIONS = {
        4: ['よじ'],
        9: ['くじ'],
    }

    def __init__(
        self,
        words: Tuple[Number, Counter],
        writings: Optional[List[Writing]]=None
    ) -> None:
        if (
            (len(words) != 2) or
            not (isinstance(words[0], Number) and isinstance(words[1], DayHourCounter))
        ):
            raise ValueError

        super().__init__(words, writings=writings, combine=NumberCombine(self.__EXCEPTIONS))
class MonthDayCounter(Word):
    """Day-of-month counter 日 (にち)."""
    def __init__(self) -> None:
        super().__init__('日', 'にち')
class MonthDayCounterCompound(Compound):
    """Number + 日 compound with the many irregular day-of-month readings."""

    # Number -> irregular whole-compound reading (ついたち, ふつか, ...).
    __EXCEPTIONS = {
        1: ['ついたち'],
        2: ['ふつか'],
        3: ['みっか'],
        4: ['よっか'],
        5: ['いつか'],
        6: ['むいか'],
        7: ['なのか'],
        8: ['ようか'],
        9: ['ここのか'],
        10: ['とおか'],
        14: ['じゅうよっか'],
        20: ['はつか'],
        24: ['にじゅうよっか'],
    }

    def __init__(
        self,
        words: Tuple[Number, Counter],
        writings: Optional[List[Writing]]=None
    ) -> None:
        if (
            (len(words) != 2) or
            not (isinstance(words[0], Number) and isinstance(words[1], MonthDayCounter))
        ):
            raise ValueError

        super().__init__(words, writings=writings, combine=NumberCombine(self.__EXCEPTIONS))
class MonthCounter(Word):
    """Month-name counter 月 (がつ)."""
    def __init__(self) -> None:
        super().__init__('月', 'がつ')
class MonthCounterCompound(Compound):
    """Number + 月 compound with the irregular months 4月, 7月 and 9月."""

    # Number -> irregular whole-compound reading.
    __EXCEPTIONS = {
        4: ['しがつ'],
        7: ['しちがつ'],
        9: ['くがつ'],
    }

    def __init__(
        self,
        words: Tuple[Number, Counter],
        writings: Optional[List[Writing]]=None
    ) -> None:
        if (
            (len(words) != 2) or
            not (isinstance(words[0], Number) and isinstance(words[1], MonthCounter))
        ):
            raise ValueError

        super().__init__(words, writings=writings, combine=NumberCombine(self.__EXCEPTIONS))
class PersonCounter(Word):
    """People counter 人 (にん)."""
    def __init__(self) -> None:
        super().__init__('人', 'にん')
class PersonCounterCompound(Compound):
    """Number + 人 compound with irregular readings for 1, 2, 4 and 7 people."""

    # Number -> irregular whole-compound reading(s).
    __EXCEPTIONS = {
        1: ['ひとり'],
        2: ['ふたり'],
        4: ['よにん'],
        7: ['ななにん', 'しちにん'],
    }

    def __init__(
        self,
        words: Tuple[Number, Counter],
        writings: Optional[List[Writing]]=None
    ) -> None:
        if (
            (len(words) != 2) or
            not (isinstance(words[0], Number) and isinstance(words[1], PersonCounter))
        ):
            raise ValueError

        super().__init__(words, writings=writings, combine=NumberCombine(self.__EXCEPTIONS))
class TsuCounter(Word):
    """Generic counter つ (kana only)."""
    def __init__(self) -> None:
        super().__init__('つ')
class TsuCounterCompound(Compound):
    """Number + つ compound using native-Japanese readings for 1-10."""

    # Number -> native-Japanese reading for the whole compound.
    __EXCEPTIONS = {
        1: ['ひとつ'],
        2: ['ふたつ'],
        3: ['みっつ'],
        4: ['よっつ'],
        5: ['いつつ'],
        6: ['むっつ'],
        7: ['ななつ'],
        8: ['やっつ'],
        9: ['ここのつ'],
        10: ['とお'],
    }

    def __init__(
        self,
        words: Tuple[Number, Counter],
        writings: Optional[List[Writing]]=None
    ) -> None:
        if (
            (len(words) != 2) or
            not (isinstance(words[0], Number) and isinstance(words[1], TsuCounter))
        ):
            raise ValueError

        super().__init__(words, writings=writings, combine=TsuCombine(self.__EXCEPTIONS))
| StarcoderdataPython |
9754398 | from Task import Task
from Interfaces import Management
from Helper import Level
from time import sleep
from datetime import datetime
class SingleSliceCreationTime(Task):
    """Measure a single network slice's deployment times and push them to
    InfluxDb tagged with the execution id."""

    def __init__(self, logMethod, parent, params):
        super().__init__("Single Slice Creation Time Measurement", parent, params, logMethod, None)

    def Run(self):
        """Optionally wait (polling once per second, bounded by the
        'Timeout' retry count when given) until the slice is 'Running',
        then read its deployment times and send them to InfluxDb."""
        executionId = self.params['ExecutionId']
        waitForRunning = self.params['WaitForRunning']
        timeout = self.params.get('Timeout', None)
        sliceId = self.params['SliceId']

        count = 0
        if waitForRunning:
            self.Log(Level.INFO, f"Waiting for slice to be running. Timeout: {timeout}")
            while True:
                count += 1
                status = Management.SliceManager().Check(sliceId).get('status', '<SliceManager check error>')
                self.Log(Level.DEBUG, f'Slice {sliceId} status: {status} (retry {count})')
                # Bug fix: the original tested `timeout >= count`, which is
                # true on the very first retry and made the wait a no-op.
                if status == 'Running' or (timeout is not None and count >= timeout): break
                else: sleep(1)

        self.Log(Level.INFO, f"Reading deployment times for slice {sliceId}")
        times = Management.SliceManager().Time(sliceId)
        self.Log(Level.DEBUG, f"Received times: {times}")

        self.Log(Level.INFO, f"Generating results payload")
        from Helper import InfluxDb, InfluxPayload, InfluxPoint  # Delayed to avoid cyclic imports

        payload = InfluxPayload("Single Slice Creation Time")
        payload.Tags = {'ExecutionId': str(executionId)}
        point = InfluxPoint(datetime.utcnow())

        # Only forward the metrics the slice manager actually reported.
        for key in ["Slice_Deployment_Time", "Placement_Time", "Provisioning_Time"]:
            value = times.get(key, "N/A")
            if value != "N/A":
                point.Fields[key] = float(value)

        payload.Points.append(point)
        self.Log(Level.DEBUG, f"Payload: {payload}")

        self.Log(Level.INFO, f"Sending results to InfluxDb")
        InfluxDb.Send(payload)

        # TODO: Artificial wait until the slice is 'configured'
        # TODO: In the future the slice manager should also report this status
        sleep(60)
| StarcoderdataPython |
249897 | #!/usr/bin/env python3
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
class Command(BaseCommand):
    """Promote the given users to staff + superuser status."""

    help = 'Add teammate'

    def add_arguments(self, parser):
        # One or more usernames, e.g. `manage.py <cmd> alice bob`.
        parser.add_argument('username', nargs='+', type=str)

    def handle(self, *args, **options):
        for username in options['username']:
            try:
                user = User.objects.get(username=username)
                user.is_staff = True
                user.is_superuser = True
                # Management commands should write through self.stdout/self.stderr
                # (not print) so output respects redirection and is testable.
                self.stdout.write('Making %s a superuser!' % username)
                user.save()
            except User.DoesNotExist:
                self.stderr.write("%s does not exist" % username)
| StarcoderdataPython |
1719579 | <gh_stars>0
from django.shortcuts import render
from django.views.generic import ListView
from.models import LegoSet
# Create your views here.
def LegoHome(request):
    """Render the lego app landing page."""
    template_name = "lego/lego_home.html"
    return render(request, template_name)
class LegoListView(ListView):
    """Generic list view over all LegoSet rows."""
    model = LegoSet
    template_name = "lego/lego_list.html"
| StarcoderdataPython |
5079253 | <reponame>linqyd/etk
import re

# Street-type keywords that anchor candidate address matches.
keywords = ["avenue", "blvd", "boulevard", "pkwy", "parkway", "way",
            "st", "street", "rd", "road", "drive", "lane", "alley", "ave"]

# One case-insensitive whole-word pattern per keyword.
keyword_patterns = {
    keyword: re.compile(r'\b%s\b' % keyword.lower(), re.I)
    for keyword in keywords
}

# US-style phone number: 3/3/4 digit groups with arbitrary separators.
phonePattern = re.compile(r'(\d{3})\D*(\d{3})\D*(\d{4})')
def clean_address(text_string, level):
    """Strip boilerplate from a candidate address string.

    Cleaning is cumulative; each level includes all lower levels:
      level >= 1: drop everything up to and including the word 'location'
      level >= 2: drop everything up to the end of the first phone number
      level >= 3: when a maps URL or '=' is present, keep only the text
                  after the last '=' and turn '+' separators into spaces
    Returns the cleaned string with surrounding whitespace removed.
    """
    if level > 0:
        _, marker, tail = text_string.partition('location')
        if marker:
            text_string = tail
    if level > 1:
        match = phonePattern.search(text_string)
        if match is not None:
            text_string = text_string[match.span()[1]:]
    if level > 2:
        if text_string.find('maps.google.com') > -1 or text_string.find('=') > -1:
            eq_pos = text_string.rfind('=')
            if eq_pos > -1:
                text_string = text_string[eq_pos + 1:].replace('+', ' ')
    return text_string.strip()
def get_num(text, start, dist):
    """Scan backwards from *start* for a digit that begins a token.

    Walks left at most *dist* characters (stopping at a line break) looking
    for a digit whose preceding character is a space, newline, '>' or ')'
    (or which sits at index 1). Returns (found, index) where found is 1 when
    such a digit exists and index is the position where the scan stopped.
    """
    found = 0
    end = start + 1
    while start > 0 and end - start <= dist and text[start] != '\r' and text[start] != '\n':
        prev_ok = (start - 1 == 0 or text[start - 1] in (' ', '\n', '>', ')'))
        if text[start].isdigit() and prev_ok:
            found = 1
            break
        start = start - 1
    return found, start
def get_num_next(text, end, dist):
    """Scan forward from *end* for a 5-digit run that terminates a token
    (e.g. a trailing ZIP-like code). Returns (found, stop_index + 1).

    NOTE(review): the line-break guard tests text[anchor] with a fixed
    anchor rather than the moving index; preserved verbatim to keep
    behaviour identical — confirm whether text[end] was intended.
    """
    anchor = end
    found = 0
    digit_count = 0
    while end < len(text) - 2 and end - anchor <= dist and text[anchor] != '\r' and text[anchor] != '\n':
        if text[end].isdigit():
            digit_count += 1
            if digit_count == 5 and text[end + 1].isdigit() and (end + 1 == len(text) - 2 or text[end + 1] == " " or text[end + 1] == "\n" or text[end + 1] == "<"):
                found = 1
                break
        end += 1
    return found, end + 1
def getSpace(text, start):
    """Walk left from *start* until the preceding character is a space.

    Returns the index just after that space, or 0 if none is found.
    """
    while start > 0 and text[start - 1] != " ":
        start -= 1
    return start
def extract_address(text, p, type1, addresses, offset=0):
    """Collect occurrences of street-type *type1* in *text*.

    Searches with the pre-compiled pattern *p* from *offset* onwards,
    appends one {'value', 'context'} dict per hit to *addresses*, then
    recurses to find later matches. Returns *addresses*.
    """
    m = p.search(text, offset)
    if m is None:
        return addresses
    # Candidate end: one character past the matched street-type keyword.
    end = m.span()[0] + len(type1) + 1
    if end != -1:  # NOTE(review): always true (span start is >= 0); dead guard kept as-is
        flag = 1
        # Look backwards (up to 50 chars) for the house number starting the address.
        flag, bkStart = get_num(text, end - (len(type1) + 1), 50)
        if flag == 0:
            # No number found: fall back to the nearest preceding space.
            start = getSpace(text, end - (len(type1) + 2))
        elif flag == 1:
            # A number was found; probe a little further back for an earlier one.
            flag, start = get_num(text, bkStart - 1, 10)
            if flag == 0:
                start = bkStart
        # Extend the match forward if a trailing 5-digit (ZIP-like) run follows.
        flag, newEnd = get_num_next(text, end, 25)
        if flag:
            end = newEnd
        # Removed context flag check
        address = {'value': clean_address(text[start:end], 3),
                   'context': {'start': start,
                               'end': end}}
        addresses.append(address)
        addresses = extract_address(text, p, type1, addresses, end)
        return addresses
    return addresses
"""
Input: Text String and keyword python list ex: ["ave","street"] etc.
Output: Json object containing input text string with list of
associated present addresses
Uses keywords list passed as an parameter
"""
def extract(text_string):
    """Extract street addresses from *text_string*.

    Runs every street-type pattern from ``keyword_patterns`` over the
    lower-cased input and returns a list of
    ``{'value': ..., 'context': {'start': ..., 'end': ...}}`` dicts.
    """
    addresses = list()
    text_string_lower = text_string.lower()
    # PY3 FIX: dict.iteritems() exists only on Python 2; dict.items()
    # behaves identically here and works on both Python 2 and 3.
    for k, p in keyword_patterns.items():
        extract_address(text_string_lower, p, k, addresses)
    return addresses
| StarcoderdataPython |
8053843 | <filename>coffeecart/cart/admin.py
from django.contrib import admin
from .models import Snacks
from .models import Drinks
# Make the cart models editable in the Django admin site.
for cart_model in (Snacks, Drinks):
    admin.site.register(cart_model)
| StarcoderdataPython |
5138715 | <filename>misc/logic_func.py
def equalElements_check(my_list):
    """Report whether every element of *my_list* is equal.

    An empty list counts as all-equal. Elements must be hashable.
    """
    return not my_list or len(set(my_list)) == 1
| StarcoderdataPython |
class RussianSpeedLimits:
    """Track the current speed limit while driving past road signs."""

    def getCurrentLimit(self, signs):
        """Return the limit in effect after processing *signs* in order.

        Starts inside a city at 60. A 'city' sign shows the zone's default
        (90 when leaving, 60 when entering) and toggles the in/out state;
        'default' restores the zone default; any other sign is a numeric
        limit applied directly.
        """
        in_city = True
        limit = 60
        for sign in signs:
            if sign == 'default':
                limit = 60 if in_city else 90
            elif sign == 'city':
                limit = 90 if in_city else 60
                in_city = not in_city
            else:
                limit = int(sign)
        return limit
| StarcoderdataPython |
8142145 | from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_rq2 import RQ
from flask_sqlalchemy import SQLAlchemy
# Unbound Flask extension singletons, created without an app object;
# presumably attached later via ext.init_app(app) in an application
# factory — verify against the app setup module.
bootstrap = Bootstrap()
db = SQLAlchemy()
migrate = Migrate()
rq = RQ()
login_manager = LoginManager()
# Endpoint name users are redirected to when login is required.
login_manager.login_view = "web.login"
| StarcoderdataPython |
5184044 | from django_elasticsearch_dsl import (
Document,
fields,
Index,
)
from django_elasticsearch_dsl_drf.compat import KeywordField, StringField
from django.conf import settings
from hyper.models import HyperManager
# Index name is looked up from Django settings, keyed by this module's
# dotted path.
INDEX = Index(settings.ELASTICSEARCH_INDEX_NAMES[__name__])


@INDEX.doc_type
class HyperManagerDocument(Document):
    """Elasticsearch document mirroring the HyperManager model."""

    id = fields.IntegerField()
    name = KeywordField()
    url = fields.KeywordField()
    online = fields.BooleanField()
    version = fields.KeywordField()

    class Django(object):
        # Source model this document is generated from.
        model = HyperManager
| StarcoderdataPython |
6416370 | # -*- coding: utf-8 -*-
"""
Created on Mar 18th 10:58:37 2016
run models, including training and validating
@author: hongyuan
"""
import pickle
import time
import numpy
import theano
from theano import sandbox
import theano.tensor as tensor
import os
import scipy.io
from collections import defaultdict
from theano.tensor.shared_randomstreams import RandomStreams
import modules.utils as utils
import modules.models as models
import modules.optimizers as optimizers
import modules.controllers as controllers
import modules.data_processers as data_processers
import modules.beam_search as searchers
import modules.evals as evaluations
# Default floating-point dtype taken from the Theano configuration.
dtype=theano.config.floatX
#TODO: function to train seq2seq models
def train_selgen(input_train):
    '''
    this function is called to train Sel Gen model

    input_train is a dict; keys read here: seed_random, save_file_path,
    log_file, args, track_period, max_epoch, size_batch, path_rawdata,
    dim_model, num_sel, optimizer, path_pre_train.
    Trains for max_epoch epochs, evaluating on the dev split every
    track_period iterations (BLEU via NLTK plus alignment F1) and saving
    the model when dev BLEU beats the best value seen so far.
    '''
    #TODO: pre-settings like random states
    numpy.random.seed(input_train['seed_random'])
    #
    save_file_path = os.path.abspath(
        input_train['save_file_path']
    )
    command_mkdir = 'mkdir -p ' + save_file_path
    os.system(command_mkdir)
    #
    # Bookkeeping dict passed to data_process.track_log after each eval.
    log_dict = {
        'log_file': input_train['log_file'],
        'save_file_path': save_file_path,
        'mode': 'create', 'compile_time': None,
        'min_dev_loss': 1e6,
        'max_dev_bleu': -1.0,
        #
        'args': input_train['args'],
        #
        'tracked_best': {},
        #
        'iteration': 0,
        'track_period': input_train['track_period'],
        'max_epoch': input_train['max_epoch'],
        'size_batch': input_train['size_batch'],
        'tracked': {
            'track_cnt': None,
            'train_loss': None,
            #'dev_loss': None,
            'dev_bleu': None,
            'dev_F1': None,
            #
            'train_time': None, 'dev_time': None
        }
    }
    #TODO: get the data and process the data
    print "reading and processing data ... "
    data_process = data_processers.DataProcesser(
        {
            'path_data': input_train['path_rawdata'],
            'size_batch': input_train['size_batch']
        }
    )
    #
    #TODO: build the model
    print "building model ... "
    compile_start = time.time()
    model_settings = {
        'dim_model': input_train['dim_model'],
        'dim_lang': data_process.dim_lang,
        'dim_info': data_process.dim_info,
        'num_sel': input_train['num_sel'],
        'size_batch': input_train['size_batch'],
        'optimizer': input_train['optimizer'],
        'path_pre_train': input_train['path_pre_train']
    }
    control = controllers.ControlSelGen(
        model_settings
    )
    compile_end = time.time()
    compile_time = compile_end - compile_start
    #'''
    print "building Bleu Scorer ... "
    settings_bs = {
        'size_beam': 1,
        'path_model': None,
        'normalize_mode': True
    }
    beam_search = searchers.BeamSearchSelGen(settings_bs)
    #
    #settings_bleu = {
    #    'path_program': None,
    #    'path_bleu': input_train['path_bleu']
    #}
    bleu_scorer = evaluations.BleuScoreNLTK()
    bleu_scorer.set_refs(
        data_process.get_refs(tag_split='dev')
    )
    #
    f1_computer = evaluations.F1Compute()
    f1_computer.set_golds(
        data_process.get_golds(tag_split='dev')
    )
    #
    print "model finished, comilation time is ", round(compile_time, 0)
    #TODO: start training, define the training functions
    print "building training log ... "
    log_dict['compile_time'] = round(compile_time, 0)
    data_process.track_log(log_dict)
    log_dict['mode'] = 'continue'
    for epi in range(log_dict['max_epoch']):
        #
        print "training epoch ", epi
        #
        err = 0.0
        #TODO: shuffle the training data and train this epoch
        data_process.shuffle_train_data()
        #
        for step_train in range(data_process.max_nums['train'] ):
            #
            train_start = time.time()
            #print "the step is ", step
            #
            data_process.process_data(
                'train', step_train
            )
            #
            #print "training ... "
            cost_numpy = control.model_learn(
                data_process.seq_info_numpy,
                data_process.seq_lang_numpy,
                data_process.seq_target_numpy
            )
            #
            #
            log_dict['iteration'] += 1
            err += cost_numpy
            #
            # train_loss is the running mean of the batch costs this epoch.
            log_dict['tracked']['train_loss'] = round(err/(step_train+1), 4)
            train_end = time.time()
            # train_time approximates the cost of one tracking window by
            # scaling the last step's duration by track_period.
            log_dict['tracked']['train_time'] = round(
                (
                    train_end - train_start
                )*log_dict['track_period'], 0
            )
            #
            if step_train % 10 == 9:
                print "in training, the step is out of ", step_train, data_process.max_nums['train']
            ########
            # Now we track the performance and save the model for every # batches, so that we do not miss the convergence within the epoch -- one epoch is too large sometimes
            ########
            if log_dict['iteration'] % log_dict['track_period'] == 0:
                #TODO: go through the dev data and calculate the dev metrics
                print "Now we start validating after batches ", log_dict['track_period']
                dev_start = time.time()
                #
                #TODO: set model to bleu score
                beam_search.set_model(
                    control.get_model()
                )
                #
                bleu_scorer.reset_gens()
                f1_computer.reset_aligns()
                #TODO: get the dev loss values
                # NOTE(review): sum_costs is initialised but never accumulated
                # here (dev loss tracking is commented out above).
                sum_costs = 0.0
                for step_dev in range(data_process.lens['dev']):
                    #
                    data_process.process_one_data(
                        'dev', step_dev
                    )
                    #
                    #print "validating ... "
                    #
                    beam_search.refresh_state()
                    beam_search.set_encoder(
                        data_process.seq_info_numpy
                    )
                    beam_search.init_beam()
                    beam_search.search_func()
                    #
                    f1_computer.add_align(
                        beam_search.get_top_att()
                    )
                    #
                    gen_step_dev = data_process.translate(
                        beam_search.get_top_target()
                    )
                    bleu_scorer.add_gen(gen_step_dev)
                    #
                    if step_dev % 100 == 99:
                        print "in dev, the step is out of ", step_dev, data_process.lens['dev']
                #
                bleu_score = bleu_scorer.evaluate()
                f1_score = f1_computer.evaluate()
                #
                log_dict['tracked']['dev_bleu'] = round(
                    bleu_score, 2
                )
                log_dict['tracked']['dev_F1'] = round(
                    f1_score, 2
                )
                #
                dev_end = time.time()
                log_dict['tracked']['dev_time'] = round( dev_end - dev_start, 0 )
                #
                log_dict['tracked']['track_cnt'] = int(
                    log_dict['iteration']/log_dict['track_period']
                )
                #
                #
                # NOTE(review): log_dict['max_dev_bleu'] is never updated after
                # a save, so this condition holds on every tracking step once
                # dev_bleu > -1.0; confirm whether the best score should be
                # recorded here.
                if log_dict['tracked']['dev_bleu'] > log_dict['max_dev_bleu']:
                    save_file = os.path.abspath(
                        log_dict['save_file_path']
                    ) + '/'+'model.pkl'
                    control.save_model(save_file)
                #
                data_process.track_log(log_dict)
            ########
    data_process.finish_log(log_dict)
    print "finish training"
#
#
def train_selgen_eval_angeli(input_train):
    '''
    this function is called to train Sel Gen model

    Variant of train_selgen that scores dev BLEU with the JVM-backed
    Angeli scorer (path_jvm / path_jar) instead of NLTK, tracking both a
    strict (threshold 0 -> dev_bleu_s) and a relaxed (threshold 5 ->
    dev_bleu) score, and passing an extra 'coef' entry to the model.
    '''
    #TODO: pre-settings like random states
    numpy.random.seed(input_train['seed_random'])
    #
    save_file_path = os.path.abspath(
        input_train['save_file_path']
    )
    command_mkdir = 'mkdir -p ' + save_file_path
    os.system(command_mkdir)
    #
    # Bookkeeping dict passed to data_process.track_log after each eval.
    log_dict = {
        'log_file': input_train['log_file'],
        'save_file_path': save_file_path,
        'mode': 'create', 'compile_time': None,
        'min_dev_loss': 1e6,
        'max_dev_bleu': -1.0,
        #
        'args': input_train['args'],
        #
        'tracked_best': {},
        #
        'iteration': 0,
        'track_period': input_train['track_period'],
        'max_epoch': input_train['max_epoch'],
        'size_batch': input_train['size_batch'],
        'tracked': {
            'track_cnt': None,
            'train_loss': None,
            #'dev_loss': None,
            'dev_bleu_s': None,
            'dev_bleu': None,
            'dev_F1': None,
            #
            'train_time': None, 'dev_time': None
        }
    }
    #TODO: get the data and process the data
    print "reading and processing data ... "
    data_process = data_processers.DataProcesser(
        {
            'path_data': input_train['path_rawdata'],
            'size_batch': input_train['size_batch']
        }
    )
    #
    #TODO: build the model
    print "building model ... "
    #'''
    print "building Bleu Scorer ... "
    settings_bs = {
        'size_beam': 1,
        'path_model': None,
        'normalize_mode': True
    }
    beam_search = searchers.BeamSearchSelGen(settings_bs)
    #
    #settings_bleu = {
    #    'path_program': None,
    #    'path_bleu': input_train['path_bleu']
    #}
    # JVM-backed BLEU scorer; requires shutdownJVM() at the end of training.
    bleu_scorer = evaluations.BleuScoreAngeli(
        {
            'path_jvm': input_train['path_jvm'],
            'path_jar': input_train['path_jar'],
            'max_diff': 0
        }
    )
    bleu_scorer.set_refs(
        data_process.get_refs(tag_split='dev')
    )
    #
    f1_computer = evaluations.F1Compute()
    f1_computer.set_golds(
        data_process.get_golds(tag_split='dev')
    )
    #
    compile_start = time.time()
    model_settings = {
        'dim_model': input_train['dim_model'],
        'dim_lang': data_process.dim_lang,
        'dim_info': data_process.dim_info,
        'num_sel': input_train['num_sel'],
        'size_batch': input_train['size_batch'],
        'optimizer': input_train['optimizer'],
        'path_pre_train': input_train['path_pre_train'],
        'coef': input_train['coef']
    }
    control = controllers.ControlSelGen(
        model_settings
    )
    compile_end = time.time()
    compile_time = compile_end - compile_start
    #
    print "model finished, comilation time is ", round(compile_time, 0)
    #TODO: start training, define the training functions
    print "building training log ... "
    log_dict['compile_time'] = round(compile_time, 0)
    data_process.track_log(log_dict)
    log_dict['mode'] = 'continue'
    for epi in range(log_dict['max_epoch']):
        #
        print "training epoch ", epi
        #
        err = 0.0
        #TODO: shuffle the training data and train this epoch
        data_process.shuffle_train_data()
        #
        for step_train in range(data_process.max_nums['train'] ):
            #
            train_start = time.time()
            #print "the step is ", step
            #
            data_process.process_data(
                'train', step_train
            )
            #
            #print "training ... "
            cost_numpy = control.model_learn(
                data_process.seq_info_numpy,
                data_process.seq_lang_numpy,
                data_process.seq_target_numpy
            )
            #
            #
            log_dict['iteration'] += 1
            err += cost_numpy
            #
            # train_loss is the running mean of the batch costs this epoch.
            log_dict['tracked']['train_loss'] = round(err/(step_train+1), 4)
            train_end = time.time()
            log_dict['tracked']['train_time'] = round(
                (
                    train_end - train_start
                )*log_dict['track_period'], 0
            )
            #
            if step_train % 10 == 9:
                print "in training, the step is out of ", step_train, data_process.max_nums['train']
            ########
            # Now we track the performance and save the model for every # batches, so that we do not miss the convergence within the epoch -- one epoch is too large sometimes
            ########
            if log_dict['iteration'] % log_dict['track_period'] == 0:
                #TODO: go through the dev data and calculate the dev metrics
                print "Now we start validating after batches ", log_dict['track_period']
                dev_start = time.time()
                #
                #TODO: set model to bleu score
                beam_search.set_model(
                    control.get_model()
                )
                #
                bleu_scorer.reset_gens()
                f1_computer.reset_aligns()
                #TODO: get the dev loss values
                # NOTE(review): sum_costs is initialised but never accumulated.
                sum_costs = 0.0
                for step_dev in range(data_process.lens['dev']):
                    #
                    data_process.process_one_data(
                        'dev', step_dev
                    )
                    #
                    #print "validating ... "
                    #
                    beam_search.refresh_state()
                    beam_search.set_encoder(
                        data_process.seq_info_numpy
                    )
                    beam_search.init_beam()
                    beam_search.search_func()
                    #
                    f1_computer.add_align(
                        beam_search.get_top_att()
                    )
                    #
                    gen_step_dev = data_process.translate(
                        beam_search.get_top_target()
                    )
                    bleu_scorer.add_gen(gen_step_dev)
                    #
                    if step_dev % 100 == 99:
                        print "in dev, the step is out of ", step_dev, data_process.lens['dev']
                #
                # Strict BLEU (no tolerance) ...
                bleu_scorer.set_threshold(0)
                bleu_score = bleu_scorer.evaluate()
                #
                # ... and relaxed BLEU (tolerance 5) over the same generations.
                bleu_scorer.set_threshold(5)
                bleu_score_2 = bleu_scorer.evaluate()
                #
                f1_score = f1_computer.evaluate()
                #
                log_dict['tracked']['dev_bleu_s'] = round(
                    bleu_score, 2
                )
                log_dict['tracked']['dev_bleu'] = round(
                    bleu_score_2, 2
                )
                log_dict['tracked']['dev_F1'] = round(
                    f1_score, 2
                )
                #
                dev_end = time.time()
                log_dict['tracked']['dev_time'] = round( dev_end - dev_start, 0 )
                #
                log_dict['tracked']['track_cnt'] = int(
                    log_dict['iteration']/log_dict['track_period']
                )
                #
                #
                # NOTE(review): max_dev_bleu is never updated, so the model is
                # re-saved on every tracking step; confirm intent.
                if log_dict['tracked']['dev_bleu'] > log_dict['max_dev_bleu']:
                    save_file = os.path.abspath(
                        log_dict['save_file_path']
                    ) + '/'+'model.pkl'
                    control.save_model(save_file)
                #
                data_process.track_log(log_dict)
            ########
    data_process.finish_log(log_dict)
    bleu_scorer.shutdownJVM()
    print "finish training"
#
#
| StarcoderdataPython |
260790 | from __future__ import print_function
import os
import sys
import time
import argparse
import datetime
import math
import pickle
import numpy as np
import torchvision
import torchvision.transforms as transforms
import torch
import torch.utils.data as data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import Bayesian_config as cf
from utils.BBBlayers import GaussianVariationalInference
from utils.BayesianModels.Bayesian3Conv3FC import BBB3Conv3FC
from utils.BayesianModels.BayesianAlexNet import BBBAlexNet
from utils.BayesianModels.BayesianLeNet import BBBLeNet
# Command-line configuration for Bayesian-CNN training.
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--lr', default=0.0001, type=float, help='learning_rate')
parser.add_argument('--net_type', default='3conv3fc', type=str, help='model')
#parser.add_argument('--depth', default=28, type=int, help='depth of model')
#parser.add_argument('--widen_factor', default=10, type=int, help='width of model')
parser.add_argument('--num_samples', default=10, type=int, help='Number of samples')
parser.add_argument('--beta_type', default="Blundell", type=str, help='Beta type')
parser.add_argument('--p_logvar_init', default=0, type=int, help='p_logvar_init')
parser.add_argument('--q_logvar_init', default=-10, type=int, help='q_logvar_init')
parser.add_argument('--weight_decay', default=0.0005, type=float, help='weight_decay')
parser.add_argument('--dataset', default='mnist', type=str, help='dataset = [mnist/cifar10/cifar100]')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--testOnly', '-t', action='store_true', help='Test mode with the saved model')
args = parser.parse_args()

# Hyper Parameter settings
# use_cuda = torch.cuda.is_available()
use_cuda = cf.use_cuda()  # GPU usage decided by the config module
if use_cuda is True:
    torch.cuda.set_device(0)
best_acc = 0  # best test accuracy so far, updated by test()
resize = 32   # inputs are resized to resize x resize
start_epoch, num_epochs, batch_size, optim_type = cf.start_epoch, cf.num_epochs, cf.batch_size, cf.optim_type
# Data Upload
print('\n[Phase 1] : Data Preparation')
# Train/test transforms: resize -> tensor -> per-dataset mean/std normalisation.
transform_train = transforms.Compose([
    transforms.Resize((resize, resize)),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
]) # meanstd transformation

transform_test = transforms.Compose([
    transforms.Resize((resize, resize)),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])

# Select the dataset and record its class count (outputs) and
# channel count (inputs) for the network constructors below.
if (args.dataset == 'cifar10'):
    print("| Preparing CIFAR-10 dataset...")
    sys.stdout.write("| ")
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
    outputs = 10
    inputs = 3
elif (args.dataset == 'cifar100'):
    print("| Preparing CIFAR-100 dataset...")
    sys.stdout.write("| ")
    trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
    testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test)
    outputs = 100
    inputs = 3
elif (args.dataset == 'mnist'):
    print("| Preparing MNIST dataset...")
    sys.stdout.write("| ")
    trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform_train)
    testset = torchvision.datasets.MNIST(root='./data', train=False, download=False, transform=transform_test)
    outputs = 10
    inputs = 1
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=4)
# Return network & file name
def getNetwork(args):
    """Build the Bayesian network selected by ``args.net_type``.

    Returns a ``(net, file_name)`` tuple, where ``file_name`` is the
    checkpoint-name prefix used elsewhere in this script. Exits the
    process with a non-zero status on an unknown net type.
    """
    if (args.net_type == 'lenet'):
        # NOTE(review): 'lenet' has no trailing '-' unlike the other two,
        # so its checkpoint path differs in shape — confirm intended.
        net = BBBLeNet(outputs, inputs)
        file_name = 'lenet'
    elif (args.net_type == 'alexnet'):
        net = BBBAlexNet(outputs, inputs)
        file_name = 'alexnet-'
    elif (args.net_type == '3conv3fc'):
        net = BBB3Conv3FC(outputs, inputs)
        file_name = '3Conv3FC-'
    else:
        print('Error : Network should be either [LeNet / AlexNet / SqueezeNet / 3Conv3FC]')
        # BUG FIX: exit with a failure status; exit(0) made shell scripts
        # treat the aborted run as success.
        sys.exit(1)
    return net, file_name
# Model
print('\n[Phase 2] : Model setup')
if args.resume:
# Load checkpoint
print('| Resuming from checkpoint...')
assert os.path.isdir('checkpoint'), 'Error: No checkpoint directory found!'
_, file_name = getNetwork(args)
checkpoint = torch.load('./checkpoint/'+args.dataset+os.sep+file_name+'.t7')
net = checkpoint['net']
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
else:
print('| Building net type [' + args.net_type + ']...')
net, file_name = getNetwork(args)
if use_cuda:
net.cuda()
vi = GaussianVariationalInference(torch.nn.CrossEntropyLoss())
logfile = os.path.join('diagnostics_Bayes{}_{}.txt'.format(args.net_type, args.dataset))
# Training
def train(epoch):
net.train()
train_loss = 0
correct = 0
total = 0
m = math.ceil(len(trainset) / batch_size)
optimizer = optim.Adam(net.parameters(), lr=cf.learning_rate(args.lr, epoch), weight_decay=args.weight_decay)
print('\n=> Training Epoch #%d, LR=%.4f' %(epoch, cf.learning_rate(args.lr, epoch)))
for batch_idx, (inputs_value, targets) in enumerate(trainloader):
# repeat samples for
x = inputs_value.view(-1, inputs, resize, resize).repeat(args.num_samples, 1, 1, 1)
print(x.shape)
y = targets.repeat(args.num_samples)
if use_cuda:
x, y = x.cuda(), y.cuda() # GPU settings
if args.beta_type is "Blundell":
beta = 2 ** (m - (batch_idx + 1)) / (2 ** m - 1)
elif args.beta_type is "Soenderby":
beta = min(epoch / (num_epochs // 4), 1)
elif args.beta_type is "Standard":
beta = 1 / m
else:
beta = 0
# Forward Propagation
x, y = Variable(x), Variable(y)
outputs, kl = net.probforward(x)
#print(outputs.shape)
loss = vi(outputs, y, kl, beta) # Loss
optimizer.zero_grad()
loss.backward() # Backward Propagation
optimizer.step() # Optimizer update
train_loss += loss.data
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(y.data).cpu().sum()
sys.stdout.write('\r')
sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%' %(epoch, num_epochs, batch_idx+1,
(len(trainset)//batch_size)+1, loss.data, (100*correct/total)/args.num_samples))
sys.stdout.flush()
#diagnostics_to_write = {'Epoch': epoch, 'Loss': loss.data[0], 'Accuracy': (100*correct/total)/args.num_samples}
diagnostics_to_write = {'Epoch': epoch, 'Loss': loss.data, 'Accuracy': (100*correct/total)/args.num_samples}
with open(logfile, 'a') as lf:
lf.write(str(diagnostics_to_write))
def test(epoch):
    """Evaluate on the test set, log diagnostics, checkpoint on a new best.

    Uses module-level globals (net, testset, testloader, args, use_cuda, vi,
    logfile, file_name) and updates the global ``best_acc``.
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    conf = []  # top-1 softmax confidence collected per batch
    m = math.ceil(len(testset) / batch_size)
    for batch_idx, (inputs_value, targets) in enumerate(testloader):
        x = inputs_value.view(-1, inputs, resize, resize).repeat(args.num_samples, 1, 1, 1)
        y = targets.repeat(args.num_samples)
        if use_cuda:
            x, y = x.cuda(), y.cuda()
        with torch.no_grad():
            x, y = Variable(x), Variable(y)
            outputs, kl = net.probforward(x)
        # BUG FIX: value comparison instead of identity (`is`) on string literals.
        if args.beta_type == "Blundell":
            beta = 2 ** (m - (batch_idx + 1)) / (2 ** m - 1)
        elif args.beta_type == "Soenderby":
            beta = min(epoch / (num_epochs // 4), 1)
        elif args.beta_type == "Standard":
            beta = 1 / m
        else:
            beta = 0
        loss = vi(outputs, y, kl, beta)
        test_loss += loss.data
        _, predicted = torch.max(outputs.data, 1)
        preds = F.softmax(outputs, dim=1)
        results = torch.topk(preds.cpu().data, k=1, dim=1)
        conf.append(results[0][0].item())
        total += targets.size(0)
        correct += predicted.eq(y.data).cpu().sum()

    # Confidence statistics over the collected top-1 probabilities.
    # NOTE(review): epistemic/aleatoric/confidence_* are computed but never
    # used or logged — kept for parity with the original; confirm intent.
    p_hat = np.array(conf)
    confidence_mean = np.mean(p_hat, axis=0)
    confidence_var = np.var(p_hat, axis=0)
    epistemic = np.mean(p_hat ** 2, axis=0) - np.mean(p_hat, axis=0) ** 2
    aleatoric = np.mean(p_hat * (1 - p_hat), axis=0)

    acc = (100*correct/total)/args.num_samples
    print('\n| Validation Epoch #%d\t\t\tLoss: %.4f Acc@1: %.2f%%' % (epoch, loss.data, acc))
    test_diagnostics_to_write = {'Validation Epoch': epoch, 'Loss': loss.data, 'Accuracy': acc}
    with open(logfile, 'a') as lf:
        lf.write(str(test_diagnostics_to_write))

    # Save checkpoint when this is the best accuracy seen so far.
    if acc > best_acc:
        print('| Saving Best model...\t\t\tTop1 = %.2f%%' % (acc))
        state = {
            'net': net,  # original had `net if use_cuda else net` — identical either way
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        save_point = './checkpoint/'+args.dataset+os.sep
        if not os.path.isdir(save_point):
            os.mkdir(save_point)
        torch.save(state, save_point+file_name+'.t7')
        best_acc = acc
print('\n[Phase 3] : Training model')
print('| Training Epochs = ' + str(num_epochs))
print('| Initial Learning Rate = ' + str(args.lr))
print('| Optimizer = ' + str(optim_type))

elapsed_time = 0
for epoch in range(start_epoch, start_epoch+num_epochs):
    start_time = time.time()
    # One full training pass, then evaluation + checkpointing.
    train(epoch)
    test(epoch)
    epoch_time = time.time() - start_time
    elapsed_time += epoch_time
    print('| Elapsed time : %d:%02d:%02d' %(cf.get_hms(elapsed_time)))

print('\n[Phase 4] : Testing model')
print('* Test results : Acc@1 = %.2f%%' %(best_acc))
| StarcoderdataPython |
6639373 | <filename>tools/perf-scale-workload/devops_query_driver.py
##################################################
## A multi-process and multi-threaded driver #####
## that executes the specified query workload ####
## simulating concurrent user sessions querying ##
## recent and historical data ingested into ######
## the specified database and table in Timestream
##################################################
import query_executer as tsb_query
from query_execution_utils import Query
import timestreamquery as tsquery
import argparse
import os
from pathlib import Path
import multiprocessing
from timeit import default_timer as timer
import datetime
######################################
########## Query Workload ###########
######################################
##
## This query emulates an alerting use-case where the goal is to obtain the aggregate values
## of a metric for the specified time period for a given region, cell, silo, availability_zone
## and microservice_name. The application can fire an alert if the metric aggregate values are
## above or below a threshold.
##
q1Str = """
SELECT region, cell, silo, availability_zone, microservice_name,
BIN(time, 1m) AS time_bin,
COUNT(DISTINCT instance_name) AS num_hosts,
ROUND(AVG(measure_value::double), 2) AS avg_value,
ROUND(APPROX_PERCENTILE(measure_value::double, 0.9), 2) AS p90_value,
ROUND(APPROX_PERCENTILE(measure_value::double, 0.95), 2) AS p95_value,
ROUND(APPROX_PERCENTILE(measure_value::double, 0.99), 2) AS p99_value
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND measure_name = '{4}'
AND region = '{5}' AND cell = '{6}'
AND silo = '{7}' AND availability_zone = '{8}'
AND microservice_name = '{9}'
GROUP BY region, cell, silo, availability_zone, microservice_name, BIN(time, 1m)
ORDER BY p99_value DESC
"""
##
## Variant of Q1 when data is emitted in the MULTI model.
##
q1StrMulti = """
SELECT region, cell, silo, availability_zone, microservice_name,
BIN(time, 1m) AS time_bin,
COUNT(DISTINCT instance_name) AS num_hosts,
ROUND(AVG({4}), 2) AS avg_value,
ROUND(APPROX_PERCENTILE({4}, 0.9), 2) AS p90_value,
ROUND(APPROX_PERCENTILE({4}, 0.95), 2) AS p95_value,
ROUND(APPROX_PERCENTILE({4}, 0.99), 2) AS p99_value
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND measure_name = 'metrics'
AND region = '{5}' AND cell = '{6}'
AND silo = '{7}' AND availability_zone = '{8}'
AND microservice_name = '{9}'
GROUP BY region, cell, silo, availability_zone, microservice_name, BIN(time, 1m)
ORDER BY p99_value DESC
"""
##
## This query emulates an alerting use-case where the goal is to obtain the aggregate values
## of multiple metrics, specifically cpu_user and cpu_system, for the specified time period
## for a given region, cell, silo, availability_zone, microservice_name, instance_type, and
## os_version. The application can fire an alert if the metric aggregate values are
## above or below a threshold.
##
q2Str = """
SELECT BIN(time, 1m) AS time_bin,
AVG(CASE WHEN measure_name = 'cpu_user' THEN measure_value::double ELSE NULL END) AS avg_cpu_user,
AVG(CASE WHEN measure_name = 'cpu_system' THEN measure_value::double ELSE NULL END) AS avg_cpu_system
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND measure_name IN (
'cpu_user', 'cpu_system'
)
AND region = '{4}' AND cell = '{5}' AND silo = '{6}'
AND availability_zone = '{7}' AND microservice_name = '{8}'
AND instance_type = '{9}' AND os_version = '{10}'
GROUP BY BIN(time, 1m)
ORDER BY time_bin desc
"""
##
## Variant of Q2 when data is emitted in the MULTI model
##
q2StrMulti = """
SELECT BIN(time, 1m) AS time_bin,
AVG(cpu_user) AS avg_cpu_user,
AVG(cpu_system) AS avg_cpu_system
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND measure_name = 'metrics'
AND region = '{4}' AND cell = '{5}' AND silo = '{6}'
AND availability_zone = '{7}' AND microservice_name = '{8}'
AND instance_type = '{9}' AND os_version = '{10}'
GROUP BY BIN(time, 1m)
ORDER BY time_bin desc
"""
##
## This query emulates populating a dashboard identifying the instances in a given region,
## and microservice name whose aggregate read of a specifed measure_name is above the
## region-wide aggregate of the metric.
##
q3Str = """
WITH microservice_cell_avg AS (
SELECT cell, microservice_name,
AVG(measure_value::double) AS microservice_avg_metric
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND region = '{4}'
AND measure_name = '{5}'
AND microservice_name = '{6}'
GROUP BY cell, microservice_name
), instance_avg AS (
SELECT cell, microservice_name, instance_name,
AVG(measure_value::double) AS instance_avg_metric
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND region = '{4}'
AND measure_name = '{5}'
AND microservice_name = '{6}'
GROUP BY instance_name, cell, microservice_name
)
SELECT i.*, m.microservice_avg_metric
FROM microservice_cell_avg m INNER JOIN instance_avg i
ON i.cell = m.cell AND i.microservice_name = m.microservice_name
WHERE i.instance_avg_metric > (1 + {7}) * m.microservice_avg_metric
ORDER BY i.instance_avg_metric DESC
"""
##
## Variant of Q3 when data is emitted in the MULTI model
##
q3StrMulti = """
WITH microservice_cell_avg AS (
SELECT cell, microservice_name,
AVG({5}) AS microservice_avg_metric
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND region = '{4}'
AND measure_name = 'metrics'
AND microservice_name = '{6}'
GROUP BY cell, microservice_name
), instance_avg AS (
SELECT cell, microservice_name, instance_name,
AVG({5}) AS instance_avg_metric
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND region = '{4}'
AND measure_name = 'metrics'
AND microservice_name = '{6}'
GROUP BY instance_name, cell, microservice_name
)
SELECT i.*, m.microservice_avg_metric
FROM microservice_cell_avg m INNER JOIN instance_avg i
ON i.cell = m.cell AND i.microservice_name = m.microservice_name
WHERE i.instance_avg_metric > (1 + {7}) * m.microservice_avg_metric
ORDER BY i.instance_avg_metric DESC
"""
##
## This query emulates populating a dashboard with the hourly distribution of different resource
## utilization metrics of the specified microservice_name across differen regions, cells, and
## silos for the past several hours.
##
q4Str = """
SELECT region, silo, cell, microservice_name, BIN(time, 1h) AS hour,
COUNT(CASE WHEN measure_name = 'cpu_user' THEN measure_value::double ELSE NULL END) AS num_cpu_user_samples,
ROUND(AVG(CASE WHEN measure_name = 'cpu_user' THEN measure_value::double ELSE NULL END), 2) AS avg_cpu_user,
ROUND(MAX(CASE WHEN measure_name = 'cpu_user' THEN measure_value::double ELSE NULL END), 2) AS max_cpu_user,
COUNT(CASE WHEN measure_name = 'memory_used' THEN measure_value::double ELSE NULL END) AS num_memory_used_samples,
ROUND(AVG(CASE WHEN measure_name = 'memory_used' THEN measure_value::double ELSE NULL END), 2) AS avg_memory_used,
ROUND(MAX(CASE WHEN measure_name = 'memory_used' THEN measure_value::double ELSE NULL END), 2) AS max_memory_used
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND measure_name IN (
'cpu_user', 'memory_used'
)
AND microservice_name = '{4}'
GROUP BY silo, cell, region, microservice_name, BIN(time, 1h)
ORDER BY region, silo, cell, microservice_name, hour DESC
"""
##
## Variant of Q4 when data is emitted in the MULTI model
##
q4StrMulti = """
SELECT region, silo, cell, microservice_name, BIN(time, 1h) AS hour,
COUNT(cpu_user) AS num_cpu_user_samples,
ROUND(AVG(cpu_user), 2) AS avg_cpu_user,
ROUND(MAX(cpu_user), 2) AS max_cpu_user,
COUNT(memory_used) AS num_memory_used_samples,
ROUND(AVG(memory_used), 2) AS avg_memory_used,
ROUND(MAX(memory_used), 2) AS max_memory_used
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND measure_name = 'metrics'
AND microservice_name = '{4}'
GROUP BY silo, cell, region, microservice_name, BIN(time, 1h)
ORDER BY region, silo, cell, microservice_name, hour DESC
"""
##
## This query emulates an analysis scenario to find the instances with the highest memory
## utilization across the different microservices and then compute the distribution of gc_pause for those
## instances over the past day. The region, cell, silo, and availability_zone are specified.
##
q5Str = """
WITH per_instance_memory_used AS (
SELECT silo, microservice_name, instance_name, BIN(time, 5m) AS time_bin,
MAX(measure_value::double) AS max_memory
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND measure_name = 'memory_used'
AND region = '{4}' AND cell = '{5}'
AND silo = '{6}' AND availability_zone = '{7}'
GROUP BY microservice_name, instance_name, BIN(time, 5m), silo
), per_microservice_memory AS (
SELECT silo, microservice_name,
APPROX_PERCENTILE(max_memory, 0.95) AS p95_max_memory
FROM per_instance_memory_used
GROUP BY silo, microservice_name
), per_silo_ranked AS (
SELECT silo, microservice_name,
DENSE_RANK() OVER (PARTITION BY silo ORDER BY p95_max_memory DESC) AS rank
FROM per_microservice_memory
), instances_with_high_memory AS (
SELECT r.silo, r.microservice_name, m.instance_name,
APPROX_PERCENTILE(max_memory, 0.95) AS p95_max_memory
FROM per_silo_ranked r INNER JOIN per_instance_memory_used m
ON r.silo = m.silo AND r.microservice_name = m.microservice_name
WHERE r.rank = 1
GROUP BY m.instance_name, r.silo, r.microservice_name
), ranked_instances AS (
SELECT silo, microservice_name, instance_name,
DENSE_RANK() OVER (PARTITION BY silo, microservice_name ORDER BY p95_max_memory DESC) AS rank
FROM instances_with_high_memory
)
SELECT t.silo, t.microservice_name, t.instance_name,
MIN(measure_value::double) AS min_gc_pause,
ROUND(AVG(measure_value::double), 2) AS avg_gc_pause,
ROUND(STDDEV(measure_value::double), 2) AS stddev_gc_pause,
ROUND(APPROX_PERCENTILE(measure_value::double, 0.5), 2) AS p50_gc_pause,
ROUND(APPROX_PERCENTILE(measure_value::double, 0.9), 2) AS p90_gc_pause,
ROUND(APPROX_PERCENTILE(measure_value::double, 0.99), 2) AS p99_gc_pause
FROM ranked_instances r INNER JOIN {0}.{1} t ON
r.silo = t.silo AND
r.microservice_name = t.microservice_name AND r.instance_name = t.instance_name
WHERE time BETWEEN {2} AND {3}
AND measure_name = 'gc_pause' AND rank = 1
GROUP BY t.instance_name, t.silo, t.microservice_name
"""
##
## Variant of Q5 when data is emitted in the MULTI model
##
q5StrMulti = """
WITH per_instance_memory_used AS (
SELECT silo, microservice_name, instance_name, BIN(time, 5m) AS time_bin,
MAX(memory_used) AS max_memory
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND measure_name = 'metrics'
AND region = '{4}' AND cell = '{5}'
AND silo = '{6}' AND availability_zone = '{7}'
GROUP BY microservice_name, instance_name, BIN(time, 5m), silo
), per_microservice_memory AS (
SELECT silo, microservice_name,
APPROX_PERCENTILE(max_memory, 0.95) AS p95_max_memory
FROM per_instance_memory_used
GROUP BY silo, microservice_name
), per_silo_ranked AS (
SELECT silo, microservice_name,
DENSE_RANK() OVER (PARTITION BY silo ORDER BY p95_max_memory DESC) AS rank
FROM per_microservice_memory
), instances_with_high_memory AS (
SELECT r.silo, r.microservice_name, m.instance_name,
APPROX_PERCENTILE(max_memory, 0.95) AS p95_max_memory
FROM per_silo_ranked r INNER JOIN per_instance_memory_used m
ON r.silo = m.silo AND r.microservice_name = m.microservice_name
WHERE r.rank = 1
GROUP BY m.instance_name, r.silo, r.microservice_name
), ranked_instances AS (
SELECT silo, microservice_name, instance_name,
DENSE_RANK() OVER (PARTITION BY silo, microservice_name ORDER BY p95_max_memory DESC) AS rank
FROM instances_with_high_memory
)
SELECT t.silo, t.microservice_name, t.instance_name,
MIN(gc_pause) AS min_gc_pause,
ROUND(AVG(gc_pause), 2) AS avg_gc_pause,
ROUND(STDDEV(gc_pause), 2) AS stddev_gc_pause,
ROUND(APPROX_PERCENTILE(gc_pause, 0.5), 2) AS p50_gc_pause,
ROUND(APPROX_PERCENTILE(gc_pause, 0.9), 2) AS p90_gc_pause,
ROUND(APPROX_PERCENTILE(gc_pause, 0.99), 2) AS p99_gc_pause
FROM ranked_instances r INNER JOIN {0}.{1} t ON
r.silo = t.silo AND
r.microservice_name = t.microservice_name AND r.instance_name = t.instance_name
WHERE time BETWEEN {2} AND {3}
AND measure_name = 'events' AND rank = 1
GROUP BY t.instance_name, t.silo, t.microservice_name
"""
##
## This query emulates an analysis scenario to find the hours of the day with highest CPU utilization
## across microservices for the specified region and cell.
##
q6Str = """
WITH per_instance_cpu_used AS (
SELECT microservice_name, instance_name, BIN(time, 15m) AS time_bin,
AVG(measure_value::double) AS avg_cpu
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND measure_name = 'cpu_user'
AND region = '{4}'
AND cell = '{5}'
GROUP BY instance_name, microservice_name, BIN(time, 15m)
), per_microservice_cpu AS (
SELECT microservice_name, HOUR(time_bin) AS hour, BIN(time_bin, 24h) AS day,
APPROX_PERCENTILE(avg_cpu, 0.95) AS p95_avg_cpu
FROM per_instance_cpu_used
GROUP BY HOUR(time_bin), BIN(time_bin, 24h), microservice_name
), per_microservice_ranked AS (
SELECT microservice_name, day, hour, p95_avg_cpu,
DENSE_RANK() OVER (PARTITION BY microservice_name, day ORDER BY p95_avg_cpu DESC) AS rank
FROM per_microservice_cpu
)
SELECT microservice_name, day, hour AS hour, p95_avg_cpu
FROM per_microservice_ranked
WHERE rank <= 3
ORDER BY microservice_name, day, rank ASC
"""
##
## Variant of Q6 when data is emitted in the MULTI model
##
q6StrMulti = """
WITH per_instance_cpu_used AS (
SELECT microservice_name, instance_name, BIN(time, 15m) AS time_bin,
AVG(cpu_user) AS avg_cpu
FROM {0}.{1}
WHERE time BETWEEN {2} AND {3}
AND measure_name = 'metrics'
AND region = '{4}'
AND cell = '{5}'
GROUP BY instance_name, microservice_name, BIN(time, 15m)
), per_microservice_cpu AS (
SELECT microservice_name, HOUR(time_bin) AS hour, BIN(time_bin, 24h) AS day,
APPROX_PERCENTILE(avg_cpu, 0.95) AS p95_avg_cpu
FROM per_instance_cpu_used
GROUP BY HOUR(time_bin), BIN(time_bin, 24h), microservice_name
), per_microservice_ranked AS (
SELECT microservice_name, day, hour, p95_avg_cpu,
DENSE_RANK() OVER (PARTITION BY microservice_name, day ORDER BY p95_avg_cpu DESC) AS rank
FROM per_microservice_cpu
)
SELECT microservice_name, day, hour AS hour, p95_avg_cpu
FROM per_microservice_ranked
WHERE rank <= 3
ORDER BY microservice_name, day, rank ASC
"""
# Query ids: used as the keys of the dict returned by createQueryInstances
# and as labels in reported results.
q1 = "do-q1"
q2 = "do-q2"
q3 = "do-q3"
q4 = "do-q4"
q5 = "do-q5"
q6 = "do-q6"
### Create the query instances based on the specified parameters.
def createQueryInstances(params, endTime = "now()", wide = False):
    """Build the named query instances for the DevOps query workload.

    params  -- object carrying dbname, tablename, region, cell, silo, az,
               microservicename, instancetype and osversion attributes.
    endTime -- upper bound of the queries' time predicates, as a Timestream
               time expression (default: "now()").
    wide    -- when True, use the MULTI (multi-measure/wide) query variants;
               otherwise the single-measure variants.

    Returns a dict mapping the query ids q1..q6 to Query objects.
    """
    # Only the SQL templates differ between the two data models; the
    # parameterization below is identical, so select the templates once
    # instead of duplicating the whole dict (as the original code did).
    if wide:
        templates = {q1: q1StrMulti, q2: q2StrMulti, q3: q3StrMulti,
                     q4: q4StrMulti, q5: q5StrMulti, q6: q6StrMulti}
    else:
        templates = {q1: q1Str, q2: q2Str, q3: q3Str,
                     q4: q4Str, q5: q5Str, q6: q6Str}

    queries = {
        ## Alerting query analyzing an hour of data.
        q1 : Query(templates[q1], tsb_query.QueryParams(1000, (params.dbname, params.tablename, "{} - 1h".format(endTime), "{}".format(endTime), "memory_used", params.region, params.cell, params.silo, params.az, params.microservicename))),
        ## Alerting query analyzing an hour of data.
        q2 : Query(templates[q2], tsb_query.QueryParams(1000, (params.dbname, params.tablename, "{} - 1h".format(endTime), "{}".format(endTime), params.region, params.cell, params.silo, params.az, params.microservicename, params.instancetype, params.osversion))),
        ## Populating a dashboard analyzing an hour of data.
        q3 : Query(templates[q3], tsb_query.QueryParams(50, (params.dbname, params.tablename, "{} - 1h".format(endTime), "{}".format(endTime), params.region, "disk_used", params.microservicename, 0.2))),
        ## Populating a dashboard analyzing three hours of data.
        q4 : Query(templates[q4], tsb_query.QueryParams(50, (params.dbname, params.tablename, "{} - 3h".format(endTime), "{}".format(endTime), params.microservicename))),
        ## Analysis query processing 1 day of data.
        q5 : Query(templates[q5], tsb_query.QueryParams(20, (params.dbname, params.tablename, "{} - 1d".format(endTime), "{}".format(endTime), params.region, params.cell, params.silo, params.az))),
        ## Analysis query processing 3 days of data.
        q6 : Query(templates[q6], tsb_query.QueryParams(20, (params.dbname, params.tablename, "{} - 3d".format(endTime), "{}".format(endTime), params.region, params.cell)))
    }
    return queries
if __name__ == "__main__":
    # Fixed copy-pasted description ("Ingestion Driver" -> "Query Driver").
    parser = argparse.ArgumentParser(prog = 'DevOps Query Driver', description='Execute the Query Driver for DevOps Workload.')
    parser.add_argument('--database-name', '-d', dest="databaseName", action = "store", required = True, help = "The database name for the workload.")
    parser.add_argument('--table-name', '-t', dest="tableName", action = "store", required = True, help = "The table name for the workload.")
    parser.add_argument('--region', '-r', action = "store", required = True, help="Specify the region where the Timestream database is located.")
    parser.add_argument('--endpoint', '-e', action = "store", default = None, help="Specify the endpoint where the Timestream database is located.")
    parser.add_argument('--config', action = "store", type = str, required = True, help = "A configuration file defining properties of the workload")
    parser.add_argument('--concurrency', '-c', action = "store", type = int, default = 1, help = "Number of concurrent threads to use (default: 1)")
    parser.add_argument('--processes', '-p', action = "store", type = int, default = 1, help = "Number of concurrent processes to use (default: 1)")
    parser.add_argument('--log-dir', '-l', dest="logDir", action = "store", default = str(os.path.join(Path.home(), 'timestream_perf')), help = "The directory to log experiment results (default: ~/timestream_perf)")
    parser.add_argument('--run-prefix', dest = "runPrefix", action = "store", default = None, help = "Identifier for the run.")
    parser.add_argument('--query-end-time', dest = "queryEndTime", action = "store", default = "now()", help = "The interval end time for the query time predicates (default: 'now()')")
    parser.add_argument('--repetitions', action = "store", type = int, default = 0, help = "Whether to override the repetitions (default: 0)")
    parser.add_argument('--multi', dest = "wide", action = "store_true", help = "Enable queries in the wide format.")
    parser.add_argument('--profile', action = "store", type = str, default= None, help = "The AWS profile to use.")
    parser.add_argument('--think-time-milliseconds', dest = "thinkTimeMillis", action = "store", type = int, default = 30000, help = "Think time (in ms) between queries (default: 30000)")
    parser.add_argument('--randomized-think-time', dest = "randomizedThink", action = "store_true", help = "Use a randomized think time")
    parser.add_argument('--fixed-params', dest = "fixedParams", action = "store_true", help = "Whether to use fixed parameters for queries or get the query parameters dynamically from database.")

    args = parser.parse_args()
    print(args)
    if args.runPrefix is None:
        args.runPrefix = args.endpoint

    workloadStart = timer()
    startTime = datetime.datetime.utcnow()

    # Fan out one worker process per requested process; each worker sends its
    # per-thread outputs and query count back through its end of a pipe.
    processes = list()
    for processId in range(1, args.processes + 1):
        parentConn, childConn = multiprocessing.Pipe()
        process = tsb_query.MultiProcessQueryWorker(processId, args, startTime, createQueryInstances, childConn)
        process.start()
        processes.append((process, parentConn))

    # Collect results (the original initialized these two variables twice).
    queryCount = 0
    outputs = dict()
    for p, conn in processes:
        output = conn.recv()
        p.join()
        if "Outputs" in output:
            outputs[p.processId] = output["Outputs"]
            queryCount += output["Count"]
        else:
            print("Process {} exited with error.".format(p.processId))
            outputs[p.processId] = dict()

    workloadEnd = timer()
    print("Experiment {} complete".format(args.runPrefix))
    for pidVals in outputs.values():
        for tidVals in pidVals.values():
            for ops in tidVals:
                print(ops)

    # Aggregate throughput across all processes over the whole wall-clock run.
    tps = queryCount / (workloadEnd - workloadStart)
    print("TPS: {}".format(round(tps, 3)))
| StarcoderdataPython |
9678181 | from flask import Blueprint
# Blueprint grouping the user-facing routes; templates/ and static/ are
# resolved relative to this package.
user = Blueprint('user', __name__, template_folder='templates',static_folder='static')

# Imported after `user` is defined so the views module can register its
# routes on this blueprint without a circular import at module load time.
from ytegg.user import views
| StarcoderdataPython |
1863991 | <reponame>tanaydw/CenterNet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import cv2
import numpy as np
from opts import opts
from detectors.detector_factory import detector_factory
image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
def combine_images(left, right):
    """Place two images side by side on a shared black canvas.

    The canvas is as tall as the taller input and as wide as both inputs
    combined; the shorter image is zero-padded (black) below it.
    Both inputs are HxWxC uint8-compatible arrays with the same channel count.
    """
    left_h, left_w = left.shape[:2]
    right_h, right_w = right.shape[:2]
    channels = left.shape[2]

    canvas = np.zeros(shape=(max(left_h, right_h), left_w + right_w, channels),
                      dtype=np.uint8)
    canvas[:left_h, :left_w] = left       # left image in the top-left corner
    canvas[:right_h, left_w:] = right     # right image starts where left ends
    return canvas
def undistort_image(img):
    """
    Undistort a frame using hard-coded camera intrinsics.

    The camera matrix and distortion coefficients below were calibrated
    just for the Negeley-Black video; this is not a general-purpose helper.
    (Removed the unused ``h, w = img.shape[:2]`` locals from the original.)
    """
    # Intrinsic matrix: [[fx, 0, cx], [0, fy, cy], [0, 0, 1]].
    mtx = np.array([
        [3389.14855, 0, 982.985434],
        [0, 3784.14471, 556.363307],
        [0, 0, 1]]
    )
    # Distortion coefficients (k1, k2, p1, p2, k3).
    dist = np.array([-1.83418584, 12.2930625, -0.00434882103, 0.0226389517, -85.1805652])
    # undistort
    return cv2.undistort(img, mtx, dist)
def demo(opt):
    """Run CenterNet inference on a webcam stream, a video file, a single
    image, or a directory of images, writing annotated output to ./results.

    opt -- parsed options object (task, demo path, gpus_str, debug, ...).
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.debug = max(opt.debug, 1)  # debug >= 1 so the detector produces renderings
    Detector = detector_factory[opt.task]
    detector = Detector(opt)
    if not os.path.exists('./results'):
        os.makedirs('./results')

    if opt.demo == 'webcam' or \
       opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:
        # --- video / webcam mode ---
        cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)
        out_name = (opt.demo.split('/')[-1]).split('.')[0]
        detector.pause = False
        cnt = 0
        while True:
            _, img = cam.read()
            if img is None:
                # End of stream: close the writer if it was ever created.
                # `out` only exists once the first frame was processed, so the
                # only anticipated failure is NameError (the original used a
                # bare `except:`, which also swallowed KeyboardInterrupt etc.).
                try:
                    out.release()
                except NameError:
                    print('File not found!!!')
                return
            cnt += 1
            img = undistort_image(img)
            ret = detector.run(img)
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            print('Frame ' + str(cnt) + ' |' + time_str)
            # Side by side: detection overlay (left) and bird's-eye view (right).
            img = combine_images(ret['add_pred'], ret['bird_pred'])
            if cnt == 1:
                # Lazily create the writer once the output frame size is known.
                fourcc = cv2.VideoWriter_fourcc(*'MJPG')
                out = cv2.VideoWriter('./results/{}_output.avi'.format(out_name),
                                      fourcc, 10, (img.shape[1], img.shape[0]))
            out.write(img)
            if cv2.waitKey(1) == 27:
                return  # esc to quit
    else:
        # --- single image / directory mode ---
        if os.path.isdir(opt.demo):
            image_names = []
            ls = os.listdir(opt.demo)
            for file_name in sorted(ls):
                ext = file_name[file_name.rfind('.') + 1:].lower()
                if ext in image_ext:
                    image_names.append(os.path.join(opt.demo, file_name))
        else:
            image_names = [opt.demo]
        for image_name in image_names:
            ret = detector.run(image_name)
            img_name = (image_name.split('/')[-1]).split('.')[0]
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            print(time_str)
            cv2.imwrite('./results' + '/{}_add_pred.png'.format(img_name), ret['add_pred'])
            cv2.imwrite('./results' + '/{}_bird_pred.png'.format(img_name), ret['bird_pred'])
if __name__ == '__main__':
    # Parse command-line options and run the demo entry point.
    opt = opts().init()
    demo(opt)
| StarcoderdataPython |
6459527 | <filename>joybusutils/tinymodule.py
# This is an example module to help me get started with nmigen
# It should be a module that sets a signal high after 5 clock cycles.
from nmigen import Elaboratable, Signal, Module
from nmigen.sim.pysim import Simulator, Tick
from tabulate import tabulate
class TinyModule(Elaboratable):
    # Example nMigen module: `output` goes high once the free-running counter
    # reaches 5 and is meant to stay high afterwards.

    def __init__(self):
        self.counter = Signal(5)  # 5-bit free-running counter
        self.output = Signal()    # asserted once counter hits 5

    def ports(self):
        # Signals exposed to the simulator / top level.
        return [self.output]

    def elaborate(self, platform):
        m = Module()
        # Counter increments every clock edge (wraps on overflow).
        m.d.sync += self.counter.eq(self.counter + 1)
        # Once high, output should stay high.
        # NOTE(review): `output` is driven combinationally from its own value
        # here, forming a combinational feedback path to make it "stick";
        # a registered (m.d.sync) latch would be the conventional way --
        # confirm this elaborates/simulates as intended.
        with m.If(self.output == 1):
            m.d.comb += self.output.eq(1)
        # Otherwise, wait for 5 clock ticks
        with m.Elif(self.counter == 5):
            m.d.comb += self.output.eq(1)
        return m
if __name__ == "__main__":
    tinymodule = TinyModule()
    sim = Simulator(tinymodule)
    sim_results = []  # rows of [tick, counter value, output value]

    def process():
        # Enough ticks for the counter to overflow
        for i in range(35):
            sim_results.append([i, (yield tinymodule.counter), (yield tinymodule.output)])
            yield Tick()

    sim.add_sync_process(process)
    # 12mhz clock
    sim.add_clock(1/12_000_000)
    # Dump waveforms for inspection in GTKWave, then run the simulation.
    with sim.write_vcd("tinymodule_sim.vcd", "tinymodule_sim.gtkw", traces=tinymodule.ports()):
        sim.run()
    print(tabulate(sim_results, headers=["Clock", "Counter", "Output"]))
| StarcoderdataPython |
9734343 | <reponame>brihijoshi/swaad<filename>app/flask_test.py
from flask import Flask, request, Response
import os
import json
from werkzeug import utils
from aws_detect import detect_labels_local_file
from food2fork import get_recipes
app = Flask(__name__)

# In-memory accumulator of detected ingredient labels for the current session.
# NOTE(review): module-level state shared across all clients/requests.
label_list = []
@app.route('/handshake', methods=['POST'])
def handshake():
    """Reset server state: drop collected labels and delete saved images."""
    print("hit handshake endpoint")
    clear_files()
    del label_list[:]
    return Response(response=None, status=200, mimetype="application/json")
@app.route('/ingredient', methods=['POST'])
def ingredient():
    """Save an uploaded image and return the food label detected in it."""
    print("hit ingredient endpoint")
    uploaded = request.files['image']
    next_id = get_latest_file_id() + 1
    safe_name = utils.secure_filename(str(next_id))
    image_path = "images/{}.jpg".format(safe_name)
    uploaded.save(image_path)
    label = detect_labels_local_file(image_path)
    print(label)
    if label:
        label_list.append(label)
    return Response(response=label, status=200, mimetype="application/json")
@app.route('/recipe', methods=['POST'])
def recipe():
    """Return recipes for the collected ingredient labels, then reset state."""
    if not label_list:
        # Nothing collected yet -- nothing to search for.
        return Response(response=None, status=400, mimetype="application/json")
    print(str(label_list))
    matches = get_recipes(label_list)
    if matches is None:
        return Response(response=None, status=200, mimetype="application/json")
    label_list.clear()
    clear_files()
    payload = json.dumps({"results": matches})
    return Response(response=payload, status=200, mimetype="application/json")
def clear_files():
    """Delete every saved .jpg image from the local ``images`` directory."""
    for file in os.listdir("images"):
        # Match the real extension (".jpg"); the original `endswith("jpg")`
        # also deleted names that merely end in "jpg" without an extension.
        if file.endswith(".jpg"):
            os.unlink(os.path.join("images", file))
def get_latest_file_id():
    """Return the highest numeric id among ``images/<id>.jpg`` files (min 1).

    Only names of the exact form ``<digits>.jpg`` are considered; other
    files are ignored instead of crashing int() (the original matched any
    name ending in "jpg" and stripped ".jpg" with str.replace, which raised
    ValueError for names like "endsjpg").  Also avoids shadowing the
    builtin ``max``.
    """
    latest = 1
    for file in os.listdir("images"):
        stem, ext = os.path.splitext(file)
        if ext == ".jpg" and stem.isdigit():
            latest = max(latest, int(stem))
    return latest
# start flask app: wipe any leftover saved images before serving requests
clear_files()
app.run(host="0.0.0.0", port=80) | StarcoderdataPython |
9796510 | <filename>my intereseted short codes/Omniaz/test_HOG.py
#%%
# importing required libraries
from skimage.io import imread
from skimage.transform import resize
from skimage.feature import hog
from skimage import exposure  # NOTE(review): imported but unused below
import matplotlib.pyplot as plt

# reading the image and showing it without axes
img = imread('1.jpg')
plt.axis("off")
plt.imshow(img)
print(img.shape)

# resizing image to 4x the canonical 128x64 HOG detection window
resized_img = resize(img, (128*4, 64*4))
plt.axis("off")
plt.imshow(resized_img)
print(resized_img.shape)

# creating hog features: `fd` is the flattened descriptor vector and
# `hog_image` a visualization of the oriented-gradient cells
fd, hog_image = hog(resized_img, orientations=9, pixels_per_cell=(8, 8),
                    cells_per_block=(2, 2), visualize=True, multichannel=True)
plt.axis("off")
plt.imshow(hog_image, cmap="gray")
# %%
# keep only the strong descriptor components (above half the max response)
feature = fd[fd>(max(fd)/2)]
print(feature)
# %%
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.