index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
983,600 | a6bb0d83bbb1836103fb4edd82e31a4c7176f0f8 | """
Unittest for familytree.csv_parser module.
"""
from pathlib import Path
from tempfile import TemporaryDirectory
from familytree.csv_parser import parse_csv
from familytree.person import Person
from familytree.table import Table
# Three-person fixture: a married couple (ids 1 and 2, mutual spouse_id)
# and their first-born child (id 3). Leading newline stripped so the header
# row is line one of the file.
SAMPLE_CSV = """
id,family_name,first_name,gender,father_id,mother_id,birth_order,spouse_id
1, XXX, AAA, M, , , , 2
2, XXX, BBB, F, , , , 1
3, XXX, CCC, M, 1, 2, 1,
""".lstrip('\n')
class TestParseCsv:
    """Round-trip test: parse_csv turns SAMPLE_CSV rows into Person records."""

    def test(self):
        with TemporaryDirectory() as tmp_dir:
            # Materialise the sample CSV on disk so parse_csv can read it.
            csv_path = Path(tmp_dir) / 'sample.csv'
            csv_path.write_text(SAMPLE_CSV)
            table = parse_csv(str(csv_path))
            # Three rows in, three Person records out, in file order.
            assert len(table) == 3
            first, second, third = list(table)
            assert first == Person(1, "XXX AAA", "M", spouse_id=2)
            assert second == Person(2, "XXX BBB", "F", spouse_id=1)
            assert third == Person(3, "XXX CCC", "M",
                                   father_id=1, mother_id=2, birth_order=1)
|
983,601 | ea9192430adf2667f623090049cfa6e367ea3d62 | from datetime import datetime
import peewee as orm
from connectors.sqlite import db_sqlite
from models.users import User
class Note(orm.Model):
    """A user-authored note stored in the SQLite database."""

    author = orm.ForeignKeyField(User)   # owner of the note
    title = orm.CharField(max_length=160)
    body = orm.TextField()
    # Stamp creation time automatically on insert. `datetime` was imported
    # at module level but never used — callers previously had to supply
    # created_at by hand on every insert.
    created_at = orm.DateTimeField(default=datetime.now)
    edited_at = orm.DateTimeField(null=True)  # unset until the first edit

    class Meta:
        # Bind the model to the shared SQLite connection.
        database = db_sqlite
|
983,602 | 39202a4a8d117cafbf106f71239c72ef07d65c57 | import json
import bcrypt
import jwt
from django.test import TestCase, Client, TransactionTestCase
from random import randint
from unittest.mock import patch
from unittest.mock import Mock, MagicMock, call
from user.models import User, PhoneVerification, UserGrade
from my_settings import (
SECRET,
ALGORITHM,
)
# NOTE(review): every test below constructs its own Client(), so this
# module-level instance appears unused — confirm before removing.
client = Client()
class SendSmsTest(TransactionTestCase):
    """POST /user/sms should accept a phone number and report success."""

    def test_send_sms(self):
        payload = json.dumps({'phone': '01085327254'})
        response = Client().post('/user/sms', payload,
                                 content_type='application/json')
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.json(), {'message': 'SUCCESS'})
class CheckSmsTest(TransactionTestCase):
    """POST /user/sms_verification should validate a previously issued code."""

    def setUp(self):
        # Seed one verification row so the test can echo its code back.
        PhoneVerification.objects.update_or_create(
            phone='01085327254',
            code=str(randint(100000, 999999)),
        )

    def tearDown(self):
        PhoneVerification.objects.all().delete()
        User.objects.all().delete()

    def test_check_sms(self):
        stored = PhoneVerification.objects.get(phone='01085327254')
        payload = json.dumps({'phone': '01085327254', 'code': stored.code})
        response = Client().post('/user/sms_verification', payload,
                                 content_type='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), {'message': 'SUCCESS'})
class SignUpTest(TestCase):
    """POST /user/signup should create an account for a verified phone."""

    def setUp(self):
        # Seed the default grade, a phone-verification row, and one existing
        # user so uniqueness checks have something to compare against.
        hashed_pw = bcrypt.hashpw('wecode1@'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8')
        UserGrade.objects.create(
            id=1,
            name='기본',
            description='기본회원입니다'
        )
        PhoneVerification.objects.update_or_create(
            phone='01085327254',
            code=str(randint(100000, 999999)),
            id=1
        )
        User.objects.create(
            id=1,
            name='테스트',
            email='test@naver.com',
            password=hashed_pw,
            nickname='테스트',
            grade_id=1
        )

    def tearDown(self):
        PhoneVerification.objects.all().delete()
        User.objects.all().delete()
        UserGrade.objects.all().delete()

    def test_signup(self):  # fixed method-name typo: was `test_singup`
        client = Client()
        data = {
            'name' : 'kim',
            'email' : 'we@naver.com',
            'password' : 'wecode1@',
            'phone' : '01235327230'
        }
        response = client.post('/user/signup', json.dumps(data), content_type='application/json')
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.json(), {'message': 'SUCCESS'})
class SignInTest(TestCase):
    """POST /user/signin should authenticate and return an access token."""

    def setUp(self):
        hashed_pw = bcrypt.hashpw('wecode1@'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8')
        UserGrade.objects.create(
            id=1,
            name='기본',
            description='기본회원입니다'
        )
        User.objects.create(
            name='kim',
            email='kim@naver.com',
            password=hashed_pw,
            number='01988332223',
            grade_id=1
        )
        PhoneVerification.objects.update_or_create(
            phone="01044442222",
            code="000022",
            id=1
        )

    def tearDown(self):
        UserGrade.objects.all().delete()
        User.objects.all().delete()
        PhoneVerification.objects.all().delete()

    def test_signin(self):
        client = Client()
        data = {
            'email' : 'kim@naver.com',
            'password' : 'wecode1@'
        }
        response = client.post('/user/signin', json.dumps(data), content_type='application/json')
        body = response.json()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(body['message'], 'SUCCESS')
        # The token value is opaque. The old assertion compared the token
        # to itself (response.json()['access_token']) and could never fail;
        # just verify a non-empty token was issued.
        self.assertIn('access_token', body)
        self.assertTrue(body['access_token'])
class ProfileDataTest(TestCase):
    """GET /user/profiledata should return the current user's profile.

    NOTE(review): the expected payload below (name 'we', email
    'wecode@naver.com', ...) does not match the fixture created in setUp
    (name '테스트', email 'test@naver.com'). Presumably the view serves a
    different/hard-coded user — confirm against the view implementation.
    """
    def setUp(self):
        # Minimal related rows so User FK constraints are satisfied.
        PhoneVerification.objects.create(
            id = 1
        )
        UserGrade.objects.create(
            id=1,
            name='기본',
            description='기본회원입니다'
        )
        User.objects.create(
            id=1,
            name='테스트',
            email='test@naver.com',
            password='asdfasdf',
            nickname='테스트',
            grade_id=1
        )
    def tearDown(self):
        PhoneVerification.objects.all().delete()
        UserGrade.objects.all().delete()
        User.objects.all().delete()
    def test_profiledata(self):
        client = Client()
        response = client.get('/user/profiledata', content_type='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(),{
            'message':'SUCCESS','result':{
                'name':'we',
                'email' : 'wecode@naver.com',
                'number' : '01033334444',
                'image' : 'https://s3.ap-northeast-2.amazonaws.com/ac101/49c40c1c510411ebb06bc4b301cd19f5',
                'nickname' : 'qqq'
            }
        })
983,603 | 2ea051afa6db164d210c012a9d03cc8701e76c63 | # Generated by Django 3.2.7 on 2021-09-27 18:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the menu app (auto-generated by Django 3.2.7).

    Creates the Content, Menu, PageContent and StaticPages tables, then
    adds the Content<->PageContent many-to-many link.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Content: container whose texts live in PageContent rows (M2M below).
        migrations.CreateModel(
            name='Content',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'verbose_name': 'Контент',
                'verbose_name_plural': 'Контент',
                'db_table': 'content',
            },
        ),
        # Menu: a named navigation menu that StaticPages rows attach to.
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Название')),
            ],
            options={
                'verbose_name': 'Меню',
                'verbose_name_plural': 'Меню',
                'db_table': 'menu',
            },
        ),
        # PageContent: a block of page text referenced by Content.
        migrations.CreateModel(
            name='PageContent',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('page_content', models.TextField(verbose_name='Тексты')),
            ],
            options={
                'verbose_name': 'Содержание страницы',
                'verbose_name_plural': 'Содержание страниц',
                'db_table': 'page_content',
            },
        ),
        # StaticPages: a slugged page belonging to a Menu, with optional
        # Content and an optional self-referential parent (page tree).
        migrations.CreateModel(
            name='StaticPages',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Название страницы')),
                ('slug', models.SlugField(unique=True, verbose_name='Ссылка на страницу')),
                ('content', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='menu.content')),
                ('menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menu.menu')),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='menu.staticpages')),
            ],
            options={
                'verbose_name': 'Страница',
                'verbose_name_plural': 'Страницы',
                'db_table': 'static_pages',
            },
        ),
        # Added after CreateModel('Content') because it references PageContent.
        migrations.AddField(
            model_name='content',
            name='text',
            field=models.ManyToManyField(to='menu.PageContent'),
        ),
    ]
|
983,604 | 08ec3489673e958acc93babaac29585209e9722d | # Write a program to calculate the resources needed
# to remodel a golf course hole.
# See assignment description for details.
#
# The inputs should be:
# Enter Course Length:
# Enter Course Width:
#
# The outputs should be:
# Total square yards of rough sod:
# Total square yards of smooth sod:
# Tons of sand:
# Number of retaining wall bricks:
# Number of bushes:
# Total Mowing Time (mins):
import math
from math import pi
def calculate_smooth_sod(width):
    """Return the rounded area (sq yd) of smooth sod: two circles whose
    radius is a quarter of the course width."""
    r = width / 4
    return round(2 * pi * r ** 2)
def calculate_sand_area(width):
    """Return the rounded area (sq yd) of the circular sand trap, whose
    radius is one sixth of the course width."""
    r = width / 6
    return round(pi * r ** 2)
def calculate_sand_tons(width):
    """Return rounded tons of sand for the trap.

    Width is in yards; the trap radius is measured in feet (3 ft/yd),
    sand weighs 100 lb per square foot, and a ton is 2000 lb.
    """
    radius_ft = width * 3 / 6
    trap_area_ft2 = pi * radius_ft ** 2
    return round(trap_area_ft2 * 100 / 2000)
def calculate_amount_of_bricks(width):
    """Return the rounded brick count for the retaining wall: the wall runs
    pi * trap-radius feet and uses 3 bricks per foot."""
    wall_length_ft = pi * (width * 3 / 6)
    return round(3 * wall_length_ft)
def calculate_amount_of_bushes(length, width):
    """Return the rounded bush count: one per perimeter yard, minus two
    (presumably for the course entrance)."""
    return round(2 * (length + width) - 2)
def calculate_rough_sod(length, width, smooth_sod_area, sand_trap_area):
    """Return the rounded rough-sod area: whatever part of the course is
    covered by neither smooth sod nor the sand trap."""
    total_area = length * width
    return round(total_area - (smooth_sod_area + sand_trap_area))
def calculate_time_to_mow(rough_sod_area, smooth_sod_area):
    """Return the rounded total mowing time.

    Rough sod mows at 0.5 min per square yard.
    NOTE(review): the smooth-sod term is `smooth_sod_area + 1`, not a
    per-yard rate — possibly a typo for `smooth_sod_area * 1`; confirm
    against the assignment description before changing.
    """
    return round(rough_sod_area * 0.5 + (smooth_sod_area + 1))
def display(rough_sod_area, smooth_sod_area, total_sand, amount_of_bricks, amount_of_bushes, total_mow_time):
    """Print the resource report, one labelled line per quantity.

    NOTE(review): the last line divides total_mow_time by 60 although the
    label says "(mins)" — one of the two looks wrong; confirm the units
    produced by calculate_time_to_mow.
    """
    report = [
        ("Total square yards of rough sod: ", rough_sod_area),
        ("Total square yards of smooth sod: ", smooth_sod_area),
        ("Tons of sand: ", total_sand),
        ("Number of retaining wall bricks: ", amount_of_bricks),
        ("Number of bushes: ", amount_of_bushes),
        ("Total mowing time (mins): ", total_mow_time / 60),
    ]
    for label, value in report:
        print(label, value)
def call():
    """Prompt for course dimensions, compute every resource quantity, and
    print the report via display()."""
    # Fixed prompt typo ("Lenght") — the assignment header specifies
    # "Enter Course Length:".
    length = float(input("Enter Course Length: "))
    width = float(input("Enter Course Width: "))
    smooth_sod_area = calculate_smooth_sod(width)
    sand_trap_area = calculate_sand_area(width)
    rough_sod_area = calculate_rough_sod(length, width, smooth_sod_area, sand_trap_area)
    amount_of_bricks = calculate_amount_of_bricks(width)
    total_sand = calculate_sand_tons(width)
    amount_of_bushes = calculate_amount_of_bushes(length, width)
    total_mow_time = calculate_time_to_mow(rough_sod_area, smooth_sod_area)
    display(rough_sod_area, smooth_sod_area, total_sand, amount_of_bricks,
            amount_of_bushes, total_mow_time)
if __name__ == "__main__":  # avoid launching the interactive prompt on import
    call()
|
983,605 | 039a4c152cac796db391d203ff9e48ecb5d6d0d7 | import yaml
class ApiSpec(object):
    """Top-level Swagger document for one model.

    Aggregates the path operations and tags rendered by the Swagger UI
    front end; the JSON layout follows http://swagger.io/specification.
    """

    def __init__(self, model):
        self.model = model            # model this spec documents
        self.paths = PathOperations() # endpoint -> operations
        self.tags = Tags()            # top-level tag list
class PathOperations(object):
    # TODO: Is this class needed?
    """Collection of HTTP-method operations attached to a single endpoint
    path; each entry in `operations` is one Swagger operation."""

    def __init__(self):
        # Accumulates operation attributes merged in via add_operation().
        self.operations = {}

    def add_operation(self, operation):
        # TODO: Is this needed?
        """Merge *operation*'s instance attributes into this path.

        :param operation: Operation instance whose __dict__ is merged in
        """
        self.operations.update(vars(operation))
class Operation(object):
    """Swagger Operation Object: one HTTP method on one endpoint path.

    See http://swagger.io/specification/#operationObject.
    """

    def __init__(self, tags=None, summary="", description="", consumes=None, produces=None, parameters=None, responses=None):
        """
        Operation Object
        :param tags: list, tag names grouping this operation in the UI
        :param summary: string, short summary of the operation
        :param description: string, verbose explanation
        :param consumes: list, MIME types the operation accepts
        :param produces: list, MIME types the operation returns
        :param parameters: list, Swagger parameter dicts
        :param responses: dict, status code -> response object
        :raises TypeError: when an argument is not of the documented type
        """
        # isinstance() replaces the former `type(x) is T` checks so list/dict
        # subclasses are accepted; rejection behaviour (bare TypeError) is kept.
        self.tags = self._require(list, tags if tags is not None else [])
        self.summary = self._require(str, summary)
        self.description = self._require(str, description)
        self.consumes = self._require(list, consumes if consumes is not None else [])
        self.produces = self._require(list, produces if produces is not None else [])
        self.parameters = self._require(list, parameters if parameters is not None else [])
        self.responses = self._require(dict, responses if responses is not None else {})

    @staticmethod
    def _require(expected_type, value):
        """Return *value* unchanged, or raise TypeError if it is not an
        instance of *expected_type*."""
        if not isinstance(value, expected_type):
            raise TypeError
        return value

    def yaml_operation_parse(self, path_to_yaml, schema_name):
        """
        Parse YAML containing the Swagger information for an operation
        :param schema_name: string, name of Schema Object in the Definitions Object representing the Operation schema
        :param path_to_yaml: string, absolute path to YAML file containing Swagger Operation details
        """
        # TODO: Add validation logic for YAML
        with open(path_to_yaml, 'r') as f:
            # safe_load: plain yaml.load can instantiate arbitrary Python
            # objects from an untrusted file (and is deprecated w/o a Loader).
            api_doc = yaml.safe_load(f)
        self.tags = []
        self.summary = api_doc['summary']
        self.description = api_doc['description']
        if self.valid_content_type(api_doc['consumes']):
            self.consumes = api_doc['consumes']
        if self.valid_content_type(api_doc['produces']):
            self.produces = api_doc['produces']
        self.parameters = api_doc['parameters']
        self.responses = api_doc['responses']
        # TODO: Make sure all operation parameters have been filled with valid values
        self.yaml_operation_update(schema_name)

    def valid_content_type(self, content_type):
        """Return True when *content_type* is a supported MIME type (else None)."""
        # TODO: Add more content types
        # http://swagger.io/specification/#mimeTypes
        _valid_content_types = ("application/json", "application/xml")
        if content_type in _valid_content_types:
            return True

    def yaml_operation_update(self, schema_name):
        """Tag the operation with *schema_name* and inject default body
        schema $refs into parameters/responses that lack one."""
        self.tags.append(schema_name)
        # Iterate over 'parameters' in YAML
        for i, param in enumerate(self.parameters):
            if 'schema' in param:
                # If 'parameters' in YAML contains the 'schema' key, use it
                # TODO: Add custom schema AND validate its format
                pass
            else:
                # If no 'schema' key present in YAML 'parameters', use default of response 'body'
                if not schema_name.istitle():  # Convert string to Title Case
                    schema_name = schema_name.capitalize()
                self.parameters[i]['schema'] = {'$ref': '#/definitions/' + schema_name + 'Inputs'}
                # TODO: Make 'in' and 'name' descriptors generic
                self.parameters[i]['in'] = 'body'
                self.parameters[i]['name'] = 'body'
        # Iterate over 'responses' in YAML
        for code, desc in self.responses.items():
            if 'schema' in desc:
                # If 'responses' in YAML contains the 'schema' key, use it
                # TODO: Add custom schema AND validate its format
                pass
            else:
                # If no 'schema' key present in YAML 'parameters', use default of response 'body'
                if not schema_name.istitle():  # Convert string to Title Case
                    schema_name = schema_name.capitalize()
                self.responses[code]['schema'] = {'$ref': '#/definitions/' + schema_name + 'Outputs'}

    def get_json(self):
        """Return this operation as a Swagger-ready dict."""
        return dict(
            tags=self.tags,
            summary=self.summary,
            description=self.description,
            consumes=self.consumes,
            produces=self.produces,
            parameters=self.parameters,
            responses=self.responses
        )
# TODO: Update this for other "in"s (e.g. Form Inputs)
class OperationParameters(object):
    """Default Swagger 'body' parameter object; the schema $ref is filled
    in later via update()."""

    def __init__(self):
        # Skeleton for a required JSON body parameter.
        self.json = {
            'in': "body",
            'name': "body",
            'description': "",
            'required': True,
            'schema': {
                "$ref": "#/"
            }
        }

    def update(self, desc="", required=True, definition="#/"):
        """Set description/required/$ref and return the parameter dict."""
        self.json.update(description=desc, required=required)
        self.json['schema']['$ref'] = definition
        return self.json
class OperationResponses(object):
    """Swagger response entry for a single HTTP status code."""

    def __init__(self, return_code, description, schema=None, headers=None):
        """
        :param return_code: HTTP status code; must be one of self.codes
        :param description: human-readable response description
        :param schema: optional Swagger schema dict; defaults to a
            free-form string map when omitted
        :param headers: optional response headers object
        :raises ValueError: when return_code is not a supported code
        """
        self.return_code = return_code
        self.description = description
        self.schema = schema
        self.headers = headers
        # http://www.restapitutorial.com/httpstatuscodes.html - Using the Top 10 Response Codes*
        self.codes = {
            "200": "OK",
            "201": "Created",
            "204": "No Content",
            "304": "Not Modified",
            "400": "Bad Request",
            "401": "Unauthorized",
            "403": "Forbidden",
            "404": "Not Found",
            "409": "Conflict",
            "500": "Internal Server Error",
        }
        if not self.valid_code(str(return_code)):
            raise ValueError('Return status code must be: {0!s}'.format(self.codes.keys()))
        if schema is None:
            # Free-form object: arbitrary string-valued properties.
            self.schema = {
                'type': 'object',
                "additionalProperties": {
                    "type": "string"
                }
            }

    def valid_code(self, code):
        """Return True when *code* is a supported status code (else None)."""
        if code in self.codes:
            return True

    def get_json(self):
        """Return {code: {description, schema}} for the responses map."""
        return {
            self.return_code: {
                'description': self.description,
                'schema': self.schema
            }
        }
class Tags(object):
    # TODO: rename to Tags to represent top-level Swagger JSON (list of tags)
    """Container for top-level Swagger tag objects."""

    def __init__(self):
        # NOTE(review): create_tag() does not append here — callers must add
        # tags to this list themselves; confirm that is intended.
        self.tags = []

    def create_tag(self, model_name, description, external_docs=None):
        """Build and return a Swagger tag dict (not stored on this object)."""
        tag = {'name': model_name, 'description': description}
        if external_docs:
            tag['externalDocs'] = external_docs
        return tag

    def get_tags(self):
        """Return the accumulated tag list."""
        return self.tags
|
983,606 | 4f12ca399b3697617c48d687991a87319725986a | import apache_beam as beam
import math
import cv2
import numpy as np
IMAGE_PX_SIZE = 64  # target x/y resolution (pixels) of each conformed slice
HM_SLICES = 64      # target number of slices per conformed volume
class normalize_conform_zero_center(beam.DoFn):
    """Beam DoFn: normalize, resize/conform, and zero-center a CT volume.

    The processed voxels are stored on ctscan.ncz_data and the raw segmented
    volume is dropped to free memory.
    """

    def process(self, ctscan):
        """Yield *ctscan* with its segmented lung volume normalized to [0,1],
        conformed to IMAGE_PX_SIZE x IMAGE_PX_SIZE x HM_SLICES, and
        zero-centered."""
        image = self.normalize(ctscan.segmented_lungs_core)
        image = self.conform_img(image, img_px_size=IMAGE_PX_SIZE, hm_slices=HM_SLICES)
        image = self.zero_center(image)
        ctscan.segmented_lungs_core = None  # free the raw volume
        ctscan.ncz_data = image
        yield ctscan

    def normalize(self, image):
        """Clip HU values to [-1000, 400] and rescale linearly to [0, 1]."""
        MIN_BOUND = -1000.0
        MAX_BOUND = 400.0
        image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
        image[image > 1] = 1.
        image[image < 0] = 0.
        return image

    def chunks(self, l, n, hm_slices=HM_SLICES):
        """Yield up to *hm_slices* consecutive groups of *n* slices from *l*.

        Fix: the docstring previously advertised an hm_slices parameter but
        the cap was hard-wired to the HM_SLICES global; it is now a real
        parameter whose default preserves the old behaviour.
        """
        count = 0
        for i in range(0, len(l), n):
            if count < hm_slices:
                yield l[i:i + n]
                count += 1

    def mean(self, l):
        """Arithmetic mean of a list."""
        return sum(l) / len(l)

    def conform_img(self, img, img_px_size, hm_slices):
        """Resize every slice to img_px_size x img_px_size, then average
        groups of slices down to exactly *hm_slices* slices.

        :param img: stacked, HU-converted, normalized CT volume
        :param img_px_size: x/y size of each output slice
        :param hm_slices: number of slices in the output volume (now actually
            used — it was previously ignored in favour of HM_SLICES)
        :returns: numpy array of the conformed volume
        """
        resized = [cv2.resize(one_slice, (img_px_size, img_px_size))
                   for one_slice in img]
        chunk_size = int(math.floor(len(resized) / hm_slices))
        new_slices = [list(map(self.mean, zip(*chunk)))
                      for chunk in self.chunks(resized, chunk_size, hm_slices)]
        return np.array(new_slices)

    def zero_center(self, image):
        """Shift normalized data so it is centered on the dataset-wide
        pixel mean (0.25)."""
        PIXEL_MEAN = 0.25
        return image - PIXEL_MEAN
|
983,607 | aa34db992f8639ec9e2a7e2a5b60827d8babd3a9 | import numpy as np
import pandas as pd
# Clean the Ames housing data: load train/test, drop unused columns,
# engineer binary/derived features, fill missing values, one-hot encode
# categoricals, and write the cleaned splits back to disk.
train_data = pd.read_csv("data/train.csv")
ntrain = len(train_data)
test_data = pd.read_csv("data/test.csv")
# Concatenate so feature engineering / encodings stay consistent across splits.
full_data = pd.concat([train_data, test_data], sort = False, ignore_index = True)
dependent_variable = "SalePrice"
deleted_variables = ["Id", "MSSubClass", "LotFrontage", "Alley", "Utilities", "LandSlope", "Condition2", "YearRemodAdd",
"RoofMatl", "Exterior2nd", "MasVnrType", "MasVnrArea", "ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"BsmtFinType1", "BsmtFinSF1", "BsmtFinType2", "BsmtFinSF2", "BsmtUnfSF", "Heating", "2ndFlrSF", "LowQualFinSF",
"Electrical", "GrLivArea", "BsmtFullBath", "BsmtHalfBath", "BedroomAbvGr", "KitchenAbvGr", "Functional", "FireplaceQu",
"GarageType", "GarageYrBlt", "GarageFinish", "GarageCars", "GarageQual", "GarageCond", "ScreenPorch", "PoolQC",
"Fence", "MiscFeature", "MoSold", "YrSold", "SaleType"]
full_data = full_data.drop(deleted_variables, axis = 1)
# We drop MSSubClass since it represents categories created by house vendors base on
# age and type of building. Which are already in HouseStyle and Year
# For BldgType, we group (1Fam, TwnhsE) and (Duplex, Twnhs, 2fmCon) to have a single familiy variable
full_data["SingleFam"] = full_data["BldgType"].apply(lambda x: 1 if (x == "1Fam" or x == "TwnhsE") else 0)
full_data = full_data.drop("BldgType", axis = 1)
# OverallQual is categorical but ordered from 1 to 10
# YearBuilt we replace by age
full_data["Age"] = full_data["YearBuilt"].apply(lambda x: 2020 - x)
full_data = full_data.drop("YearBuilt", axis = 1)
# RoofStyle group (Gable, Gambrel, Mansard) and (Hip, Flat, Shed)
# Exterior1st One Nan value
# FullBath : unknown category in testing set, categorical ordered
# Also join FullBath + 0.5 * HalfBath
full_data["NumBath"] = full_data.apply(lambda x: int(x["FullBath"]) + 0.5 * int(x["HalfBath"]), axis = 1)
full_data = full_data.drop(["FullBath", "HalfBath"], axis = 1)
# TotalBsmtSF transform in categorical has basement or not
full_data["HasBasement"] = full_data["TotalBsmtSF"].apply(lambda x: 1 if (x > 0) else 0)
full_data = full_data.drop("TotalBsmtSF", axis = 1)
# We drop BedroomAbvGr and KitchenAbvGr since summarized in TotRmsAbvGrd
# We drop Functional since summarized in OverAllQual
# Fireplaces group by has fireplace or not
full_data["HasFireplace"] = full_data["Fireplaces"].apply(lambda x: 1 if (x > 0) else 0)
full_data = full_data.drop("Fireplaces", axis = 1)
# WoodDeckSF and OpenPorchSF and EnclosedPorch and PoolArea, do categorical, has or not
full_data["HasWoodDeck"] = full_data["WoodDeckSF"].apply(lambda x: 1 if (x > 0) else 0)
full_data = full_data.drop("WoodDeckSF", axis = 1)
full_data["HasOpenPorch"] = full_data["OpenPorchSF"].apply(lambda x: 1 if (x > 0) else 0)
full_data = full_data.drop("OpenPorchSF", axis = 1)
full_data["HasClosedPorch"] = full_data["EnclosedPorch"].apply(lambda x: 1 if (x > 0) else 0)
full_data = full_data.drop("EnclosedPorch", axis = 1)
full_data["HasSsnPorch"] = full_data["3SsnPorch"].apply(lambda x: 1 if (x > 0) else 0)
full_data = full_data.drop("3SsnPorch", axis = 1)
full_data["HasPool"] = full_data["PoolArea"].apply(lambda x: 1 if (x > 0) else 0)
full_data = full_data.drop("PoolArea", axis = 1)
# We complete the missing data by the most common categories
full_data["KitchenQual"] = full_data["KitchenQual"].fillna("TA")
# Bug fix: GarageArea is a numeric column; filling with the string "0"
# produced a mixed-dtype column. Fill with the number 0 instead.
full_data["GarageArea"] = full_data["GarageArea"].fillna(0)
full_data["MSZoning"] = full_data["MSZoning"].fillna("RL")
full_data["Exterior1st"] = full_data["Exterior1st"].fillna("VinylSd")
# We need to transform the categorical variables into 0 and 1
categorical_variables = ["Street", "CentralAir", "MSZoning", "LotShape", "LandContour", "LotConfig", "Neighborhood", "Condition1",
"HouseStyle", "RoofStyle", "Exterior1st", "Foundation", "HeatingQC", "KitchenQual", "PavedDrive",
"SaleCondition"]
full_data_dummies = full_data
for column in categorical_variables:
    # Map each category to an integer (ordered by frequency), then expand
    # into one-hot columns prefixed with the source column name.
    di = {}
    i = 0
    for ind in np.asarray(full_data[column].value_counts().index):
        di[str(ind)] = i
        i += 1
    full_data[column] = full_data[column].map(di)
    pclass_dummies = pd.get_dummies(full_data[column], prefix = column)
    full_data_dummies = pd.concat([full_data_dummies, pclass_dummies], axis = 1)
full_data_dummies = full_data_dummies.drop(categorical_variables, axis = 1)
# Split back into the original train/test partitions and persist.
train_data = full_data_dummies.iloc[:ntrain, :]
test_data = full_data_dummies.iloc[ntrain:, :]
train_data.to_csv("data/train_clean.csv", index = False)
test_data.to_csv("data/test_clean.csv", index = False)
|
983,608 | f305369767fbff90a759bc55a45a27dfffba06a6 | #!/usr/bin/env python
# coding: utf-8
import sys
import os
sys.path.append(os.path.abspath('../../stratipy'))
from stratipy import (load_data, formatting_data, filtering_diffusion,
nmf_bootstrap, consensus_clustering,
hierarchical_clustering, biostat, biostat_go,
biostat_plot, parameters)
import importlib # NOTE for python >= Python3.4
import scipy.sparse as sp
import numpy as np
import time
import datetime
from scipy.io import loadmat, savemat
def initiation(mut_type, alpha, patient_data, data_folder, ssc_mutation_data,
               ssc_subgroups, gene_data, ppi_data, lambd, n_components):
    """Resolve the effective alpha and the result directory for this run.

    'raw' mutation profiles skip network diffusion, so alpha is forced to 0.
    NOTE(review): for SSC data the path embeds an absolute
    '/Volumes/Abu3/...' segment appended to data_folder — looks machine
    specific (the commented lines show the generic form); confirm before
    reusing elsewhere.
    """
    if mut_type == 'raw':
        alpha = 0
    if patient_data == 'SSC':
        # result_folder = (
        #     data_folder + 'result_' + ssc_mutation_data + '_' +
        #     ssc_subgroups + '_' + gene_data + '_' + ppi_data + '/')
        result_folder = (
            data_folder + '/Volumes/Abu3/min/201812_MAF50_alpha0.7/result_' + ssc_mutation_data + '_' +
            ssc_subgroups + '_' + gene_data + '_' + ppi_data + '/')
    else:
        result_folder = '{}result_{}_{}/'.format(
            data_folder, patient_data, ppi_data)
    # Echo the run configuration for the pipeline logs.
    print(result_folder, flush=True)
    print("mutation type =", mut_type, flush=True)
    print("alpha =", alpha, flush=True)
    print("lambda =", lambd, flush=True)
    print("k =", n_components, flush=True)
    return alpha, result_folder
def preprocessing(data_folder, patient_data, ssc_mutation_data, ssc_subgroups, gene_data,
                  ppi_data, result_folder, influence_weight, simplification,
                  compute, overwrite, alpha, tol, ngh_max,
                  keep_singletons, min_mutation, max_mutation, mut_type):
    """Load the mutation profiles and PPI network for the chosen cohort,
    align them, and run network diffusion/filtering.

    Dispatches to the dataset-specific loader (TCGA_UCEC / Faroe / SSC) and
    PPI loader, then formats and filters the data.

    :returns: (gene_id_ppi, idx_ppi, idx_ppi_only) — gene ids of the PPI
        network and index mappings produced by formatting_data.formatting.
    """
    print("------------ load_data.py ------------ {}"
          .format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
          flush=True)
    # Cohort-specific loaders return slightly different tuples; only the
    # pieces needed downstream are kept.
    if patient_data == 'TCGA_UCEC':
        (patient_id, mutation_profile, gene_id_patient, gene_symbol_profile) = (
            load_data.load_TCGA_UCEC_patient_data(data_folder))
    elif patient_data == 'Faroe':
        mutation_profile, gene_id_patient = (
            load_data.load_Faroe_Islands_data(data_folder))
    elif patient_data == 'SSC':
        mutation_profile, gene_id_patient, patient_id = (
            load_data.load_specific_SSC_mutation_profile(
                data_folder, ssc_mutation_data, ssc_subgroups, gene_data))
    # PPI network source: Hofree's STRING variant has its own loader.
    if ppi_data == 'Hofree_STRING':
        gene_id_ppi, network = load_data.load_Hofree_PPI_String(
            data_folder, ppi_data)
    else:
        gene_id_ppi, network = load_data.load_PPI_network(
            data_folder, ppi_data)
    print("------------ formatting_data.py ------------ {}"
          .format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
          flush=True)
    # Align mutation-profile genes with PPI genes.
    idx_ppi, idx_ppi_only, ppi_total, mut_total, ppi_filt = (
        formatting_data.formatting(
            network, mutation_profile, gene_id_ppi, gene_id_patient))
    print("------------ filtering_diffusion.py ------------ {}"
          .format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
          flush=True)
    # Diffuse mutations over the network and filter (results are cached in
    # result_folder; ppi_final/mut_propag are reloaded later from disk).
    ppi_final, mut_propag = (
        filtering_diffusion.filtering(
            ppi_filt, result_folder, influence_weight, simplification, compute,
            overwrite, alpha, tol, ppi_total, mut_total, ngh_max,
            keep_singletons, min_mutation, max_mutation, mut_type))
    return gene_id_ppi, idx_ppi, idx_ppi_only
def parallel_bootstrap(result_folder, mut_type, influence_weight,
                       simplification, alpha, tol, keep_singletons,
                       ngh_max, min_mutation, max_mutation, n_components,
                       n_permutations, run_bootstrap, lambd, tol_nmf,
                       compute_gene_clustering, sub_perm):
    """Run one bootstrap sub-permutation of (graph-regularized) NMF.

    Reloads the diffused mutation profile and the filtered PPI network from
    the cached final-influence .mat files, then hands them to
    nmf_bootstrap.bootstrap.
    """
    influence_dir = result_folder + 'final_influence/'
    mutation_file = (
        influence_dir +
        'final_influence_mutation_profile_{}_alpha={}_tol={}.mat'.format(
            mut_type, alpha, tol))
    mut_propag = loadmat(mutation_file)['mut_propag']
    ppi_file = (
        influence_dir +
        'PPI_final_weight={}_simp={}_alpha={}_tol={}_singletons={}_ngh={}.mat'
        .format(influence_weight, simplification, alpha, tol, keep_singletons,
                ngh_max))
    ppi_final = loadmat(ppi_file)['ppi_final']
    nmf_bootstrap.bootstrap(
        result_folder, mut_type, mut_propag, ppi_final,
        influence_weight, simplification, alpha, tol, keep_singletons,
        ngh_max, min_mutation, max_mutation, n_components,
        n_permutations, run_bootstrap, lambd, tol_nmf,
        compute_gene_clustering, sub_perm)
def post_bootstrap(result_folder, mut_type, influence_weight, simplification,
                   alpha, tol, keep_singletons, ngh_max, min_mutation,
                   max_mutation, n_components, n_permutations, lambd, tol_nmf,
                   compute_gene_clustering, run_consensus, linkage_method,
                   ppi_data, patient_data, data_folder, ssc_subgroups,
                   ssc_mutation_data, gene_data, p_val_threshold, compute,
                   overwrite):
    """Post-bootstrap analysis stage.

    Currently only re-runs preprocessing (to recover the gene index
    mappings) and the biostat analysis; the consensus-clustering,
    hierarchical-clustering, GO-enrichment and plotting stages are
    deliberately disabled below — presumably already computed in earlier
    runs. Re-enable them as needed.
    """
    # print("------------ consensus_clustering.py ------------ {}"
    #       .format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
    #       flush=True)
    # distance_genes, distance_patients = (
    #     consensus_clustering.sub_consensus(
    #         result_folder, mut_type, influence_weight, simplification, alpha,
    #         tol, keep_singletons, ngh_max, min_mutation, max_mutation,
    #         n_components, n_permutations, lambd, tol_nmf,
    #         compute_gene_clustering, run_consensus))
    #
    # print("------------ hierarchical_clustering.py ------------ {}"
    #       .format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
    #       flush=True)
    # hierarchical_clustering.hierarchical(
    #     result_folder, distance_genes, distance_patients, ppi_data, mut_type,
    #     influence_weight, simplification, alpha, tol, keep_singletons, ngh_max,
    #     min_mutation, max_mutation, n_components, n_permutations, lambd,
    #     tol_nmf, linkage_method, patient_data, data_folder, ssc_subgroups,
    #     ssc_mutation_data, gene_data)
    print("\n------------ biostat.py ------------ {}"
          .format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
          flush=True)
    # Re-run preprocessing to obtain the gene/PPI index mappings that the
    # biostat analysis needs (diffusion results are cached, so this is
    # mostly a reload).
    gene_id_ppi, idx_ppi, idx_ppi_only = preprocessing(
        data_folder, patient_data, ssc_mutation_data, ssc_subgroups, gene_data,
        ppi_data, result_folder, influence_weight, simplification, compute,
        overwrite, alpha, tol, ngh_max, keep_singletons, min_mutation,
        max_mutation, mut_type)
    biostat.biostat_analysis(
        data_folder, result_folder, patient_data, ssc_mutation_data,
        ssc_subgroups, ppi_data, gene_data, mut_type, influence_weight,
        simplification, alpha, tol, keep_singletons, ngh_max, min_mutation,
        max_mutation, n_components, n_permutations, lambd, tol_nmf,
        linkage_method, p_val_threshold, gene_id_ppi, idx_ppi, idx_ppi_only)
    # biostat_go.biostat_go_enrichment(
    #     alpha, result_folder, mut_type, patient_data, data_folder, ssc_mutation_data,
    #     ssc_subgroups, gene_data, ppi_data, lambd, n_components, ngh_max, n_permutations)
    print("\n------------ biostat_plot.py ------------ {}"
          .format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
          flush=True)
    # no need SSC1/SSC2, no need k
    # biostat_plot.load_plot_biostat_individuals(
    #     result_folder, data_folder, ssc_mutation_data,
    #     gene_data, patient_data, ppi_data, mut_type, lambd, influence_weight,
    #     simplification, alpha, tol, keep_singletons, ngh_max, min_mutation,
    #     max_mutation, n_components, n_permutations, tol_nmf, linkage_method)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
def all_functions(i, step):
    """Run one pipeline step for the i-th parameter combination.

    i: 0-based index into the parameter grid from parameters.get_params.
    step: one of "preprocessing", "parallel_bootstrap", "clustering".
    """
    # NOTE(review): `sub_perm` appears twice in this unpacking; the second
    # occurrence silently overwrites the first.  This only works if
    # get_params really returns the value in both positions — confirm.
    (data_folder, patient_data, ssc_mutation_data, ssc_subgroups,
     gene_data, ppi_data, influence_weight, simplification, compute,
     overwrite, alpha, tol, ngh_max, keep_singletons, min_mutation,
     max_mutation, mut_type, n_components, n_permutations, sub_perm, sub_perm,
     run_bootstrap, run_consensus, lambd, tol_nmf, compute_gene_clustering,
     linkage_method, p_val_threshold) = parameters.get_params(i)
    # Resolve the effective alpha and the output directory for this run.
    alpha, result_folder = initiation(
        mut_type, alpha, patient_data, data_folder, ssc_mutation_data,
        ssc_subgroups, gene_data, ppi_data, lambd, n_components)
    if step == "preprocessing":
        print('\n############ preprocessing step ############', flush=True)
        preprocessing(data_folder, patient_data, ssc_mutation_data, ssc_subgroups,
                      gene_data, ppi_data, result_folder, influence_weight,
                      simplification, compute, overwrite, alpha, tol,
                      ngh_max, keep_singletons, min_mutation, max_mutation,
                      mut_type)
    if step == "parallel_bootstrap":
        print('\n############ parallel_bootstrap step ############',
              flush=True)
        parallel_bootstrap(result_folder, mut_type, influence_weight,
                           simplification, alpha, tol, keep_singletons,
                           ngh_max, min_mutation, max_mutation,
                           n_components, n_permutations, run_bootstrap,
                           lambd, tol_nmf, compute_gene_clustering,
                           sub_perm)
    if step == "clustering":
        print('\n############ clustering step ############', flush=True)
        post_bootstrap(result_folder, mut_type, influence_weight,
                       simplification, alpha, tol, keep_singletons,
                       ngh_max, min_mutation, max_mutation, n_components,
                       n_permutations, lambd, tol_nmf,
                       compute_gene_clustering, run_consensus,
                       linkage_method, ppi_data, patient_data, data_folder,
                       ssc_subgroups, ssc_mutation_data, gene_data,
                       p_val_threshold, compute, overwrite)
    # if step == "all":
    #
    #
    #
    #
    # consensus_file = consensus_clustering.consensus_file(
    #     result_folder, influence_weight, simplification, mut_type, alpha, tol,
    #     keep_singletons, ngh_max, min_mutation, max_mutation, n_components,
    #     n_permutations, lambd, tol_nmf)
    #
    # # genes_clustering, patients_clustering directely from full bootstrap
    # distance_genes, distance_patients = (
    #     consensus_clustering.consensus_from_full_bootstrap(
    #         consensus_file, genes_clustering, patients_clustering,
    #         run_consensus, compute_gene_clustering))
# CLI entry: first argument selects the parameter set (1-based on the
# command line, converted to 0-based here); second selects the step.
i = int(sys.argv[1])-1
step = sys.argv[2]
all_functions(i, step)
|
983,609 | c3b04d109fd5af7691855139583c23d0875f42fe | import numpy as np
import scipy.stats as sp
from math import exp, pi, sqrt
class Classifier(object):
    """Gaussian naive Bayes classifier.

    Each class is modelled by per-feature mean and standard deviation
    estimated from the training vectors; prediction picks the class whose
    product of per-feature normal densities is highest.
    """

    def __init__(self):
        # Maps class label -> [mean vector, std vector] (numpy arrays).
        self.Classes = {}

    def fit(self, training_vectors, training_labels):
        """Estimate per-class feature means and standard deviations.

        training_vectors: sequence of equal-length numeric feature vectors.
        training_labels: sequence of hashable class labels, same length.
        """
        grouped = {}
        for vector, label in zip(training_vectors, training_labels):
            grouped.setdefault(label, []).append(vector)
        for label, vectors in grouped.items():
            # np.std defaults to ddof=0 (population std), as before.
            self.Classes[label] = [np.mean(vectors, axis=0),
                                   np.std(vectors, axis=0)]

    def predict(self, test_vector):
        """Return the most likely class label for test_vector.

        test_vector is a 2-D, single-row container; only row 0 is scored.
        Ties are broken in favour of the class inserted last (the
        comparison uses >=, matching the original behaviour).
        """
        best_label = None
        best_score = 0
        for label, (means, stds) in self.Classes.items():
            score = 1
            for i in range(len(test_vector[0])):
                score *= self.normpdf(test_vector[0][i], means[i], stds[i])
            if score >= best_score:
                best_label = label
                best_score = score
        return best_label

    def normpdf(self, x, mu, sigma):
        """Probability density of N(mu, sigma^2) evaluated at x."""
        u = (float(x) - mu) / abs(sigma)
        return (1.0 / (sqrt(2.0 * pi) * abs(sigma))) * exp(-u * u / 2.0)
|
983,610 | f44fa3205e1227b942acd6fce223629446294afb | from django.db import models
from django.utils.translation import ugettext_lazy as _
from core.models import TimeStampedModel
from honey_tip.settings import AUTH_USER_MODEL
class Category(TimeStampedModel):
    """Topic grouping that tips are filed under."""

    name = models.CharField(_('name'), max_length=30, null=False)

    class Meta:
        verbose_name = _('ht_category')
        verbose_name_plural = _('categories')
        db_table = 'category'
class Tip(TimeStampedModel):
    """A honey-tip post written by a user, filed under a category."""

    title = models.CharField(_('title'), max_length=50, null=False)
    content = models.CharField(_('content'), max_length=800, null=False)
    # Keep the tip when its author/category is deleted; just null the link.
    user = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
    category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True)

    class TipStatus(models.TextChoices):
        # 3-letter DB codes with translatable human-readable labels.
        POSTED: tuple = 'PST', _('Posted')
        REMOVED: tuple = 'RMV', _('Removed')
        HIDDEN: tuple = 'HID', _('Hidden')
        REPORTED: tuple = 'RPT', _('Reported')

    status = models.CharField(_('status'), choices=TipStatus.choices, max_length=3, default=TipStatus.POSTED)

    class Meta:
        verbose_name = _('ht_tip')
        verbose_name_plural = _('tips')
        # db_table must be a plain string, not a lazy translation proxy;
        # this also matches Category.Meta above.
        db_table = 'tip'
class Report(TimeStampedModel):
    """A user's report flagging a tip for moderation."""

    # Deleting the reporting user or the reported tip removes the report.
    user = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE)
    tip = models.ForeignKey(Tip, on_delete=models.CASCADE)

    class ReportStatus(models.TextChoices):
        # 3-letter DB codes with translatable human-readable labels.
        PENDING: tuple = 'PND', _('Pending')
        REJECTED: tuple = 'RJT', _('Rejected')
        APPROVED: tuple = 'APV', _('Approved')

    status = models.CharField(_('status'), choices=ReportStatus.choices, max_length=3, default=ReportStatus.PENDING)

    class Meta:
        verbose_name = _('ht_report')
        verbose_name_plural = _('reports')
        # db_table must be a plain string, not a lazy translation proxy;
        # consistent with Category.Meta above.
        db_table = 'report'
|
983,611 | 7c043260e91dbf99e63272ac8f58a9de8c0bea12 | #!/usr/bin/env python
from pbsv.independent.utils import is_bed, is_vcf
from pbsv.io.VcfIO import VcfReader, VcfWriter, VcfRecord, BedReader, BedWriter, BedRecord
transform_desc = 'Transform structural variants coordinates from substring to chromosome.'
def get_reader(fn):
    """Pick a record reader for `fn` based on its extension (BED or VCF)."""
    if is_bed(fn):
        return BedReader(fn)
    if is_vcf(fn):
        return VcfReader(fn)
    raise ValueError("Could not get reader for %s" % fn)
def get_writer(fn, samples):
    """Return a BedWriter obj or VcfWriter obj depending on fn format.

    samples: sample names to be written into the output header.
    Raises ValueError when `fn` is neither BED nor VCF.
    """
    if is_bed(fn):
        return BedWriter(fn, samples)
    elif is_vcf(fn):
        return VcfWriter(fn, samples)
    else:
        # Fixed copy/paste: the message previously said "reader".
        raise ValueError("Could not get writer for %s" % fn)
def transform_coordinate_of_sv(svobj):
    """
    Map a structural variant from substring coordinates back to its
    chromosome.  `svobj` must be a BedRecord or a VcfRecord whose chrom
    conforms to `chrom__substr__start_end` (e.g. `chr01__substr__11838__13838`);
    a copy shifted by the substring's start offset is returned.
    ...doctest:
        >>> from pbsv.independent.common import SvFmts
        >>> o1 = BedRecord(chrom='chr1__substr__100_200', start=0, end=100, sv_type='Deletion', sv_id=None, \
            sv_len=-100, alt=None, fmts=SvFmts.fromDict({'SAMPLE1': '0/1:3:6'}), annotations=['ALU'])
        >>> o2 = transform_coordinate_of_sv(o1)
        >>> o2.chrom, o2.start, o2.end
        ('chr1', 100, 200)
    """
    chrom, offset, _ = get_chrom_start_end_from_string(svobj.chrom)
    if isinstance(svobj, BedRecord):
        return BedRecord(chrom=chrom, start=svobj.start + offset, end=svobj.end + offset,
                         sv_type=svobj.sv_type, sv_id=svobj.sv_id,
                         sv_len=svobj.sv_len, alt=svobj.alt, annotations=svobj.annotations,
                         fmts=svobj.fmts)
    if isinstance(svobj, VcfRecord):
        return VcfRecord(chrom=chrom, pos=svobj.pos + offset, end=svobj.end + offset,
                         ref=svobj.ref, alt=svobj.alt, fmts=svobj.fmts,
                         annotations=svobj.annotations, sv_type=svobj.sv_type, sv_len=svobj.sv_len)
    raise TypeError("svobj must be either BedRecord or VcfRecord while it is {}".format(type(svobj)))
def get_chrom_start_end_from_string(s):
    """Split a '{chrom}__substr__{start}_{end}' string into its parts.

    Returns (chrom, int(start), int(end)).
    ...doctest:
        >>> get_chrom_start_end_from_string('chr01__substr__11838_13838')
        ('chr01', 11838, 13838)
    """
    try:
        chrom, coords = s.split('__substr__')
        start_str, end_str = coords.split('_')
        return chrom, int(start_str), int(end_str)
    except Exception:
        raise ValueError("String %s must be of format '{chrom}__substr__{start}_{end}'" % s)
def run_transform(i_fn, o_fn):
    """Rewrite every SV in i_fn into o_fn with chromosome-level coordinates."""
    both_bed = is_bed(i_fn) and is_bed(o_fn)
    both_vcf = is_vcf(i_fn) and is_vcf(o_fn)
    if not (both_bed or both_vcf):
        raise ValueError("Input and output must be both BED or VCF")
    with get_reader(i_fn) as reader:
        with get_writer(o_fn, reader.samples) as writer:
            for record in reader:
                writer.writeRecord(transform_coordinate_of_sv(record))
|
983,612 | 1617298e5de8165200c695e07a55bc46627b43e5 | import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
# Fetch one newsgroup category and inspect its TF-IDF representation.
# (Converted from Python-2 print statements, which are a SyntaxError
# under Python 3; the behaviour is otherwise unchanged.)
categories = ['comp.graphics']
news = fetch_20newsgroups(categories=categories)

# Only the first `index` documents are vectorized.
index = 3

vectorizer = TfidfVectorizer(stop_words='english')
# Rows are documents, columns are vocabulary terms (sparse matrix).
vectors = vectorizer.fit_transform(news.data[:index])

# Term -> column-index mapping learned from the corpus.
print(vectorizer.vocabulary_)

# First row of the dense TF-IDF matrix; with np.matrix, m[0][0] is still
# a 1xN row, so the final [0][0] extracts the first element.
results = vectors.todense()[0][0]
print(results[0][0])
|
983,613 | 69641dc862629427f6ed8330141fcceb5e4496e2 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import socket
# Minimal TCP client (Python 2: raw_input / print statement).
sockcli=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
HOST='192.168.1.75'
PORT=9200
addr=(HOST,PORT)
sockcli.connect(addr)
# Echo-style loop: read a line from the user, send it, print the reply.
# NOTE(review): the socket is never closed and recv is capped at 1024
# bytes per reply — confirm this is acceptable for the target server.
while True:
    senddata=raw_input("send data:").strip()
    # Skip empty input instead of sending a zero-byte payload.
    if len(senddata)==0:continue
    sockcli.send(senddata)
    revedata=sockcli.recv(1024)
    print revedata
|
983,614 | 650554d8b44791568324c449647c78ab1a286bde | # noqa: D205,D400
"""
CF-Convention checking
======================
Utilities designed to verify the compliance of metadata with the CF-Convention.
"""
import fnmatch
from typing import Sequence, Union
from .formatting import parse_cell_methods
from .options import cfcheck
from .utils import VARIABLES, ValidationError
# TODO: Implement pandas infer_freq in xarray with CFTimeIndex. >> PR pydata/xarray#4033
@cfcheck
def check_valid(var, key: str, expected: Union[str, Sequence[str]]):
    r"""Validate that `var.key` matches one of the `expected` glob patterns.

    Raises ValidationError when the attribute is missing or non-conforming.
    """
    att = getattr(var, key, None)
    if att is None:
        raise ValidationError(f"Variable does not have a `{key}` attribute.")
    if isinstance(expected, str):
        expected = [expected]
    # fnmatch gives shell-style wildcard matching against each pattern.
    if not any(fnmatch.fnmatch(att, pattern) for pattern in expected):
        raise ValidationError(
            f"Variable has a non-conforming {key}. Got `{att}`, expected `{expected}`",
        )
def cfcheck_from_name(varname, vardata):
    """Run the cf-checks declared for `varname` in xclim's variable registry."""
    spec = VARIABLES[varname]
    if "cell_methods" in spec:
        # The registry entry may cover only a prefix of the data's actual
        # cell_methods, hence the trailing wildcard.
        check_valid(
            vardata, "cell_methods", parse_cell_methods(spec["cell_methods"]) + "*"
        )
    if "standard_name" in spec:
        check_valid(vardata, "standard_name", spec["standard_name"])
def generate_cfcheck(*varnames):
    """Create a check function bound to official variable names.

    Parameters
    ----------
    varnames : str
      Names from xclim's variable registry, in the same order as the
      arrays that will be passed to the generated function.

    Returns
    -------
    callable
      A function accepting as many variables as there are `varnames` and
      running :py:func:`cfcheck_from_name` on each (name, variable) pair.
    """

    def _generated_check(*args):
        for varname, var in zip(varnames, args):
            cfcheck_from_name(varname, var)

    return _generated_check
def check_valid_temperature(var, units):
    r"""Check that `var` is an air temperature expressed in `units`."""
    check_valid(var, "standard_name", "air_temperature")
    check_valid(var, "units", units)
def check_valid_discharge(var):
    r"""Check that the variable is a river discharge in m3 s-1."""
    check_valid(var, "standard_name", "water_volume_transport_in_river_channel")
    check_valid(var, "units", "m3 s-1")
def check_valid_min_temperature(var, units="K"):
    r"""Check that a variable is a valid daily minimum temperature (default units: K)."""
    check_valid_temperature(var, units)
    check_valid(var, "cell_methods", "time: minimum within days")
def check_valid_mean_temperature(var, units="K"):
    r"""Check that a variable is a valid daily mean temperature (default units: K)."""
    check_valid_temperature(var, units)
    check_valid(var, "cell_methods", "time: mean within days")
def check_valid_max_temperature(var, units="K"):
    r"""Check that a variable is a valid daily maximum temperature (default units: K)."""
    check_valid_temperature(var, units)
    check_valid(var, "cell_methods", "time: maximum within days")
|
983,615 | e5b695327c9c90564cc4f9e24a9a78ffe36c8fc7 | import time
from scrapy.spiders import CrawlSpider
from scrapy.http import TextResponse
from crawlers.UrlManager import UrlManager
from crawlers.items import BlogItem
from scrapy.loader import ItemLoader
urlManager = UrlManager()
class BaseSpider(CrawlSpider):
    """Base crawler for ThoughtWorks insights blog articles."""

    # Restrict crawling to the insights domain.
    allowed_domains = ['insights.thoughtworks.cn']
    # Seed URLs come from the shared module-level UrlManager instance.
    start_urls = urlManager.start_urls

    def process_item(self, response):
        # Generator: yields one populated BlogItem per article response.
        print("parse item url is :{0}".format(response.url))
        if isinstance(response, TextResponse):
            # loader = ItemLoader(item=BlogItem(), response=response)
            # loader.add_css('url', 'site-main')
            blog = BlogItem()
            blog['url'] = response.url
            # Concatenate all article text nodes, stripping layout whitespace.
            blog['content'] = ''.join(response.xpath('//article//text()').extract()) \
                .replace('\n', '') \
                .replace('\t', '').strip()
            blog['title'] = response.css('.entry-title::text').extract_first()
            tag = response.css('.cat-links').xpath('string(.)').extract_first()
            # Tag may be absent on some pages; keep None in that case.
            blog['tag'] = tag.replace('\n', '').replace('\t', '').strip() if tag else None
            blog['spider'] = self.name
            blog['date'] = time.strftime("%W--%Y/%m/%d/--%H:%M:%S")
            yield blog

    def process_links(self, links):
        # Log each extracted link before letting the crawler follow it.
        print("The links is {}".format(links))
        for link in links:
            print("{0} link is :{1}".format(self.name, link.url))
            yield link
|
983,616 | fc63bb233c18301b2856eaeeb04428a850c33de2 | '''
Highlevel API to run binary classification with Keras
Created on 15.03.2019
@author: el-sharkawy
'''
import numpy as np
import tensorflow as tf # Tensorflow
from tensorflow import keras # Simplified Tensorflow Framework
from tensorflow.keras import regularizers
from tensorflow.keras import callbacks
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
def binaryClassification(data, labels, hiddenLayers, lrate, nEpochs, kSplitt=10, rp=0.01, columns=None, plotName=None):
    """Train and k-fold cross-validate a binary Keras classifier.

    Parameters
    ----------
    data, labels : numpy arrays of features and 0/1 targets.
    hiddenLayers : iterable of int, units per hidden dense layer.
    lrate : learning rate for the Adam optimizer.
    nEpochs : training epochs per fold.
    kSplitt : number of stratified folds (only kSplitt > 0 is handled).
    rp : L2 regularization factor.
    columns : optional column subset of `data` to use.
    plotName : if given, save an accuracy plot per fold under ../data/.
    """
    if (columns is not None):
        data = data[:, columns]
    if (kSplitt > 0):
        # randomSeed is currently fixed to 0, so the unseeded branch runs.
        randomSeed = 0
        if (randomSeed != 0):
            kfold = StratifiedKFold(n_splits=kSplitt, shuffle=True, random_state=randomSeed)
        else:
            kfold = StratifiedKFold(n_splits=kSplitt, shuffle=True)
        fold = 0
        cvscores = []
        # K-Fold analysis based on https://machinelearningmastery.com/evaluate-performance-deep-learning-models-keras/
        for train, test in kfold.split(data, labels):
            fold = fold + 1
            # Define the network: dense ReLU hidden layers with L2
            # regularization, sigmoid output for binary classification.
            cbks = [callbacks.TerminateOnNaN()]
            layers = [keras.layers.Dense(n, activation=tf.nn.relu, kernel_regularizer=regularizers.l2(rp))
                      for n in hiddenLayers]
            layers.append(keras.layers.Dense(1, activation=tf.nn.sigmoid))
            model = keras.Sequential(layers)
            # BUG FIX: `lr` is not a Model.compile argument; the learning
            # rate belongs to the optimizer itself.
            model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=lrate),
                          loss='binary_crossentropy',
                          metrics=['accuracy'])
            # Train on this fold, then evaluate on the held-out split.
            history = model.fit(data[train], labels[train], epochs=nEpochs, callbacks=cbks, verbose=0)
            scores = model.evaluate(data[test], labels[test], verbose=0)
            if (np.isnan(history.history['loss']).any()):
                raise ValueError("Loss was not a number")
            if (plotName is not None):
                # NOTE(review): with newer tf.keras the history key is
                # 'accuracy' rather than 'acc' — confirm for the TF version
                # in use.
                plt.plot(history.history['acc'])
                plt.title('Model accuracy')
                plt.ylabel('Accuracy')
                plt.xlabel('Epoch')
                plt.legend(['Train', 'Test'], loc='upper left')
                plt.savefig("../data/" + plotName + str(fold) + ".png")
            print("%s %s: %.2f%%" % (fold, model.metrics_names[1], scores[1]*100))
            cvscores.append(scores[1] * 100)
        print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))
983,617 | 24c9f87cbdc79b357f276d5b3a99f208c3c9ebcd | import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, load_module, reload_module, get_stdout, check_source
from functools import reduce
import os
import textwrap
from random import choice, randint
# Module under test and the name of the function expected inside it.
exercise = 'src.kertaa_kymmenen'
function = 'kertaa_kymmenen'
def get_correct(a: int, b: int) -> dict:
    """Model solution: map each x in [a, b] to ten times x."""
    return dict((x, 10 * x) for x in range(a, b + 1))
@points('5.kertaa_kymmenen')
class Kertaa10Test(unittest.TestCase):
    # TMC grader tests for the `kertaa_kymmenen` exercise.  The assertion
    # messages are shown to students in Finnish on purpose and must not
    # be changed.

    @classmethod
    def setUpClass(cls):
        # Importing the student's module must not call input().
        with patch('builtins.input', side_effect=[AssertionError("Syötteen pyytämistä ei odotettu")]):
            cls.module = load_module(exercise, 'fi')

    def test_0_paaohjelma_kunnossa(self):
        # The student's top-level test code must sit under the main guard.
        ok, line = check_source(self.module)
        message = """Funktioita testaava koodi tulee sijoittaa lohkon
if __name__ == "__main__":
sisälle. Seuraava rivi tulee siirtää:
"""
        self.assertTrue(ok, message+line)

    def test_1_funktio_olemassa(self):
        # The function must exist and be callable with two arguments.
        try:
            from src.kertaa_kymmenen import kertaa_kymmenen
        except:
            self.assertTrue(False, 'Koodistasi pitäisi löytyä funktio nimeltä kertaa_kymmenen(alku: int, loppu: int)')
        try:
            kertaa_kymmenen = load(exercise, function, 'fi')
            kertaa_kymmenen(1,2)
        except:
            self.assertTrue(False, 'Tarkista että funktiota voi kutsua seuraavasti\nkertaa_kymmenen(1, 2)')

    def test_2_paluuarvon_tyyppi(self):
        # The return value must be a dict.
        kertaa_kymmenen = load(exercise, function, 'fi')
        val = kertaa_kymmenen(1,2)
        taip = str(type(val)).replace("<class '", '').replace("'>","")
        self.assertTrue(type(val) == dict, f"Funktion {function} tulisi palauttaa arvo, jonka tyyppi on dict, nyt se palauttaa arvon {val} joka on tyyppiä {taip}.")

    def test_3_lukusarjat(self):
        # Compare the student's dict with the model answer over ranges.
        test_cases = ((1,3),(0,6),(2,8),(20,23),(100,110))
        for test_case in test_cases:
            with patch('builtins.input', side_effect=[AssertionError("Syötteen pyytämistä ei odotettu")]):
                reload_module(self.module)
            output_alussa = get_stdout()
            kertaa_kymmenen = load(exercise, function, 'fi')
            value = kertaa_kymmenen(test_case[0], test_case[1])
            correct = get_correct(test_case[0], test_case[1])
            self.assertEqual(len(correct), len(value), f"Palautetussa sanakirjassa pitäisi olla {len(correct)} alkiota, mutta siinä on {len(value)} alkiota: \n{value} kun parametrit ovat {test_case}")
            self.assertEqual(value, correct, f"Tulos \n{value}\nei vastaa mallivastausta \n{correct}\nkun parametrit ovat \n{test_case}")
# Allow running this grader module directly.
if __name__ == '__main__':
    unittest.main()
|
983,618 | d5656133431daac1faa6cd088fe996910501b5db | #constants
import itertools
class Constants:
    """Precomputed coordinate lists for the game board."""

    # All 81 interior cells: rows and columns 1..9.
    all_possible_tuples_for_consumables = list(
        itertools.product(range(1, 10), range(1, 10)))

    # Border cells, enumerated edge by edge: top row, bottom row, left
    # column, right column.  Note that (9, 9) occurs twice by construction.
    all_possible_positions = [
        pos
        for edge in (
            itertools.product([0], range(1, 10)),
            itertools.product([9], range(1, 10)),
            itertools.product(range(1, 10), [0]),
            itertools.product(range(1, 10), [9]),
        )
        for pos in edge
    ]

    # Independent shallow copy used for participant placement, so that
    # consuming entries from one list does not affect the other.
    all_possible_positions_for_participants = list(all_possible_positions)
983,619 | 627eabc0e00454ac7da73d300b558872d89a9c2c | # Generated by Django 3.0.3 on 2020-03-23 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds optional UUID links: organization -> core sample group and
    # user -> personal organization.  Applied after the initial migration.

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='organization',
            name='core_sample_group_uuid',
            field=models.UUIDField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='pangeauser',
            name='personal_org_uuid',
            field=models.UUIDField(blank=True, null=True),
        ),
    ]
|
983,620 | 80a6f2e560d8a16efe4221a059ca7aaf630ae2ca | # Generated by Django 2.2.4 on 2019-08-28 06:39
from django.db import migrations, models
def defaults(apps, schema_editor):
    # Seed the vehicle System table with default name/mission/vision rows.
    # NOTE(review): this function is not referenced by the operations list
    # below — presumably it was meant to run via migrations.RunPython;
    # confirm intent.  The mission/vision values are placeholder text.
    System = apps.get_model('vehicle', 'System')
    s = System(name='full_name', value='ACD Vehicle Plate Recognition System')
    s.save()
    s = System(name='short_name', value='ACD-VPR-SYS')
    s.save()
    s = System(name='mission', value='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque massa lectus, placerat eget tortor id, vehicula pharetra quam. Proin sit amet aliquam sem. Maecenas ac porta magna. Mauris posuere aliquam posuere. Ut efficitur nunc eu augue venenatis luctus. Nulla tellus turpis, molestie id faucibus et, porttitor porttitor nibh. Quisque ut laoreet tortor, nec vestibulum velit. Cras ultrices ut ligula at ultricies. Nulla est odio, sollicitudin id neque at, congue tristique felis.<br>Maecenas varius lorem sit amet est convallis auctor. Sed purus leo, luctus sit amet vestibulum et, rutrum sed purus. Sed sagittis scelerisque mi, at interdum ante malesuada at. Pellentesque faucibus consectetur suscipit. Sed eleifend mattis turpis gravida cursus. Aenean risus lectus, efficitur eget posuere sit amet, convallis a elit. Phasellus sit amet lorem a nisi cursus vulputate nec sit amet mi. Interdum et malesuada fames ac ante ipsum primis in faucibus.')
    s.save()
    s = System(name='vision', value='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam pharetra ante eget urna tincidunt, et pharetra ligula aliquet. Praesent vitae enim urna. Nulla id tortor ipsum. Curabitur mattis lectus ac nisi lobortis consectetur. Integer at justo non mi iaculis finibus ut a turpis. Ut auctor tortor nunc, ut bibendum augue lacinia et. Nulla facilisi.<br>Maecenas nunc nunc, lacinia ut vehicula eu, dapibus a ex. Aenean pretium euismod tristique. Nullam sit amet volutpat dui. Morbi finibus nec ipsum ut sollicitudin. Pellentesque luctus ante placerat nibh elementum, porttitor pharetra enim suscipit. Mauris ut massa a dui scelerisque scelerisque quis sit amet ante. Sed convallis sed augue nec posuere. Quisque eleifend mollis lorem. Maecenas lacinia diam nunc, sed pulvinar est elementum sit amet. Duis scelerisque pretium neque, in luctus felis cursus at. Suspendisse gravida auctor sagittis. Fusce id purus nisi. Quisque euismod orci massa.')
    s.save()
class Migration(migrations.Migration):
    # Creates the vehicle System key/value table.
    # NOTE(review): the `defaults` data loader defined above is not listed
    # in `operations` (no migrations.RunPython step) — confirm whether the
    # default rows should be inserted by this migration.

    dependencies = [
        ('vehicle', '0005_auto_20190827_0058'),
    ]

    operations = [
        migrations.CreateModel(
            name='System',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('value', models.TextField()),
            ],
        ),
    ]
|
983,621 | 5920acff829d6ac19ee0273c332eaad7931542ce | """
Unmerged conflicts in status, which are one of:
DD unmerged, both deleted
AU unmerged, added by us
UD unmerged, deleted by them
UA unmerged, added by them
DU unmerged, deleted by us
AA unmerged, both added
UU unmerged, both modified
"""
# Two-letter (X, Y) codes from `git status --porcelain` that indicate an
# unresolved merge conflict; see git-status(1), "Short Format".
MERGE_CONFLICT_PORCELAIN_STATUSES = (
    ("D", "D"),  # unmerged, both deleted
    ("A", "U"),  # unmerged, added by us
    ("U", "D"),  # unmerged, deleted by them
    ("U", "A"),  # unmerged, added by them
    ("D", "U"),  # unmerged, deleted by us
    ("A", "A"),  # unmerged, both added
    ("U", "U")   # unmerged, both modified
)
|
983,622 | 44e78e87a3410e09c17c8f208bfecd5c1dd2dd99 | import pandas as pd
# Convert reddit_wsb.csv to a chart of daily post volume.
from collections import Counter

raw_dataset = pd.read_csv('reddit_wsb.csv')

# Count posts per calendar day.  The date is the part of the timestamp
# before the first space (assumes "YYYY-MM-DD HH:MM:SS" — TODO confirm).
# Counter iterates dates in first-occurrence order, which makes the output
# deterministic (the previous set-based version depended on hash order)
# and avoids re-scanning the whole frame once per distinct date.
daily_counts = Counter(ts.split(" ")[0] for ts in raw_dataset["timestamp"])

# `with` closes the file on exit; the old explicit close() was redundant.
with open('wsb_daily_volume.csv', 'w') as f:
    f.write("date,volume\n")
    for post_date, volume in daily_counts.items():
        f.write("'%s',%d\n" % (post_date, volume))
|
983,623 | a2e72dca1ba33a410838ab2724d79e538e2a9da8 | s = 0
for k in range(1, 123456790, 2):
s = s + k
print(s) |
983,624 | 724df4198c903c72685d34de82c1cde0235ebe18 | import torch
import torchvision
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
import random
import numpy as np
def reduction1(U, sigma, vh, x1):
    # Rank-reduce an SVD factorization (U, sigma, vh) assumed to have 20
    # singular values: keep the smallest leading set covering >= 95% of the
    # diagonal total, then rebuild the low-rank product x11.
    # NOTE(review): the `x1` parameter is unused; the reconstruction is
    # returned as the fourth value instead — confirm the intended contract.
    y = 0
    z = 0
    # Total of the 20 diagonal entries of sigma.
    for i in range(20):
        y += sigma[i][i]
    z = 0
    index = -1
    # Find the first prefix reaching 95% of the total.
    for i in range(20):
        z += sigma[i][i]
        x = ((z*100.0))/y
        if(x>=95):
            index = i
            break
    index = index + 1
    rem = 20 - index
    #print("reduction 1",index)
    # NOTE(review): after the increment above `index` can never be -1, so
    # the guard below is always true and the "NO" branch is dead.  Also the
    # slicing keeps (20-rem) columns of U but `rem` rows of vh, which looks
    # dimensionally inconsistent — verify the intended shapes for x11.
    if(index!=-1):
        U = U[:,:(20-rem)]
        sigma = sigma[:(20-rem),:rem]
        vh = vh[:rem,:]
        x11 = torch.mm(torch.mm(U, sigma), vh)
    if(index==-1):
        print("NO")
    return U, sigma, vh, x11
def reduction(U, sigma, vh, x1):
    # Same 95%-energy rank reduction as reduction1, but for a factorization
    # with 784 columns in U (e.g. flattened 28x28 inputs) while still
    # summing only the first 20 singular values.
    # NOTE(review): `x1` is unused here as well — confirm the contract.
    sum_total = 0
    sum_current = 0
    print("initially",U.size())
    #print(torch.matmul(U,U.t()))
    # Total of the first 20 diagonal entries of sigma.
    for i in range(20):
        sum_total += sigma[i][i]
    index = -1
    #print("total1",sum_total)
    # First prefix strictly exceeding 95% of the total (note: > here vs >=
    # in reduction1 — presumably unintentional; confirm).
    for i in range(20):
        sum_current += sigma[i][i]
        x = (sum_current*100.0)/sum_total
        if(x>95):
            index = i
            break
    index = index + 1
    rem = 20 - index
    #print("total2",sum_current)
    #print("reduction index", index)
    # NOTE(review): after the increment `index` can never be -1, so the
    # guard below always fires and the "NO" branch is dead; the (784-rem)
    # column slice vs `rem`-row slice of vh looks inconsistent — verify.
    if(index!=-1):
        U = U[:,:(784-rem)]
        sigma = sigma[:(784-rem),:rem]
        vh = vh[:rem,:]
        x11 = torch.mm(torch.mm(U, sigma), vh)
        print("finally",U.size())
        #print(torch.matmul(U,U.t()))
    if(index==-1):
        print("NO")
    return U, sigma, vh, x11
|
983,625 | 545c1add97e174c5a44191a9a986532d02f7a042 | #!/usr/bin/env python2
def answer(x, y):
    """Print and return the single element present in the longer of the
    two lists but not in the shorter one.

    Uses a multiset (Counter) difference so duplicate IDs are handled
    correctly: the original `not in` filter returned nothing when the
    extra element also occurred in the shorter list (e.g. x=[1, 1],
    y=[1, 1, 1] crashed with IndexError).
    """
    from collections import Counter
    # The moved worker is in whichever list is longer.
    longer, shorter = (x, y) if len(x) > len(y) else (y, x)
    extra = Counter(longer) - Counter(shorter)
    # Exactly one element differs by assumption; take it.
    value = next(iter(extra.elements()))
    print(value)
    return value
# Smoke tests: the expected answer is printed on the following line.
x = [13, 5, 6, 2, 5]
y = [5, 2, 5, 13]
answer(x,y)
print('---> 6')
x = [14, 27, 1, 4, 2, 50, 3, 1]
y = [2, 4, -4, 3, 1, 1, 14, 27, 50]
answer(x,y)
print('---> -4')
983,626 | 99f6bfa8c10cfde4e7ecee41ef44647e72d09614 | from app import app
from flask import request, redirect, Response
import os
import base64
@app.route('/')
def main():
    """Health-check root endpoint."""
    return "Hi"
@app.route('/bytes/<size>', methods=('GET', 'POST'))
def bytes(size):
    """Return `size` random bytes, base64-encoded.

    Responds 400 on a non-integer or negative size.  (Note: the function
    name shadows the builtin `bytes`; kept because it is the Flask
    endpoint name.)
    """
    try:
        n = int(size)
    except ValueError:
        return Response("Invalid argument", status=400)
    if n < 0:
        # os.urandom raises ValueError for negative sizes, which previously
        # surfaced as an unhandled 500; report a client error instead.
        return Response("Invalid argument", status=400)
    d = os.urandom(n)
    return base64.b64encode(d)
|
983,627 | 72cfa2bb908332e43cb84faab3555c886c2587c8 | from typing import List
from dataclasses import dataclass, field
from sebs.cache import Cache
@dataclass
class MinioConfig:
    """Connection and bucket settings for a self-hosted Minio instance."""

    address: str = ""
    mapped_port: int = -1
    access_key: str = ""
    secret_key: str = ""
    instance_id: str = ""
    input_buckets: List[str] = field(default_factory=list)
    output_buckets: List[str] = field(default_factory=list)
    type: str = "minio"

    def update_cache(self, path: List[str], cache: Cache):
        """Persist every dataclass field of this config under `path`."""
        for field_name in MinioConfig.__dataclass_fields__:
            cache.update_config(val=getattr(self, field_name), keys=[*path, field_name])

    @staticmethod
    def deserialize(data: dict) -> "MinioConfig":
        """Build a MinioConfig from a dict, silently ignoring unknown keys."""
        known = MinioConfig.__dataclass_fields__.keys()
        return MinioConfig(**{k: v for k, v in data.items() if k in known})

    def serialize(self) -> dict:
        """Return the raw attribute dict of this config."""
        return self.__dict__
|
983,628 | 9e60a1b78b7584e79c92403556ad47c6c91fb09c | from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
# RFQ / customer / manufacturer routes for this app.
urlpatterns = [
    path('rfqshow',views.rfqshow,name='rfqshow'),
    path('customerlist',views.customerlist,name='customerlist'),
    path('searchforcustomer',views.searchforcustomer,name='searchforcustomer'),
    path('workload',views.workload,name='workload'),
    path('quotesrecieved',views.quotesrecieved,name='quotesrecieved'),
    path('searchformanufacturer',views.searchformanufacturer,name='searchformanufacturer'),
    path('loader/<int:id>',views.searchforsending,name='searchforsending'),
    # NOTE(review): no separator before <int:id> below, so URLs look like
    # /sendinglistmanufacturer3 — confirm this is intentional.
    path('sendinglistmanufacturer<int:id>',views.sendinglistmanufacturer,name='sendinglistmanufacturer'),
]
|
983,629 | d981d4487de8513edfdf8231670756e5216ab6fa | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Import des modules
from random import randint
# Déclaration des fonctions
def tri_pivot(liste):
    """Return a sorted copy of `liste` using quicksort with a random pivot.

    `liste` must be non-empty; recursion only happens on non-empty
    partitions, so the random pivot index is always valid.
    """
    pivot = liste[randint(0, len(liste) - 1)]
    inferieurs = [elt for elt in liste if elt < pivot]
    egaux = [elt for elt in liste if not (elt < pivot or elt > pivot)]
    superieurs = [elt for elt in liste if elt > pivot]
    if inferieurs:
        inferieurs = tri_pivot(inferieurs)
    if superieurs:
        superieurs = tri_pivot(superieurs)
    return inferieurs + egaux + superieurs
# Fonction Main
def main():
    """Build a random list, sort it with tri_pivot and verify the result."""
    valeurs = [randint(1, 20) for _ in range(20)]
    print(valeurs)
    valeurs_triees = tri_pivot(valeurs)
    print(valeurs_triees)
    print(valeurs_triees == sorted(valeurs))
# Script entry point: run main() only when executed directly.
if __name__ == "__main__":
    main()
|
983,630 | 8363d477339a7ec64a6f1bf09c4e39c32b8a7f41 | #!/usr/bin/env python3
""" Scheduler """
import os
import glob
import time
import queue
import threading
from job import Job
from trace_dict import TraceDict
from trace_record import TraceRecord
from result import Result
class Scheduler:
    """
    Creates benchmark jobs and runs jobs (in parallel)
    """
    def __init__(self, runner, result_path, configurations, output):
        # runner: executes a single Job; must expose `modelfile_ext`
        # and a run(job) method returning an object with a `.trace`.
        self.runner = runner
        # Root directory for per-configuration result folders.
        self.result_path = result_path
        # Each configuration is a sequence of (name, option) pairs.
        self.configurations = configurations
        self.time_start = time.time()
        # Thread-safe queues: pending jobs and collected results.
        self.jobs = queue.Queue()
        self.results = queue.Queue()
        # Progress reporter; must provide a .print(...) method.
        self.output = output
    def _model_files(self, model_path):
        # All model instance files with the runner's extension, sorted.
        pattern = os.path.join(model_path, "*." + self.runner.modelfile_ext)
        return sorted(glob.glob(pattern))
    @staticmethod
    def _configuration_name(configuration):
        # Join option values with '_' to name a configuration's folder.
        name = ''
        for i, (_, option) in enumerate(configuration):
            if i > 0:
                name += '_'
            name += str(option)
        return name
    def num_jobs(self):
        """
        Returns the number of jobs currently in the job pool
        """
        return self.jobs.qsize()
    def create(self, model_path, max_jobs=10000000, max_time=60, kill_time=30, solu_file=None):
        """
        Creates benchmark jobs

        Arguments
        ---------
        model_path: str
            Path to model instances
        max_jobs: int
            Max allowed jobs
        max_time: int
            Max allowed time per job
        kill_time: int
            Time (+max_time) after which a process should be killed
        solu_file: str
            Optional solution file with known optimal values per model
        """
        # pylint: disable=too-many-arguments
        # load solution file
        optsol = None
        if solu_file is not None:
            optsol = TraceDict()
            optsol.load_solu(solu_file)
        # create jobs
        for conf in self.configurations:
            conf_name = self._configuration_name(conf)
            for model in self._model_files(model_path):
                if self.num_jobs() + 1 > max_jobs:
                    return
                modelname = os.path.splitext(os.path.basename(model))[0]
                workdir = os.path.join(self.result_path, conf_name, modelname)
                job = Job(modelname, workdir, model, conf, max_time, kill_time)
                # Attach known optimal values so results can be compared.
                if optsol is not None and modelname in optsol.records:
                    record = optsol.records[modelname].record
                    job.model_status = record['ModelStatus']
                    job.objective = record['ObjectiveValue']
                    job.objective_estimate = record['ObjectiveValueEstimate']
                self.jobs.put(job)
    def run(self, n_threads=1, max_duration=10000000):
        """
        Starts the benchmark

        Arguments
        ---------
        n_threads: int
            Number of threads to run jobs in parallel
        max_duration: int
            Max allowed total duration of benchmark
        """
        # One None sentinel per worker so each thread terminates cleanly.
        for i in range(n_threads):
            self.jobs.put(None)
        # run jobs
        if n_threads == 1:
            self._run_thread(0, max_duration)
        else:
            threads = []
            for i in range(n_threads):
                threads.append(threading.Thread(target=self._run_thread, args=(i, max_duration)))
                threads[-1].start()
            for i in range(n_threads):
                threads[i].join()
        # get results and put them in trace files per configuration
        self.results.put(None)
        traces = dict()
        for conf in self.configurations:
            traces[self._configuration_name(conf)] = TraceDict()
        while True:
            result = self.results.get()
            if result is None:
                break
            # result is a (job_name, configuration_name, trace) tuple.
            traces[result[1]].append(result[2])
        # write trace files
        for conf_name, conf_traces in traces.items():
            conf_traces.write(os.path.join(self.result_path, conf_name, 'trace.trc'))
    def _duration(self):
        # Seconds elapsed since this scheduler was constructed.
        return time.time() - self.time_start
    def _run_thread(self, thread_id, max_duration):
        # Worker loop: consume jobs until the None sentinel is reached.
        while True:
            job = self.jobs.get()
            if job is None:
                break
            conf_name = self._configuration_name(job.configuration)
            if job.init_workdir() and self._duration() <= max_duration:
                result = self.runner.run(job)
                self.results.put((job.name, conf_name, result.trace))
            else:
                # Skipped job (workdir problem or time budget exhausted):
                # fall back to whatever trace is already on disk.
                trace = TraceRecord(job.filename())
                trace.load_trc(os.path.join(job.workdir, 'trace.trc'))
                result = Result(trace, "", "")
                self.results.put((job.name, conf_name, trace))
            self.output.print(job, result, self._duration(), self.num_jobs(), thread_id)
|
983,631 | 38b7d0de31494e71cb02c4a2b61e17b3d5580f31 | from sys import argv
from os.path import exists
from os.path import getsize
# Python-2 script: copy from_file to to_file and report both sizes.
script, from_file, to_file = argv
# Open file to read
in_file = open(from_file)
# Read the file
indata = in_file.read()
# NOTE(review): the result of exists() is discarded — presumably this was
# meant to check/confirm before overwriting the target; verify intent.
exists(to_file)
# Open file to write
out_file = open(to_file, 'w')
# Write data into the file
out_file.write(indata)
# Close files
out_file.close()
in_file.close()
print """The size of the
input file: %r bytes,
output file: %r bytes""" % (getsize(from_file), getsize(to_file))
983,632 | 1d427ab28e6d5827313bdf7a2f2ee4f68de85fbc | from datetime import date
from models.Price import Price
class Order:
    '''A single car-rental order: rental period, car group, customer and payment.'''

    def __init__(self, date1, date2, group, car, extra_insurance, customer, payment, card_number='', returned=False):
        # date1/date2 are 'YYYY-MM-DD' strings (see get_time_period).
        self.__date1 = date1
        self.__date2 = date2
        self.__car = car
        self.__extra_insurance = extra_insurance
        self.__customer = customer
        self.__payment = payment
        self.__card_number = card_number
        self.__returned = returned
        self.__group = group
        # Price and group name are derived from the Price table at creation time.
        self.__price = self.calculate_price(group)
        self.__group_name = self.get_group_name(group)

    def __str__(self):
        '''Human-readable summary: period, car, insurance, price, customer, payment, status.'''
        return "Time period: {} - {} \tCar: {}\n\tExtra Insurance: {} \t\t\tPrice: {:,d} kr \n\tCustomer ssn: {} \t\tPayment: {} \n\tCard number: {} \tStatus: {}\n".format(self.print_date(self.__date1), self.print_date(self.__date2),
        self.__car, self.print_extra_insurance(),self.__price, self.print_ssn(), self.__payment, self.print_cardnum(), self.get_status())

    def __repr__(self):
        '''Constructor-style representation of the order instance.'''
        return "Order('{}','{}','{}','{}',{},'{}','{}','{}',{})".format(self.__date1,
        self.__date2,self.__group,self.__car,self.__extra_insurance,self.__customer,self.__payment,self.__card_number,
        self.__returned)

    def get_status(self):
        '''Return 'Delivered' once the car has been returned, else 'In rent'.'''
        # Truthiness instead of `== True` comparison.
        if self.__returned:
            return 'Delivered'
        return 'In rent'

    def get_date1(self):
        return self.__date1

    def get_date2(self):
        return self.__date2

    def calculate_price(self, group):
        '''Total rental price: daily rate for the group times the number of days.'''
        price_per_day = self.get_price_per_day(group)
        return price_per_day * self.get_time_period()

    def get_time_period(self):
        '''Number of rental days: return date minus pickup date.'''
        year1, month1, day1 = self.__date1.split('-')
        year2, month2, day2 = self.__date2.split('-')
        start = date(int(year1), int(month1), int(day1))
        end = date(int(year2), int(month2), int(day2))
        return (end - start).days

    def print_date(self, date):
        '''Format a 'YYYY-MM-DD' string as 'DD.MM.YYYY' for display.'''
        year, month, day = date.split('-')
        return '{}.{}.{}'.format(day, month, year)

    def print_extra_insurance(self):
        '''Return 'Yes'/'No' for the extra-insurance flag.'''
        return 'Yes' if self.__extra_insurance else 'No'

    def print_ssn(self):
        '''Format the customer SSN (kennitala) as 'DDDDDD-DDDD'.'''
        return self.__customer[:6] + '-' + self.__customer[6:]

    def print_cardnum(self):
        '''Format the card number in groups of four digits.'''
        cn = self.__card_number
        return cn[:4] + '-' + cn[4:8] + '-' + cn[8:12] + '-' + cn[12:16]

    def get_price_per_day(self, group):
        '''Daily rental rate for the given car group (from the Price table).'''
        return Price.price_dict[str(group)][Price.PRICE]

    def get_group(self):
        return self.__group

    def get_group_name(self, group):
        '''Display name of the given car group (from the Price table).'''
        return Price.price_dict[str(group)][Price.NAME]

    def get_car(self):
        return self.__car

    def get_customer(self):
        return self.__customer

    def get_payment(self):
        return self.__payment

    def get_card_number(self):
        return self.__card_number

    def get_price(self):
        return self.__price

    def get_extra_insurance(self):
        return self.__extra_insurance

    def get_returned(self):
        return self.__returned

    def file_delivery(self):
        '''Mark the order as delivered (car returned).'''
        self.__returned = True

    def get_id(self):
        '''Order id, which is the car's registration number.'''
        return self.__car
def read_input(file_name):
    """Parse a moves file: one move per line, four comma-separated ints in 0..7.

    :param file_name: path to the input file
    :return: list of moves (each a list of four ints), or None if a line is
        malformed. Reading stops at the first blank line (treated as EOF,
        matching the original loop's termination condition).
    """
    moves = []
    # Context manager fixes the original's leaked file handle (it was never
    # closed, not even on the early `return None` path).
    with open(file_name, 'r') as f_input:
        for new_move in f_input:
            if not new_move.strip():
                # Blank line / EOF sentinel: stop reading.
                break
            lst = [int(e) for e in new_move.split(",")]
            if is_a_not_valid_line(lst):
                return None
            moves.append(lst)
    return moves


def is_a_not_valid_line(lst):
    """Return True when the move is malformed: not 4 fields or out of 0..7."""
    return len(lst) != 4 or not all(0 <= e <= 7 for e in lst)


def get_illegal_move_string(move, line_number):  # TODO - move to "input processing.py" and call the file in_out_processing.py
    """Format an error message for an illegal move at the given line number."""
    return f"line : {line_number} illegal move: {move}"
983,634 | 513b5def66e86084468932a7df7ace51048ae533 |
from freezegun import freeze_time
import datetime
def right_time():
    """Return True exactly when the current system clock says the year is 2019."""
    current_year = datetime.datetime.now().year
    return current_year == 2019
@freeze_time("2019-01-14")
def test_2019():
    """With time frozen to 2019-01-14, right_time() must report 2019."""
    assert right_time() is True

# NOTE(review): removed the leftover debugger trap
# (`import pdb; pdb.set_trace()`) that halted every import of this module.
983,635 | 7ca1d3ac2888ba4c7a074571934e4315462f680d | from sympy import sqrt, symbols
from qalgebra.core.hilbert_space_algebra import LocalSpace
from qalgebra.core.operator_algebra import (
Commutator,
IdentityOperator,
LocalProjector,
LocalSigma,
OperatorSymbol,
ZeroOperator,
)
from qalgebra.library.fock_operators import Create, Destroy
from qalgebra.library.spin_algebra import Jplus, Jz, SpinSpace
from qalgebra.toolbox.commutator_manipulation import expand_commutators_leibniz
def test_disjunct_hs():
    """Test that the commutator of objects in disjoint Hilbert spaces is zero."""
    hs1 = LocalSpace("1")
    hs2 = LocalSpace("2")
    alpha, beta = symbols('alpha, beta')
    A = OperatorSymbol('A', hs=hs1)
    B = OperatorSymbol('B', hs=hs2)
    # Operators on disjoint spaces commute; scalars commute with everything.
    assert Commutator.create(A, B) == ZeroOperator
    assert Commutator.create(alpha, beta) == ZeroOperator
    assert Commutator.create(alpha, B) == ZeroOperator
    assert Commutator.create(A, beta) == ZeroOperator
def test_commutator_hs():
    """Test that a commutator lives in the correct Hilbert space."""
    hs1 = LocalSpace("1")
    hs2 = LocalSpace("2")
    A = OperatorSymbol('A', hs=hs1)
    B = OperatorSymbol('B', hs=hs2)
    C = OperatorSymbol('C', hs=hs2)
    # Same-space commutator stays in that space; mixing spaces yields the
    # product space.
    assert Commutator.create(B, C).space == hs2
    assert Commutator.create(B, A + C).space == hs1 * hs2
def test_pull_out_scalars():
    """Test that scalar prefactors are pulled out of either commutator argument."""
    hs = LocalSpace("sys")
    A = OperatorSymbol('A', hs=hs)
    B = OperatorSymbol('B', hs=hs)
    alpha, beta = symbols('alpha, beta')
    assert Commutator.create(alpha * A, B) == alpha * Commutator(A, B)
    assert Commutator.create(A, beta * B) == beta * Commutator(A, B)
    assert Commutator.create(alpha * A, beta * B) == alpha * beta * Commutator(
        A, B
    )
def test_commutator_expansion():
    """Test bilinear expansion of sums inside a commutator."""
    hs = LocalSpace("0")
    A = OperatorSymbol('A', hs=hs)
    B = OperatorSymbol('B', hs=hs)
    C = OperatorSymbol('C', hs=hs)
    D = OperatorSymbol('D', hs=hs)
    alpha = symbols('alpha')
    assert Commutator(A + B, C).expand() == Commutator(A, C) + Commutator(B, C)
    assert Commutator(A, B + C).expand() == Commutator(A, B) + Commutator(A, C)
    assert Commutator(A + B, C + D).expand() == (
        Commutator(A, C)
        + Commutator(A, D)
        + Commutator(B, C)
        + Commutator(B, D)
    )
    # The scalar alpha commutes with everything, so its terms drop out.
    assert Commutator(A + B, C + D + alpha).expand() == (
        Commutator(A, C)
        + Commutator(A, D)
        + Commutator(B, C)
        + Commutator(B, D)
    )
def test_diff():
    """Test differentiation of commutators (product rule on scalar prefactors)."""
    hs = LocalSpace("0")
    A = OperatorSymbol('A', hs=hs)
    B = OperatorSymbol('B', hs=hs)
    alpha, t = symbols('alpha, t')
    # d/dt [alpha t^2 A, t B] = 3 alpha t^2 [A, B] by the product rule.
    assert Commutator(alpha * t ** 2 * A, t * B).diff(t) == (
        3 * alpha * t ** 2 * Commutator(A, B)
    )
    assert Commutator.create(alpha * t ** 2 * A, t * B).diff(t) == (
        3 * alpha * t ** 2 * Commutator(A, B)
    )
    # A constant commutator has zero derivative.
    assert Commutator(A, B).diff(t) == ZeroOperator
def test_series_expand():
    """Test Taylor-series expansion of commutators with polynomial prefactors."""
    hs = LocalSpace("0")
    A = OperatorSymbol('A', hs=hs)
    B = OperatorSymbol('B', hs=hs)
    a3, a2, a1, a0, b3, b2, b1, b0, t, t0 = symbols(
        'a_3, a_2, a_1, a_0, b_3, b_2, b_1, b_0, t, t_0'
    )
    # Cubic prefactors expanded around t = 0 up to second order: the series
    # coefficients are the Cauchy products of the polynomial coefficients.
    A_form = (a3 * t ** 3 + a2 * t ** 2 + a1 * t + a0) * A
    B_form = (b3 * t ** 3 + b2 * t ** 2 + b1 * t + b0) * B
    comm = Commutator.create(A_form, B_form)
    terms = comm.series_expand(t, 0, 2)
    assert terms == (
        a0 * b0 * Commutator(A, B),
        (a0 * b1 + a1 * b0) * Commutator(A, B),
        (a0 * b2 + a1 * b1 + a2 * b0) * Commutator(A, B),
    )
    # Linear prefactors expanded around an arbitrary point t0.
    A_form = (a1 * t + a0) * A
    B_form = (b1 * t + b0) * B
    comm = Commutator.create(A_form, B_form)
    terms = comm.series_expand(t, t0, 1)
    assert terms == (
        (
            (a0 * b0 + a0 * b1 * t0 + a1 * b0 * t0 + a1 * b1 * t0 ** 2)
            * Commutator(A, B)
        ),
        (a0 * b1 + a1 * b0 + 2 * a1 * b1 * t0) * Commutator(A, B),
    )
    # A t-independent commutator expands to itself plus a zero first-order term.
    comm = Commutator.create(A, B)
    terms = comm.series_expand(t, t0, 1)
    assert terms == (Commutator(A, B), ZeroOperator)
# NOTE(review): function name has a typo ("oder" -> "order"); kept to avoid
# renaming a collected test.
def test_commutator_oder():
    """Test anti-commutativity of commutators: [B, A] == -[A, B]."""
    hs = LocalSpace("0")
    A = OperatorSymbol('A', hs=hs)
    B = OperatorSymbol('B', hs=hs)
    assert Commutator.create(B, A) == -Commutator(A, B)
    a = Destroy(hs=hs)
    a_dag = Create(hs=hs)
    assert Commutator.create(a, a_dag) == -Commutator.create(a_dag, a)
def test_known_commutators():
    """Test that well-known commutators are recognized and simplified."""
    fock = LocalSpace("0")
    spin = SpinSpace("0", spin=1)
    a = Destroy(hs=fock)
    a_dag = Create(hs=fock)
    # Canonical commutation relation: [a, a_dag] = 1.
    assert Commutator.create(a, a_dag) == IdentityOperator
    assert Commutator.create(a_dag, a) == -IdentityOperator
    # Commutators of local transition operators sigma_ij.
    assert Commutator.create(
        LocalSigma(1, 0, hs=fock), LocalSigma(0, 1, hs=fock)
    ) == LocalProjector(1, hs=fock) - LocalProjector(0, hs=fock)
    assert Commutator.create(
        LocalSigma(1, 0, hs=fock), LocalProjector(1, hs=fock)
    ) == (-1 * LocalSigma(1, 0, hs=fock))
    assert Commutator.create(
        LocalSigma(1, 0, hs=fock), LocalProjector(0, hs=fock)
    ) == LocalSigma(1, 0, hs=fock)
    assert Commutator.create(LocalSigma(1, 0, hs=fock), Create(hs=fock)) == (
        -sqrt(2) * LocalSigma(2, 0, hs=fock)
    )
    # Spin algebra: [J+, Jz] = -J+.
    assert Commutator.create(Jplus(hs=spin), Jz(hs=spin)) == -Jplus(hs=spin)
def test_commutator_expand_evaluate():
    """Test Leibniz expansion and explicit evaluation of commutators."""
    hs = LocalSpace("0")
    A = OperatorSymbol('A', hs=hs)
    B = OperatorSymbol('B', hs=hs)
    C = OperatorSymbol('C', hs=hs)
    D = OperatorSymbol('D', hs=hs)
    E = OperatorSymbol('E', hs=hs)
    # Leibniz rule applied to the right factor product.
    expr = Commutator(A, B * C * D * E)
    res = (
        B * C * D * Commutator(A, E)
        + B * C * Commutator(A, D) * E
        + B * Commutator(A, C) * D * E
        + Commutator(A, B) * C * D * E
    )
    assert expand_commutators_leibniz(expr) == res
    # doit([Commutator]) substitutes the definition [X, Y] = XY - YX.
    assert expr.doit([Commutator]) == (A * B * C * D * E - B * C * D * E * A)
    assert res.doit([Commutator]).expand() == (
        A * B * C * D * E - B * C * D * E * A
    )
    # expand_expr=False keeps the nested (non-distributed) form.
    assert expand_commutators_leibniz(expr, expand_expr=False) == (
        B
        * (
            C * (D * Commutator(A, E) + Commutator(A, D) * E)
            + Commutator(A, C) * D * E
        )
        + Commutator(A, B) * C * D * E
    )
    # Leibniz rule applied to the left factor product.
    expr = Commutator(A * B * C, D)
    assert expand_commutators_leibniz(expr) == (
        A * B * Commutator(C, D)
        + A * Commutator(B, D) * C
        + Commutator(A, D) * B * C
    )
    # Products on both sides.
    expr = Commutator(A * B, C * D)
    assert expand_commutators_leibniz(expr) == (
        A * Commutator(B, C) * D
        + C * A * Commutator(B, D)
        + C * Commutator(A, D) * B
        + Commutator(A, C) * B * D
    )
|
983,636 | e980ef21d6979f696bbcc03ebcbe8619a61481a8 | import argparse
import pathlib
import pandas as pd
import os
import sys
import h5py
import yaml
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['axes.spines.top'] = False
rcParams['axes.spines.right'] = False
def main():
    """Plot the mean gear position of up to nine drivers, one subplot each.

    Reads every ``*.hdf`` file from the input directory (sorted, so the
    driver numbering is deterministic) and saves a 3x3 grid of plots.
    """
    my_parser = argparse.ArgumentParser(description='Magic random forest classifier')
    my_parser.add_argument('-i', '--input',
                           action='store',
                           metavar='hdf5_input',
                           type=str,
                           required=True,
                           help='Input directory with source hdf5 files')

    # Execute the parse_args() method
    args = my_parser.parse_args()
    hdf5_input = args.input

    if not os.path.isdir(hdf5_input):
        print('The hdf5 input path specified is not a directory')
        sys.exit()

    # Subplot coordinates of the fixed 3x3 grid, one slot per driver.
    xs = [0, 0, 0, 1, 1, 1, 2, 2, 2]
    ys = [0, 1, 2, 0, 1, 2, 0, 1, 2]
    fig, axs = plt.subplots(3, 3, figsize=(12, 8))

    # Sort for deterministic ordering and cap at nine files so a larger
    # directory no longer raises IndexError on the coordinate tables.
    hdf_files = sorted(f for f in os.listdir(hdf5_input) if f.endswith('.hdf'))[:len(xs)]
    for i, file in enumerate(hdf_files):
        ax = axs[xs[i], ys[i]]
        data = pd.read_hdf(os.path.join(hdf5_input, file))
        ax.plot(data['can0_MO_Gangposition_mean'])
        ax.tick_params(axis='y')
        ax.grid(color='gray', linestyle='-', linewidth=0.25, alpha=0.5)
        ax.set_title('Driver %d' % (i + 1))
        ax.get_xaxis().set_visible(False)

    plt.savefig('../Thesis/images/gear_position.png', bbox_inches='tight')
    # plt.show()


# Guard so importing this module no longer runs the whole pipeline.
if __name__ == '__main__':
    main()
|
983,637 | de969f12b577d91078f8864a2ef377e64f77a0fe | from sys import maxint
class FoxAndSightseeing:
    """Minimal total walk length after skipping exactly one interior spot."""

    def getMin(self, position):
        """Return the minimum sum of step distances when one interior index
        i (0 < i < len-1) is removed from the visiting order 0,1,...,len-1.

        Runs on both Python 2 and 3: the original relied on ``xrange`` and
        ``sys.maxint``, neither of which exists in Python 3.

        :param position: list of spot coordinates in visiting order
        :return: minimal achievable total distance (``inf`` for fewer
            than three spots, where no interior index can be skipped)
        """
        l = len(position)
        best = float('inf')
        for i in range(1, l - 1):
            s = 0
            for j in range(1, l):
                if i == j:
                    continue
                # Index of the previously visited spot: hop over i when j
                # comes right after the skipped position.
                p = j - 2 if j == i + 1 else j - 1
                s += abs(position[j] - position[p])
            best = min(best, s)
        return best
|
983,638 | 14b77aa387350c0ec5acd30a10f9e3bf5d3f2078 | from flask import Flask
def create_app(config, db):
    """Application factory: build and configure a Flask app.

    :param config: object whose attributes seed ``app.config``
    :param db: database extension bound to the app via ``init_app``
        (presumably SQLAlchemy-style - confirm against callers)
    :return: the configured Flask instance
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object(config)
    # Values from the instance folder's config.py override the config object.
    app.config.from_pyfile('config.py')
    db.init_app(app)
    return app
|
983,639 | a25af77aac750ccfdc4666cb67bda36c387f77df | from typing import List
if __name__ == '__main__':
# 寻找两个正序数组的中位数 (归并法)时间复杂度O(m+n)
# def findMedianSortedArrays(nums1: List[int], nums2: List[int]) -> float:
# l1 = len(nums1)
# l2 = len(nums2)
# if l1 == 0 or l2 == 0:
# new = nums1 + nums2
# count = l1 + l2
# if count % 2 == 0:
# return (new[count // 2 - 1] + new[count // 2]) / 2
# else:
# return new[count // 2]
# new = []
# count, i, j = 0, 0, 0
# while count <= (l1 + l2) // 2:
# if i == l1:
# while j < l2:
# new.append(nums2[j])
# j += 1
# count += 1
# break
# if j == l2:
# while i < l1:
# new.append(nums1[i])
# i += 1
# count += 1
# break
# if nums1[i] < nums2[j]:
# new.append(nums1[i])
# i += 1
# count += 1
# else:
# new.append(nums2[j])
# j += 1
# count += 1
# print(new)
# print((l1 + l2) // 2)
# if (l1 + l2) % 2 == 0:
# a = new[-1]
# new.pop()
# b = new[-1]
#
# return (a + b) / 2
# else:
# return new[-1]
#
# 寻找两个正序数组的中位数 (代码简化)时间复杂度O(m+n)
# 只遍历一半。
def findMedianSortedArrays(nums1: List[int], nums2: List[int]) -> float:
a = len(nums1)
b = len(nums2)
length = (a + b) // 2
# 两数组的下角标和遍历到的值与上一个值(合并奇偶)。
cur, pre = -1, -1
for _ in range(length + 1):
pre = cur
if nums1 and (not nums2 or nums1[0] < nums2[0]):
cur = nums1[0]
del (nums1[0])
else:
cur = nums2[0]
del (nums2[0])
if (a + b) % 2 == 0:
return (pre + cur) / 2
return cur
nums1 = []
nums2 = [2]
print(findMedianSortedArrays(nums1, nums2))
|
983,640 | 0d52f70745d6b12d3fe1f201c9b148574aefc104 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import pickle
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score, f1_score
def f1_score_threshold(y_true, y_score, n_split=100):
    """Best F1 over ``n_split`` evenly spaced decision thresholds on y_score."""
    lo, hi = np.min(y_score), np.max(y_score)
    thresholds = np.linspace(lo, hi, n_split)
    return np.max([f1_score(y_true, y_score >= theta) for theta in thresholds])
def evaluate_group(y_true, y_score):
    """Return (ROC-AUC, AUPR, best-threshold F1) for one group of scores."""
    return (
        roc_auc_score(y_true, y_score),
        average_precision_score(y_true, y_score),
        f1_score_threshold(y_true, y_score),
    )
if __name__ == "__main__":
    with open("../../config/utils/temporal/evaluation_micro_temporal_HPODNets.json") as fp:
        config = json.load(fp)

    # load HPO annotations
    with open(config["dataset"], 'rb') as fp:
        dataset = pickle.load(fp)

    # list of proteins / HPO terms and the full annotation matrix
    protein_list = list(dataset["annotation"].index)
    term_list = list(dataset["annotation"].columns)
    full_annotation = dataset["annotation"]

    # frequency of each HPO term over all proteins
    term_freq = full_annotation.sum(axis=0)

    # Frequency bands: uncommon (11-30), common (31-100), very common
    # (101-300), extremely common (>=301). Terms annotated 10 times or
    # fewer are skipped, exactly as in the original elif chain.
    BANDS = ((11, 30), (31, 100), (101, 300), (301, float("inf")))

    def _load_result(path):
        """Load one method's prediction matrix aligned to the dataset axes."""
        with open(path) as fp:
            result = json.load(fp)
        return pd.DataFrame(result).fillna(0).reindex(
            index=protein_list, columns=term_list, fill_value=0
        )

    def _evaluate_and_print(mask, result):
        """Pool masked test scores per frequency band and print AUC/AUPR/F1.

        Replaces the two ~55-line copies of this logic (cv and non-cv paths)
        in the original script; the printed output is unchanged.
        """
        test_mask = mask.reindex(index=protein_list, columns=term_list, fill_value=0)
        test_annotation = test_mask * full_annotation
        y_true_bands = [np.zeros(0) for _ in BANDS]
        y_score_bands = [np.zeros(0) for _ in BANDS]
        for term in term_list:
            y_mask = test_mask[[term]].values.flatten()
            y_true = test_annotation[[term]].values.flatten()[y_mask == 1]
            # Skip terms whose test set is single-class (AUC undefined).
            if len(np.unique(y_true)) < 2:
                continue
            y_score = result[[term]].values.flatten()[y_mask == 1]
            for k, (lo, hi) in enumerate(BANDS):
                if lo <= term_freq[term] <= hi:
                    y_true_bands[k] = np.concatenate((y_true_bands[k], y_true))
                    y_score_bands[k] = np.concatenate((y_score_bands[k], y_score))
                    break
        metrics = [evaluate_group(yt, ys)
                   for yt, ys in zip(y_true_bands, y_score_bands)]
        aucs, auprs, f1s = zip(*metrics)
        print("AUC: %.4lf\t%.4lf\t%.4lf\t%.4lf" % aucs)
        print("AUPR: %.4lf\t%.4lf\t%.4lf\t%.4lf" % auprs)
        print("F1: %.4lf\t%.4lf\t%.4lf\t%.4lf" % f1s)

    if config["mode"] == "cv":
        # 5-fold cross-validation: one prediction file and mask per fold.
        for method in config["result"]:
            print(method["name"], end='\n')
            for fold in range(5):
                result = _load_result(method["prediction"][fold])
                print("Fold", fold)
                _evaluate_and_print(dataset["mask"][fold]["test"], result)
                print()
    else:
        # Temporal hold-out: a single prediction file and mask per method.
        for method in config["result"]:
            print(method["name"], end='\n')
            result = _load_result(method["prediction"])
            _evaluate_and_print(dataset["mask"]["test"], result)
            print()
|
983,641 | 53f4ae2303d002bff71b6d498fa447c65b0b60ee | # http://www.checkio.org/mission/three-words/
__author__ = 'Vitalii K'
def checkio(words):
    """Return True if the text contains three consecutive purely alphabetic words.

    Each whitespace-separated token maps to '1' (alphabetic) or '0', and we
    look for three '1's in a row.
    """
    flags = ''.join('1' if w.isalpha() else '0' for w in words.split())
    # `in` already yields a bool; no need for `True if ... else False`.
    return '111' in flags
# These asserts are only for self-checking and are not required for auto-testing.
if __name__ == '__main__':
    # Examples from the mission description.
    assert checkio("Hello World hello") == True, "Hello"
    assert checkio("He is 123 man") == False, "123 man"
    assert checkio("1 2 3 4") == False, "Digits"
    assert checkio("bla bla bla bla") == True, "Bla Bla"
    assert checkio("Hi") == False, "Hi"
983,642 | 65a4a8181fe6eb4d35366250ef309dbbae3129b8 | import Network
# Train the network: 100 epochs, batch size 100, checkpoint every 100 epochs.
# NOTE(review): parameter meanings inferred from names - confirm against Network.NeuralNet.
Network.NeuralNet(epoch=100,batch_size=100,save_period=100)
983,643 | 88255a4f218ab971598e764c74c300f9128f0243 | # Generated by Django 2.1.10 on 2019-08-22 23:19
import account.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Profile.pic optional with a default avatar image."""
    dependencies = [
        ('account', '0013_auto_20190822_2313'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='pic',
            # blank/null allow an empty picture; the upload path is computed
            # by account.models.pics_location.
            field=models.ImageField(blank=True, default='/pics/default_avatar.jpeg', null=True, upload_to=account.models.pics_location),
        ),
    ]
|
983,644 | 87b3d0762e9c5a0b27b06bc5368b870dafd83d7a | """
学习Python字典的用法
"""
MyCon = {12: 'big', 0: 'white', 354: 'gentle', 1: 'good'}
# Access the value stored under key 12 -> 'big'
print(MyCon[12])

# NOTE(review): the original bound this to the name `dict`, shadowing the
# built-in type; renamed to avoid that trap (and the `del dict` dance below
# that was needed to get the built-in back).
person = {'Name': 'Fiona', 'Age': 10, 'Class': 'Three'}
# len() -> number of keys in the dictionary
len(person)
# str() -> printable string representation of the dictionary
str(person)
# type() -> the variable's type (dict here)
type(person)

# Assignment copies the *reference*: both names point at the same object.
person_alias = person
# copy() makes a shallow, independent copy.
person_copy = person.copy()
# Mutations through `person` are visible via `person_alias`,
# but not via `person_copy`.

# Delete the entry whose key is 'Name'
del person['Name']
# Remove every entry from the dictionary
person.clear()
# Unbind the name entirely
del person

# dict.fromkeys(seq[, value]) builds a dictionary with the given keys.
seq = ('name', 'age', 'class')
# Without a value, every key maps to None.
new_dict = dict.fromkeys(seq)
print("新的字典为 : %s" % str(new_dict))
# With the value 10 shared by all keys.
new_dict = dict.fromkeys(seq, 10)
print("新的字典为 : %s" % str(new_dict))
# With a tuple value (one object shared by all keys).
new_dict = dict.fromkeys(seq, ('zs', 8, 'Two'))
print("新的字典为 : %s" % str(new_dict))
# Expected output:
# 新的字典为 : {'name': None, 'age': None, 'class': None}
# 新的字典为 : {'name': 10, 'age': 10, 'class': 10}
# 新的字典为 : {'name': ('zs', 8, 'Two'), 'age': ('zs', 8, 'Two'), 'class': ('zs', 8, 'Two')}
|
983,645 | f4d39adf15af1505215adea983657a27a82c1979 | import numpy
from numpy import linalg
def distance(pointA=(0, 0), pointB=(0, 0)):
    """Euclidean distance between two 2-D points.

    Generalized from the original: accepts any length-2 sequence (list,
    tuple, ndarray) instead of lists only, and the mutable-list default
    arguments are replaced by tuples.

    :param pointA: first point, a length-2 sequence
    :param pointB: second point, a length-2 sequence
    :return: the distance as a (numpy) float
    """
    assert len(pointA) == 2 and len(pointB) == 2
    vector = numpy.asarray(pointA, dtype=float) - numpy.asarray(pointB, dtype=float)
    return linalg.norm(vector)
|
983,646 | 64820f796d6d0a2dcfdbc6697fc8f2a59f8695b5 | """Fake FLI USB camera module"""
import numpy as np
import astropy.io.fits as pyfits
import time
import os
import threading
class FliError(Exception):
    """Exception raised for (simulated) FLI camera errors."""
    pass
def numberOfCamera():
    """Return the number of available (simulated) FLI cameras."""
    # `numCams` is initialized at the bottom of this module.
    return numCams
def getLibVersion():
    """Return the (simulated) FLI SDK version string."""
    version = "Software Development Library for Linux 1.999.1"
    return version
class Camera:
    """Fake FLI USB camera: mimics the real driver's API for testing.

    State transitions CLOSED -> READY -> EXPOSING are tracked in
    ``self.status`` and every access to mutable state is guarded by
    ``self.lock`` because exposures run in a background thread.
    """
    def __init__(self, id, devsn, imgPath=None):
        """(id) : index of the camera device

        :param devsn: serial-number string reported in FITS headers
        :param imgPath: optional FITS file supplying simulated raw data;
            a file with multiple extensions holds one image per camera id
        """
        if id < 0 or id >= numCams:
            raise FliError("Camera[%d] not available" % id)
        self.id = id
        self.status = CLOSED
        self.exposureID = 0
        self.agcid = -1
        self.abort = 0
        self.temp = None
        self.devname = "MicroLine ML4720"
        self.devsn = devsn
        self.hwRevision = 256
        self.fwRevision = 512
        self.mode = 0
        # read simulated image, contains single or 6 image extensions
        if imgPath is not None:
            hdulist = pyfits.open(imgPath)
            if len(hdulist) > 1:
                if hdulist[id+1].data is None:
                    # Empty extension: fall back to a black frame.
                    self.rawdata = np.zeros((1033, 1072), dtype=np.uint16)
                else:
                    self.rawdata = hdulist[id+1].data.astype(np.uint16)
            else:
                self.rawdata = hdulist[0].data.astype(np.uint16)
        else:
            self.rawdata = np.zeros((1033, 1072), dtype=np.uint16)
        # Guards status/abort/exposure state shared with the exposure thread.
        self.lock = threading.Lock()
    def getStatusStr(self):
        """Return the current status as a printable string."""
        with self.lock:
            status = self.status
        return Status[status]
    def isClosed(self):
        with self.lock:
            status = self.status
        return status == CLOSED
    def isReady(self):
        with self.lock:
            status = self.status
        return status == READY
    def isExposing(self):
        with self.lock:
            status = self.status
        return status == EXPOSING
    def isSetmode(self):
        with self.lock:
            status = self.status
        return status == SETMODE
    def open(self):
        """Open the camera device"""
        if dev[self.id] != FLI_INVALID_DEVICE:
            raise FliError("Device already opened")
        dev[self.id] = FLIDEVICE_CAMERA
        # set default parameters
        self.setTemperature(CCD_TEMP)
        self.setHBin(1)
        self.setVBin(1)
        self.setExpTime(0)
        self.setFrame(0, 0, 1072, 1033)
        with self.lock:
            self.status = READY
            self.visibleExpArea = (24, 9, 1048, 1033)
            self.defaultExpArea = (0, 0, 1072, 1033)
            self.expArea = (0, 0, 1072, 1033)
            self.regions = ((0, 0, 0), (0, 0, 0))
    def close(self):
        """Close the camera device"""
        if dev[self.id] == FLI_INVALID_DEVICE:
            raise FliError("Device already closed or not initialized")
        dev[self.id] = FLI_INVALID_DEVICE
        with self.lock:
            self.status = CLOSED
    def setExpTime(self, exptime):
        """Set the exposure time in ms"""
        with self.lock:
            self.exptime = exptime
    def setHBin(self, hbin):
        """Set the horizontal binning"""
        with self.lock:
            self.hbin = hbin
    def setVBin(self, vbin):
        """Set the vertical binning"""
        with self.lock:
            self.vbin = vbin
    def setFrame(self, x1, y1, width, height):
        """Set the image area (origin x1,y1 plus width x height in pixels)."""
        with self.lock:
            self.xsize = width
            self.ysize = height
            self.expArea = (x1, y1, x1+width, y1+height)
    def resetFrame(self):
        """Reset the image area to the full default frame and 1x1 binning."""
        with self.lock:
            hbin = self.hbin
            vbin = self.vbin
        if hbin != 1:
            self.setHBin(1)
        if vbin != 1:
            self.setVBin(1)
        with self.lock:
            self.expArea = self.defaultExpArea
            x1, y1, x2, y2 = self.expArea
            self.xsize = x2 - x1
            self.ysize = y2 - y1
    def setTemperature(self, temp):
        """Set the CCD temperature"""
        with self.lock:
            self.temp = temp
    def getTemperature(self):
        """Get the CCD temperature"""
        with self.lock:
            temp = self.temp
        return temp
    def getCoolerPower(self):
        """Get the cooler power in percentage"""
        # Simulated: constant value.
        return 90.0
    def getPixelSize(self):
        """Get the pixel sizes in micron"""
        return (0.000013, 0.000013)
    def wfits(self, filename=None):
        """Write the image to a FITS file.

        NOTE(review): requires a prior expose()/expose_test(); otherwise
        ``self.data`` is unset and this raises AttributeError rather than
        the FliError below - confirm intended behavior.
        """
        with self.lock:
            dark = self.dark
        if not filename:
            # Name dark frames and object frames differently.
            if dark != 0:
                filename = self.getNextFilename("dark")
            else:
                filename = self.getNextFilename("object")
        with self.lock:
            if(self.data.size == 0):
                raise FliError("No image available")
            hdu = pyfits.PrimaryHDU(self.data)
        hdr = hdu.header
        with self.lock:
            hdr.set('DATE', self.timestamp, 'exposure begin date')
            hdr.set('INSTRUME', self.devname, 'this instrument')
            hdr.set('SERIAL', self.devsn, 'serial number')
            hdr.set('EXPTIME', self.exptime, 'exposure time (ms)')
            hdr.set('VBIN', self.vbin, 'vertical binning')
            hdr.set('HBIN', self.hbin, 'horizontal binning')
            hdr.set('CCD-TEMP', self.temp, 'CCD temperature')
        if dark != 0:
            hdr.set('SHUTTER', 'CLOSE', 'shutter status')
        else:
            hdr.set('SHUTTER', 'OPEN', 'shutter status')
        hdr.set('CCDAREA', '[%d:%d,%d:%d]' % self.expArea, 'image area')
        hdu.writeto(filename, overwrite=True, checksum=True)
        with self.lock:
            self.filename = filename
    def getNextFilename(self, expType):
        """Fetch the next image filename"""
        with self.lock:
            self.exposureID += 1
            exposureID = self.exposureID
        path = os.path.join("$ICS_MHS_DATA_ROOT", 'agcc')
        path = os.path.expandvars(os.path.expanduser(path))
        if not os.path.isdir(path):
            os.makedirs(path, 0o755)
        with self.lock:
            timestamp = self.timestamp
        return os.path.join(path, 'AGC%d_%s_%06d_%s.fits' % \
            (self.agcid + 1, expType, exposureID, timestamp))
    def cancelExposure(self):
        """Cancel current exposure"""
        with self.lock:
            status = self.status
        if status == EXPOSING:
            # The exposure thread polls this flag and stops early.
            with self.lock:
                self.abort = 1
    def expose(self, dark=False, blocking=True):
        """Do exposure and return the image.

        :param dark: take a dark frame (shutter closed) when True
        :param blocking: when True, wait for the exposure thread to finish
        """
        with self.lock:
            status = self.status
        if status != READY:
            raise FliError("Camera not ready, abort expose command")
        with self.lock:
            self.dark = dark
            self.tstart = time.time()
            self.timestamp = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(self.tstart))
            self.status = EXPOSING
        thr = threading.Thread(target=self.exposeHandler)
        thr.start()
        if blocking:
            thr.join()
    def exposeHandler(self):
        # Check if the exposure is done and write the image
        tstart = time.time();
        with self.lock:
            # add 350ms readout time
            exptime = (self.exptime + 350.0) / 1000.0
        # Poll until the simulated exposure elapses or an abort is requested.
        while (time.time() - tstart < exptime):
            time.sleep(POLL_TIME)
            with self.lock:
                abort = self.abort
            if abort != 0:
                break
        with self.lock:
            if self.abort != 0:
                # Exposure aborted
                self.abort = 0
                self.tend = 0
            else:
                xsize = self.xsize
                ysize = self.ysize
                # Crop the simulated raw frame to the configured image area.
                self.data = self.rawdata[self.expArea[1]:self.expArea[3], self.expArea[0]:self.expArea[2]]
                self.tend = time.time()
            self.status = READY
    def expose_test(self):
        """Return the test image"""
        with self.lock:
            self.dark = 1
            self.tstart = time.time()
            self.timestamp = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(self.tstart))
            imagesize = (self.expArea[3] - self.expArea[1],
                         self.expArea[2] - self.expArea[0])
            # All-ones frame, no timing simulation.
            self.data = np.ones(shape=imagesize, dtype=np.uint16)
            self.tend = time.time()
    def getModeString(self, mode):
        """Get the camera mode string"""
        if mode == 0:
            return "4 MHz"
        elif mode == 1:
            return "500 KHz"
        else:
            raise FliError("FLIGetCameraModeString failed")
    def getMode(self):
        """Get the camera mode string"""
        with self.lock:
            mode = self.mode
        return mode
    def setMode(self, mode):
        """Get the camera mode string"""
        if mode == 0 or mode == 1:
            with self.lock:
                self.mode = mode
        else:
            raise FliError("FLISetCameraMode failed")
    def getTotalTime(self):
        """ get the total readout + exposure time in second """
        with self.lock:
            # tend == 0 marks an aborted exposure.
            if self.tend == 0:
                total = -1
            else:
                total = self.tend - self.tstart
        return total
# module initialization
# Camera life-cycle states and their printable names.
CLOSED, READY, EXPOSING, SETMODE = range(4)
Status = {CLOSED:"CLOSED", READY:"READY", EXPOSING:"EXPOSING", SETMODE:"SETMODE"}
# Polling interval (s) while an exposure is in progress.
POLL_TIME = 0.02
# Default CCD temperature applied on open() (degrees C).
CCD_TEMP = -30
# Device-handle markers: 0 = no device attached, 1 = camera attached.
FLI_INVALID_DEVICE, FLIDEVICE_CAMERA = 0, 1
# Six simulated cameras; `dev` tracks each slot's open/closed handle.
numCams = 6
dev = np.zeros(numCams, int)
|
983,647 | 2a3e1122fa8886e52e0faf97a8bb248fc08ce96d | import ast
import pathlib
import astor
import yapf.yapflib.yapf_api as yapf_api
from app.obfuscation.intensio import intensio_replace
from app.obfuscation.tree_scanner import TreeScanner
from app.obfuscation.semantic_obfuscator import Obfuscator, ObfuscatedTreeResolver
from app.obfuscation.syntax_obfuscator import ObfuscateBySyntaxGenerator
from app.obfuscation import obfuscation_settings
def obfuscate(source_code):
    """Run the full obfuscation pipeline over Python source text.

    Pipeline: rename variables -> parse to AST -> scan the tree ->
    semantic obfuscation -> resolve injected helper definitions ->
    regenerate source via the syntax obfuscator -> optionally reformat
    with yapf using the obfuscated code style.

    :param source_code: Python source as a string
    :return: obfuscated source as a string
    """
    vars_renamer = intensio_replace.ObfuscateRenameVars()
    vars_renamed_source_code = vars_renamer.rename_vars(source_code)
    tree = ast.parse(vars_renamed_source_code)
    tree_scanner = TreeScanner()
    tree_scanner.visit(tree)
    obfuscator = Obfuscator(tree_scanner)
    obfuscated_tree = obfuscator.visit(tree)
    # Helper functions added during obfuscation must be wired into the tree.
    tree_resolver = ObfuscatedTreeResolver(tree_scanner, obfuscator.additional_function_defs)
    tree_resolver.resolve()
    obfuscated_source = astor.to_source(obfuscated_tree, source_generator_class=ObfuscateBySyntaxGenerator)
    style_config_path = pathlib.Path(__file__).parent / 'obfuscated_code_style' / '.style.yapf'
    if obfuscation_settings.settings['syntax']['code_style_obfuscation']['is_on']:
        # FormatCode returns (formatted_source, changed); keep only the source.
        obfuscated_source = yapf_api.FormatCode(obfuscated_source, style_config=str(style_config_path))[0]
    return obfuscated_source
|
983,648 | d28c814ead865db85a8630ee31ad59e74f7ee1db | import os
import sys
from network import data_features_dumper as dr
from network import computation_graph
from network.trainer_parse import Trainer
# path = '/home/srq/Datasets/tables/unlv-for-nlp/train'
# glove_path = '/media/srq/Seagate Expansion Drive/Models/GloVe/glove.840B.300d.txt'
#
# data_reader = dr.DataReader(path, glove_path, 'train')
# data_reader.load()
# Build the trainer and run training, reusing previously dumped features
# (dump_features_again=False skips the feature-extraction pass).
trainer = Trainer()
trainer.init(dump_features_again=False)
trainer.train()
983,649 | a9c50e5550752b9dad948a9b9e9f057c05a38bc9 | import pytest
@pytest.mark.parametrize('array, result', [([1, 2, 3, 4, 5, 0, 9, 9, 9], [1, 2, 3, 4, 5, 9, 9, 9, 0]),
                         (["a",0,0,"b","c","d",0,1,0,1,0,3,0,1,9,0,0,0,0,9],["a","b","c","d",1,1,3,1,9,9,0,0,0,0,0,0,0,0,0,0]),
                         ([0,1,None,2,False,1,0],[1,None,2,False,1,0,0]), (["a","b"], ["a","b"]), (["a"],["a"]), ([0,0],[0,0]),
                         ([0],[0]), ([],[])])
def test_move_zeros(array, result):
    """move_zeros keeps non-zero items in order and pushes zeros to the end.

    Note the [0,1,None,2,False,1,0] case: False must NOT be treated as zero.
    """
    from zeros import move_zeros
    assert move_zeros(array) == result
|
983,650 | bd5867123bd6e0c4c8a7944e0e95bb30ec44d357 | # 항목 값으로는 0을 갖는 2*3*4 형태의 3차원 배열을 생성하는
# Exercise: using list comprehension, build a 2*3*4 three-dimensional
# array whose items are all 0.
# Expected output:
# [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0] ] ]
# Build the 2*3*4 zero array with a nested list comprehension, as the
# exercise statement requests (the original used three explicit loops).
lst2 = [[[0 for i in range(4)] for j in range(3)] for k in range(2)]
print(lst2)
983,651 | af4d9812403ffbfeeed0c221cff4567d6afdf0b6 | #!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
"""Checks for correct output and errors
"""
class TestMaxInteger(unittest.TestCase):
    """Cases for max_integer: typical, negative, float, nested, edge inputs."""
    def test_max_integer(self):
        # Typical, negative, float and large-integer inputs.
        self.assertEqual(max_integer([1, 3, 4, 2]), 4)
        self.assertEqual(max_integer([-5, 4, 10]), 10)
        self.assertEqual(max_integer([-15, -12]), -12)
        self.assertEqual(max_integer([4.1, 4, 2, 3]), 4.1)
        self.assertEqual(max_integer([57298102983, 2345, 1234]), 57298102983)
        # Empty / missing argument must yield None.
        self.assertEqual(max_integer([]), None)
        # Booleans compare as 0/1; nested lists compare lexicographically.
        self.assertEqual(max_integer([True, False, 200]), 200)
        self.assertEqual(max_integer((1,2,3,4,5)), 5)
        self.assertEqual(max_integer([[1, 2, 3, 4], [1, 2, 3, 50]]),\
            [1, 2, 3, 50])
        self.assertEqual(max_integer(), None)
        self.assertEqual(max_integer([3]), 3)
        # Mixed int/str comparison must raise, and the function needs a docstring.
        self.assertRaises(TypeError, max_integer, [1, 2, "a"])
        self.assertTrue(len(max_integer.__doc__) != 0)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
983,652 | 2718376ef8b6df72bd5a2f1ca787df045129b76e | from mock import Mock
from mock import PropertyMock
import django
from django.test import TestCase
from django.utils.translation import override
from rest_framework import views
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory
from rest_framework_extensions.key_constructor.bits import (
KeyBitDictBase,
UniqueMethodIdKeyBit,
LanguageKeyBit,
FormatKeyBit,
UserKeyBit,
HeadersKeyBit,
RequestMetaKeyBit,
QueryParamsKeyBit,
UniqueViewIdKeyBit,
PaginationKeyBit,
ListSqlQueryKeyBit,
RetrieveSqlQueryKeyBit,
ListModelKeyBit,
RetrieveModelKeyBit,
ArgsKeyBit,
KwargsKeyBit,
)
from .models import BitTestModel
factory = APIRequestFactory()
class KeyBitDictBaseTest(TestCase):
    """Tests for KeyBitDictBase: building a cache-key bit from a source dict."""

    def setUp(self):
        # Baseline keyword arguments accepted by every KeyBit.get_data() call.
        self.kwargs = {
            'params': [],
            'view_instance': None,
            'view_method': None,
            'request': None,
            'args': None,
            'kwargs': None
        }

    def test_should_raise_exception_if__get_source_dict__is_not_implemented(self):
        class KeyBitDictChild(KeyBitDictBase):
            pass
        try:
            KeyBitDictChild().get_data(**self.kwargs)
        except NotImplementedError:
            pass
        else:
            self.fail('Should raise NotImplementedError if "get_source_dict" method is not implemented')

    def test_should_return_empty_dict_if_source_dict_is_empty(self):
        class KeyBitDictChild(KeyBitDictBase):
            def get_source_dict(self, **kwargs):
                return {}
        self.assertEqual(KeyBitDictChild().get_data(**self.kwargs), {})

    def test_should_retrieve_data_by_keys_from_params_list_from_source_dict(self):
        class KeyBitDictChild(KeyBitDictBase):
            def get_source_dict(self, **kwargs):
                return {
                    'id': 1,
                    'geobase_id': 123,
                    'name': 'London',
                }
        self.kwargs['params'] = ['name', 'geobase_id']
        # Values are coerced to text when building the key bit.
        expected = {
            'name': u'London',
            'geobase_id': u'123',
        }
        self.assertEqual(KeyBitDictChild().get_data(**self.kwargs), expected)

    def test_should_not_retrieve_data_with_none_value(self):
        class KeyBitDictChild(KeyBitDictBase):
            def get_source_dict(self, **kwargs):
                return {
                    'id': 1,
                    'geobase_id': 123,
                    'name': None,
                }
        self.kwargs['params'] = ['name', 'geobase_id']
        # None values are dropped entirely rather than serialized.
        expected = {
            'geobase_id': u'123',
        }
        self.assertEqual(KeyBitDictChild().get_data(**self.kwargs), expected)

    def test_should_force_text_for_value(self):
        class KeyBitDictChild(KeyBitDictBase):
            def get_source_dict(self, **kwargs):
                return {
                    'id': 1,
                    'geobase_id': 123,
                    'name': 'Лондон',
                }
        self.kwargs['params'] = ['name', 'geobase_id']
        expected = {
            'geobase_id': u'123',
            'name': u'Лондон',
        }
        self.assertEqual(KeyBitDictChild().get_data(**self.kwargs), expected)

    def test_should_prepare_key_before_retrieving(self):
        class KeyBitDictChild(KeyBitDictBase):
            def get_source_dict(self, **kwargs):
                return {
                    'id': 1,
                    'GEOBASE_ID': 123,
                    'NAME': 'London',
                }

            def prepare_key_for_value_retrieving(self, key):
                # Source dict uses upper-case keys while params stay lower-case.
                return key.upper()
        self.kwargs['params'] = ['name', 'geobase_id']
        expected = {
            'geobase_id': u'123',
            'name': u'London',
        }
        self.assertEqual(KeyBitDictChild().get_data(**self.kwargs), expected)

    def test_should_prepare_key_before_value_assignment(self):
        class KeyBitDictChild(KeyBitDictBase):
            def get_source_dict(self, **kwargs):
                return {
                    'id': 1,
                    'geobase_id': 123,
                    'name': 'London',
                }

            def prepare_key_for_value_assignment(self, key):
                # Keys are transformed before being written into the result dict.
                return key.upper()
        self.kwargs['params'] = ['name', 'geobase_id']
        expected = {
            'GEOBASE_ID': u'123',
            'NAME': u'London',
        }
        self.assertEqual(KeyBitDictChild().get_data(**self.kwargs), expected)

    def test_should_produce_exact_results_for_equal_params_attribute_with_different_items_ordering(self):
        class KeyBitDictChild(KeyBitDictBase):
            def get_source_dict(self, **kwargs):
                return {
                    'id': 1,
                    'GEOBASE_ID': 123,
                    'NAME': 'London',
                }
        self.kwargs['params'] = ['name', 'geobase_id']
        response_1 = KeyBitDictChild().get_data(**self.kwargs)
        self.kwargs['params'] = ['geobase_id', 'name']
        response_2 = KeyBitDictChild().get_data(**self.kwargs)
        # Param ordering must not change the generated key bit.
        self.assertEqual(response_1, response_2)
class UniqueViewIdKeyBitTest(TestCase):
    """UniqueViewIdKeyBit should yield '<module path>.<view class name>'."""

    def test_resulting_dict(self):
        class TestView(views.APIView):
            def get(self, request, *args, **kwargs):
                return Response('Response from method')
        view_instance = TestView()
        kwargs = {
            'params': None,
            'view_instance': view_instance,
            'view_method': view_instance.get,
            'request': None,
            'args': None,
            'kwargs': None
        }
        expected = u'tests_app.tests.unit.key_constructor.bits.tests' + u'.' + u'TestView'
        self.assertEqual(UniqueViewIdKeyBit().get_data(**kwargs), expected)


class UniqueMethodIdKeyBitTest(TestCase):
    """UniqueMethodIdKeyBit appends the view method name to the view id."""

    def test_resulting_dict(self):
        class TestView(views.APIView):
            def get(self, request, *args, **kwargs):
                return Response('Response from method')
        view_instance = TestView()
        kwargs = {
            'params': None,
            'view_instance': view_instance,
            'view_method': view_instance.get,
            'request': None,
            'args': None,
            'kwargs': None
        }
        expected = u'tests_app.tests.unit.key_constructor.bits.tests' + u'.' + u'TestView' + u'.' + u'get'
        self.assertEqual(UniqueMethodIdKeyBit().get_data(**kwargs), expected)
class LanguageKeyBitTest(TestCase):
    """LanguageKeyBit should reflect the currently active translation."""

    def test_resulting_dict(self):
        kwargs = {
            'params': None,
            'view_instance': None,
            'view_method': None,
            'request': None,
            'args': None,
            'kwargs': None
        }
        expected = u'br'
        with override('br'):
            self.assertEqual(LanguageKeyBit().get_data(**kwargs), expected)


class FormatKeyBitTest(TestCase):
    """FormatKeyBit should use the request's accepted renderer format."""

    def test_resulting_dict(self):
        kwargs = {
            'params': None,
            'view_instance': None,
            'view_method': None,
            'request': factory.get(''),
            'args': None,
            'kwargs': None
        }
        kwargs['request'].accepted_renderer = Mock(format='super-format')
        expected = u'super-format'
        self.assertEqual(FormatKeyBit().get_data(**kwargs), expected)


class UserKeyBitTest(TestCase):
    """UserKeyBit: 'anonymous' unless request.user is authenticated, else the id."""

    def setUp(self):
        self.kwargs = {
            'params': None,
            'view_instance': None,
            'view_method': None,
            'request': factory.get(''),
            'args': None,
            'kwargs': None
        }
        self.user = Mock()
        self.user.id = 123
        # is_authenticated is a property on modern Django user models, so it is
        # mocked as a PropertyMock attached to the user's type.
        self.is_authenticated = PropertyMock(return_value=False)
        type(self.user).is_authenticated = self.is_authenticated

    def test_without_user_in_request(self):
        expected = u'anonymous'
        self.assertEqual(UserKeyBit().get_data(**self.kwargs), expected)

    def test_with_not_autenticated_user(self):
        self.kwargs['request'].user = self.user
        expected = u'anonymous'
        self.assertEqual(UserKeyBit().get_data(**self.kwargs), expected)

    def test_with_autenticated_user(self):
        self.kwargs['request'].user = self.user
        self.is_authenticated.return_value = True
        expected = u'123'
        self.assertEqual(UserKeyBit().get_data(**self.kwargs), expected)
class HeadersKeyBitTest(TestCase):
    """HeadersKeyBit: pick the request headers named in params (case-insensitive)."""

    def test_resulting_dict(self):
        self.kwargs = {
            'params': ['Accept-Language', 'X-Geobase-Id', 'Not-Existing-Header'],
            'view_instance': None,
            'view_method': None,
            'request': factory.get('', **{
                'HTTP_ACCEPT_LANGUAGE': 'Ru',
                'HTTP_X_GEOBASE_ID': 123
            }),
            'args': None,
            'kwargs': None
        }
        # Missing headers are omitted; result keys come back lower-cased.
        expected = {
            'accept-language': u'Ru',
            'x-geobase-id': u'123'
        }
        self.assertEqual(HeadersKeyBit().get_data(**self.kwargs), expected)


class RequestMetaKeyBitTest(TestCase):
    """RequestMetaKeyBit: pick raw WSGI environ values from request.META."""

    def test_resulting_dict(self):
        self.kwargs = {
            'params': ['REMOTE_ADDR', 'REMOTE_HOST', 'not_existing_key'],
            'view_instance': None,
            'view_method': None,
            'request': factory.get('', **{
                'REMOTE_ADDR': '127.0.0.1',
                'REMOTE_HOST': 'localhost'
            }),
            'args': None,
            'kwargs': None
        }
        expected = {
            'REMOTE_ADDR': u'127.0.0.1',
            'REMOTE_HOST': u'localhost'
        }
        self.assertEqual(RequestMetaKeyBit().get_data(**self.kwargs), expected)


class QueryParamsKeyBitTest(TestCase):
    """QueryParamsKeyBit: pick query-string parameters; '*' means all of them."""

    def setUp(self):
        self.kwargs = {
            'params': None,
            'view_instance': None,
            'view_method': None,
            'request': factory.get('?part=Londo&callback=jquery_callback'),
            'args': None,
            'kwargs': None
        }

    def test_resulting_dict(self):
        self.kwargs['params'] = ['part', 'callback', 'not_existing_param']
        expected = {
            'part': u'Londo',
            'callback': u'jquery_callback'
        }
        self.assertEqual(QueryParamsKeyBit().get_data(**self.kwargs), expected)

    def test_resulting_dict_all_params(self):
        self.kwargs['params'] = '*'
        expected = {
            'part': u'Londo',
            'callback': u'jquery_callback'
        }
        self.assertEqual(QueryParamsKeyBit().get_data(**self.kwargs), expected)

    def test_default_params_is_all_args(self):
        self.assertEqual(QueryParamsKeyBit().params, '*')
class PaginationKeyBitTest(TestCase):
    """PaginationKeyBit: pull the paginator's query params out of the request."""

    def setUp(self):
        self.kwargs = {
            'params': None,
            # spec_set limits the mock to a 'paginator' attribute only.
            'view_instance': Mock(spec_set=['paginator']),
            'view_method': None,
            'request': factory.get('?page_size=10&page=1&limit=5&offset=15&cursor=foo'),
            'args': None,
            'kwargs': None
        }

    def test_view_without_pagination_arguments(self):
        self.kwargs['view_instance'] = Mock(spec_set=[])
        self.assertEqual(PaginationKeyBit().get_data(**self.kwargs), {})

    def test_view_with_empty_pagination_arguments(self):
        self.kwargs['view_instance'].paginator.page_query_param = None
        self.kwargs['view_instance'].paginator.page_size_query_param = None
        self.assertEqual(PaginationKeyBit().get_data(**self.kwargs), {})

    def test_view_with_page_kwarg(self):
        self.kwargs['view_instance'].paginator.page_query_param = 'page'
        self.kwargs['view_instance'].paginator.page_size_query_param = None
        self.assertEqual(PaginationKeyBit().get_data(**self.kwargs), {'page': '1'})

    def test_view_with_paginate_by_param(self):
        self.kwargs['view_instance'].paginator.page_query_param = None
        self.kwargs['view_instance'].paginator.page_size_query_param = 'page_size'
        self.assertEqual(PaginationKeyBit().get_data(**self.kwargs), {'page_size': '10'})

    def test_view_with_all_pagination_attrs(self):
        self.kwargs['view_instance'].paginator.page_query_param = 'page'
        self.kwargs['view_instance'].paginator.page_size_query_param = 'page_size'
        self.assertEqual(PaginationKeyBit().get_data(**self.kwargs), {'page_size': '10', 'page': '1'})

    def test_view_with_all_pagination_attrs__without_query_params(self):
        self.kwargs['view_instance'].paginator.page_query_param = 'page'
        self.kwargs['view_instance'].paginator.page_size_query_param = 'page_size'
        self.kwargs['request'] = factory.get('')
        self.assertEqual(PaginationKeyBit().get_data(**self.kwargs), {})

    def test_view_with_offset_pagination_attrs(self):
        self.kwargs['view_instance'].paginator.limit_query_param = 'limit'
        self.kwargs['view_instance'].paginator.offset_query_param = 'offset'
        self.assertEqual(PaginationKeyBit().get_data(**self.kwargs), {'limit': '5', 'offset': '15'})

    def test_view_with_cursor_pagination_attrs(self):
        self.kwargs['view_instance'].paginator.cursor_query_param = 'cursor'
        self.assertEqual(PaginationKeyBit().get_data(**self.kwargs), {'cursor': 'foo'})
class ListSqlQueryKeyBitTest(TestCase):
    """ListSqlQueryKeyBit: key bit is the SQL of the view's filtered queryset."""

    def setUp(self):
        self.kwargs = {
            'params': None,
            'view_instance': Mock(),
            'view_method': None,
            'request': None,
            'args': None,
            'kwargs': None
        }
        self.kwargs['view_instance'].get_queryset = Mock(return_value=BitTestModel.objects.all())
        self.kwargs['view_instance'].filter_queryset = lambda x: x.filter(is_active=True)

    def test_should_use_view__get_queryset__and_filter_it_with__filter_queryset(self):
        # Django 3.1 changed boolean SQL generation (no explicit "= True").
        if django.VERSION >= (3, 1):
            expected = ('SELECT "unit_bittestmodel"."id", "unit_bittestmodel"."is_active" '
                        'FROM "unit_bittestmodel" '
                        'WHERE "unit_bittestmodel"."is_active"')
        else:
            expected = ('SELECT "unit_bittestmodel"."id", "unit_bittestmodel"."is_active" '
                        'FROM "unit_bittestmodel" '
                        'WHERE "unit_bittestmodel"."is_active" = True')
        response = ListSqlQueryKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, expected)

    def test_should_return_none_if_empty_queryset(self):
        self.kwargs['view_instance'].filter_queryset = lambda x: x.none()
        response = ListSqlQueryKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, None)

    def test_should_return_none_if_empty_result_set_raised(self):
        # pk__in=[] makes the ORM raise EmptyResultSet when compiling the SQL.
        self.kwargs['view_instance'].filter_queryset = lambda x: x.filter(pk__in=[])
        response = ListSqlQueryKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, None)


class ListModelKeyBitTest(TestCase):
    """ListModelKeyBit: key bit is the concrete values of the filtered queryset."""

    def setUp(self):
        self.kwargs = {
            'params': None,
            'view_instance': Mock(),
            'view_method': None,
            'request': None,
            'args': None,
            'kwargs': None
        }
        self.kwargs['view_instance'].get_queryset = Mock(return_value=BitTestModel.objects.all())
        self.kwargs['view_instance'].filter_queryset = lambda x: x.filter(is_active=True)

    def test_should_use_view__get_queryset__and_filter_it_with__filter_queryset(self):
        # create 4 models
        BitTestModel.objects.create(is_active=True)
        BitTestModel.objects.create(is_active=True)
        BitTestModel.objects.create(is_active=True)
        BitTestModel.objects.create(is_active=True)
        expected = u"[(1, True), (2, True), (3, True), (4, True)]"
        response = ListModelKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, expected)

    def test_should_return_none_if_empty_queryset(self):
        self.kwargs['view_instance'].filter_queryset = lambda x: x.none()
        response = ListModelKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, None)

    def test_should_return_none_if_empty_result_set_raised(self):
        self.kwargs['view_instance'].filter_queryset = lambda x: x.filter(pk__in=[])
        response = ListModelKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, None)
class RetrieveSqlQueryKeyBitTest(TestCase):
    """RetrieveSqlQueryKeyBit: SQL for a single object looked up via lookup_field."""

    def setUp(self):
        self.kwargs = {
            'params': None,
            'view_instance': Mock(),
            'view_method': None,
            'request': None,
            'args': None,
            'kwargs': None
        }
        self.kwargs['view_instance'].kwargs = {'id': 123}
        self.kwargs['view_instance'].lookup_field = 'id'
        self.kwargs['view_instance'].lookup_url_kwarg = None
        self.kwargs['view_instance'].get_queryset = Mock(return_value=BitTestModel.objects.all())
        self.kwargs['view_instance'].filter_queryset = lambda x: x.filter(is_active=True)

    def test_should_use_view__get_queryset__and_filter_it_with__filter_queryset__and_filter_by__lookup_field(self):
        # Django 3.1 changed boolean SQL generation (no explicit "= True").
        if django.VERSION >= (3, 1):
            expected = ('SELECT "unit_bittestmodel"."id", "unit_bittestmodel"."is_active" '
                        'FROM "unit_bittestmodel" '
                        'WHERE ("unit_bittestmodel"."is_active" AND "unit_bittestmodel"."id" = 123)')
        else:
            expected = ('SELECT "unit_bittestmodel"."id", "unit_bittestmodel"."is_active" '
                        'FROM "unit_bittestmodel" '
                        'WHERE ("unit_bittestmodel"."is_active" = True AND "unit_bittestmodel"."id" = 123)')
        response = RetrieveSqlQueryKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, expected)

    def test_should_use_view__get_queryset__and_filter_it_with__filter_queryset__and_filter_by__lookup_field__and_get_kwarg_from_kwarg_lookup(self):
        # lookup_url_kwarg takes precedence over lookup_field for URL kwargs.
        self.kwargs['view_instance'].kwargs = {'custom_kwarg_id': 456}
        self.kwargs['view_instance'].lookup_url_kwarg = 'custom_kwarg_id'
        if django.VERSION >= (3, 1):
            expected = ('SELECT "unit_bittestmodel"."id", "unit_bittestmodel"."is_active" '
                        'FROM "unit_bittestmodel" '
                        'WHERE ("unit_bittestmodel"."is_active" AND "unit_bittestmodel"."id" = 456)')
        else:
            expected = ('SELECT "unit_bittestmodel"."id", "unit_bittestmodel"."is_active" '
                        'FROM "unit_bittestmodel" '
                        'WHERE ("unit_bittestmodel"."is_active" = True AND "unit_bittestmodel"."id" = 456)')
        response = RetrieveSqlQueryKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, expected)

    def test_with_bad_lookup_value(self):
        # A non-integer pk must not blow up; the bit degrades to None.
        self.kwargs['view_instance'].kwargs = {'id': "I'm ganna hack u are!"}
        response = RetrieveSqlQueryKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, None)

    def test_should_return_none_if_empty_queryset(self):
        self.kwargs['view_instance'].filter_queryset = lambda x: x.none()
        response = RetrieveSqlQueryKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, None)

    def test_should_return_none_if_empty_result_set_raised(self):
        self.kwargs['view_instance'].filter_queryset = lambda x: x.filter(pk__in=[])
        response = RetrieveSqlQueryKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, None)


class RetrieveModelKeyBitTest(TestCase):
    """RetrieveModelKeyBit: concrete values of the single looked-up object."""

    def setUp(self):
        self.kwargs = {
            'params': None,
            'view_instance': Mock(),
            'view_method': None,
            'request': None,
            'args': None,
            'kwargs': None
        }
        self.kwargs['view_instance'].kwargs = {'id': 123}
        self.kwargs['view_instance'].lookup_field = 'id'
        self.kwargs['view_instance'].get_queryset = Mock(return_value=BitTestModel.objects.all())
        self.kwargs['view_instance'].filter_queryset = lambda x: x.filter(is_active=True)

    def test_should_use_view__get_queryset__and_filter_it_with__filter_queryset__and_filter_by__lookup_field(self):
        model = BitTestModel.objects.create(is_active=True)
        self.kwargs['view_instance'].kwargs = {'id': model.id}
        expected = u"[(%s, True)]" % model.id
        response = RetrieveModelKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, expected)

    def test_with_bad_lookup_value(self):
        self.kwargs['view_instance'].kwargs = {'id': "I'm ganna hack u are!"}
        response = RetrieveModelKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, None)

    def test_should_return_none_if_empty_queryset(self):
        self.kwargs['view_instance'].filter_queryset = lambda x: x.none()
        response = RetrieveModelKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, None)

    def test_should_return_none_if_empty_result_set_raised(self):
        self.kwargs['view_instance'].filter_queryset = lambda x: x.filter(pk__in=[])
        response = RetrieveModelKeyBit().get_data(**self.kwargs)
        self.assertEqual(response, None)
class ArgsKeyBitTest(TestCase):
    """ArgsKeyBit: select positional view args by index; '*' selects all."""

    def setUp(self):
        self.test_args = ['abc', 'foobar', 'xyz']
        self.kwargs = {
            'params': None,
            'view_instance': None,
            'view_method': None,
            'request': None,
            'args': self.test_args,
            'kwargs': None
        }

    def test_with_no_args(self):
        self.assertEqual(ArgsKeyBit().get_data(**self.kwargs), [])

    def test_with_all_args(self):
        self.kwargs['params'] = '*'
        self.assertEqual(ArgsKeyBit().get_data(**self.kwargs), self.test_args)

    def test_with_specified_args(self):
        # params holds the positional indexes to keep.
        self.kwargs['params'] = test_arg_idx = [0, 2]
        expected_args = [self.test_args[i] for i in test_arg_idx]
        self.assertEqual(ArgsKeyBit().get_data(**self.kwargs), expected_args)

    def test_default_params_is_all_args(self):
        self.assertEqual(ArgsKeyBit().params, '*')


class KwargsKeyBitTest(TestCase):
    """KwargsKeyBit: select view kwargs by name; '*' selects all."""

    def setUp(self):
        self.test_kwargs = {
            'one': '1',
            'city': 'London',
        }
        self.kwargs = {
            'params': None,
            'view_instance': None,
            'view_method': None,
            'request': None,
            'args': None,
            'kwargs': self.test_kwargs,
        }

    def test_resulting_dict_all_kwargs(self):
        self.kwargs['params'] = '*'
        self.assertEqual(KwargsKeyBit().get_data(**self.kwargs), self.test_kwargs)

    def test_resulting_dict_specified_kwargs(self):
        # Unknown names are silently ignored.
        keys = ['one', 'not_existing_param']
        expected_kwargs = {'one': self.test_kwargs['one']}
        self.kwargs['params'] = keys
        self.assertEqual(KwargsKeyBit().get_data(**self.kwargs), expected_kwargs)

    def test_resulting_dict_no_kwargs(self):
        self.assertEqual(KwargsKeyBit().get_data(**self.kwargs), {})

    def test_default_params_is_all_args(self):
        self.assertEqual(KwargsKeyBit().params, '*')
|
983,653 | 4825da8518638ca29ea113a3d0f0c57d70f15c7d | import argparse
from datetime import datetime
import math
from dotenv import load_dotenv
load_dotenv()
import pandas as pd
import randorank as rr
import sqlalchemy as sa
from db import connect_races, connect_rankings
def parse_arguments(argv=None):
    """Build the CLI parser and parse *argv* (defaults to sys.argv[1:]).

    BUG fix: the original called parser.parse_args() without returning the
    result, so main() always received None.  The optional *argv* parameter is
    backward compatible and allows testing without touching sys.argv.
    """
    parser = argparse.ArgumentParser()
    # NOTE: the original passed action='store' to add_subparsers, which is not
    # a valid sub-parsers action; add_subparsers manages its own action class.
    subparsers = parser.add_subparsers(dest='commands')

    parser_filter = subparsers.add_parser('filter_players')
    parser_filter.add_argument('players',
                               type=str,
                               help='A space or comma separated string of players not to rank')
    parser_filter.set_defaults(func=filter_players)

    parser_rank = subparsers.add_parser('rank')
    parser_rank.set_defaults(func=rank)
    # I had an "export" argument here that could take the final results and
    # pipe them to stdout as json or what have you, but we could just read from
    # the database if/when necessary I figure.

    parser_configure = subparsers.add_parser('configure')
    parser_configure.set_defaults(func=configure)
    parser_configure.add_argument('--period_length',
                                  type=int,
                                  required=True,
                                  help='Length of a period in weeks',
                                  default=4)
    parser_configure.add_argument('--num_periods',
                                  type=int,
                                  required=True,
                                  help='Number of periods in a season',
                                  default=3)
    parser_configure.add_argument('--season_start',
                                  type=str,
                                  required=True,
                                  help='''
                                  The date you want the season to start in the
                                  format YYYY-MM-DD
                                  ''')
    return parser.parse_args(argv)
def _rank_category(season_periods, glicko_constants):
    """Run glicko-2 over one category's season.

    season_periods: list of periods, each a list of {player: finish_time}
    race dicts (math.nan marks a forfeit).  Returns one rankings dict per
    period; the last entry holds the most current ratings.
    """
    rankings = []
    for period in season_periods:
        new_period = rr.MultiPeriod()
        new_period.set_constants(glicko_constants)
        if rankings:
            # glicko-2 carries rating/deviation/volatility forward, so seed
            # this period with the previous period's results.
            new_period.add_players(rankings[-1])
        # Drop races with fewer than two entrants (just in case) ...
        usable = [race for race in period if len(race) > 1]
        # ... and races where every entrant forfeited.  BUG fix: the original
        # applied this second filter to the unfiltered period, silently
        # discarding the entrant-count filter.
        usable = [race for race in usable
                  if any(not math.isnan(time) for time in race.values())]
        new_period.add_races(usable)
        rankings.append(new_period.rank())
    return rankings


def rank(args):
    '''
    Gets all season race data from the races db based on the season start date
    in the "meta" table of the rankings db.  Filters and separates races by
    category, then ranks each category with glicko-2.  Returns a dict mapping
    category name to its per-period rankings; the last element of each list is
    the current leaderboard and can be stored/displayed as desired.
    '''
    from datetime import timedelta  # module level imports only the datetime class

    # Configuration lives in the *rankings* db.  BUG fix: the original opened
    # the races db here.
    rankings_conn = connect_rankings()
    rankings_cursor = rankings_conn.cursor()
    if rankings_cursor.execute("SELECT * FROM meta").fetchone() is None:
        print("Please set config values with the config command")
        return
    # fetchone() returns a row tuple, not a scalar -- unpack it.
    (period_length,) = rankings_cursor.execute("SELECT period_length FROM meta").fetchone()
    (num_periods,) = rankings_cursor.execute("SELECT num_periods FROM meta").fetchone()
    (season_start,) = rankings_cursor.execute("SELECT season_start FROM meta").fetchone()
    if isinstance(season_start, str):
        # sqlite usually hands dates back as ISO text -- TODO confirm storage format
        season_start = datetime.fromisoformat(season_start)
    season_end = season_start + timedelta(weeks=num_periods * period_length)
    filtered_players = [row[0] for row in
                        rankings_cursor.execute("SELECT players FROM filtered_players").fetchall()]

    # Goal strings used to split races into categories.
    # NOTE(review): these mirror the lists in main(); the original referenced
    # them here while they were locals of main() (a NameError).  They should
    # eventually live in one shared module-level place.
    open_standard_goals = [
        'vt8 randomizer - casual open',
        'vt8 randomizer - standard',
        'vt8 randomizer - casual',
        'vt8 randomizer - open w/ boots start',
        'vt8 randomizer - fast ganon open w/ boots start',
        'vt8 randomizer - ambrosia'
    ]
    cross_keys_goals = ['vt8 randomizer - normal open keysanity + entrance shuffle']
    mystery_goals = [
        'vt8 randomizer - mystery pogchampion',
        'vt8 randomizer - mystery weighted',
        'vt8 randomizer - mystery unweighted',
        'vt8 randomizer - mystery friendly'
    ]

    # Get all the races in the season, sorted ascending so separate_periods()
    # can bucket them in order.  Parameterized query instead of an f-string
    # (the original interpolated a datetime unquoted, producing invalid SQL).
    races_conn = connect_races()
    races_cursor = races_conn.cursor()
    races_cursor.execute(
        'SELECT * FROM races WHERE date >= ? AND date < ? ORDER BY date ASC',
        (season_start.isoformat(sep=' '), season_end.isoformat(sep=' ')))
    # Rows look like (id, 'alttphacks', goal, date, num_racers); keep only the
    # id for each category -- get_races() needs bare ids, not whole rows.
    race_rows = races_cursor.fetchall()
    open_standard_ids = [row[0] for row in race_rows if row[2] in open_standard_goals]
    cross_keys_ids = [row[0] for row in race_rows if row[2] in cross_keys_goals]
    mystery_ids = [row[0] for row in race_rows if row[2] in mystery_goals]

    # BUG fix: the original passed arguments in the order (ids, cursor,
    # filtered_players), which does not match get_races' signature.
    open_standard_races = get_races(open_standard_ids, filtered_players, races_cursor)
    cross_keys_races = get_races(cross_keys_ids, filtered_players, races_cursor)
    mystery_races = get_races(mystery_ids, filtered_players, races_cursor)

    # Separate each category's races into rating periods.  With glicko-2 it
    # doesn't matter *when* a race happened within a period, only that it did.
    # BUG fix: the original passed open_standard_races to all three categories.
    season_open_standard = separate_periods(open_standard_races, season_start, period_length)
    season_cross_keys = separate_periods(cross_keys_races, season_start, period_length)
    season_mystery = separate_periods(mystery_races, season_start, period_length)

    # Same constants for every category; they should be roughly accurate but
    # it may be worth tweaking some of these for cross keys and mystery.
    glicko_constants = {'tau': .2,
                        'multi_slope': .008,
                        'multi_cutoff': 6,
                        'norm_factor': 1.3,
                        'victory_margin': 600,
                        'initial_rating': 1500,
                        'initial_deviation': 300,
                        'initial_volatility': .23
                        }

    # The most current rankings are the last element of each list, in the form
    # {'name': {'system variable': value}}.
    return {
        'open_standard': _rank_category(season_open_standard, glicko_constants),
        'cross_keys': _rank_category(season_cross_keys, glicko_constants),
        'mystery': _rank_category(season_mystery, glicko_constants),
    }
def configure(args):
    """
    Accepts the configuration parameters and stores them in the single-row
    "meta" table of the rankings database.
    """
    period_length = int(args.period_length)
    num_periods = int(args.num_periods)
    # BUG fix: the original format used %M (minutes) for the month field;
    # %m is the month directive.
    season_start = datetime.strptime(f'{args.season_start} 00:00:00', "%Y-%m-%d %H:%M:%S")
    conn = connect_rankings()
    cursor = conn.cursor()
    # Parameterized query: the original interpolated the values into an
    # f-string, which produces invalid SQL for a datetime (no quoting) and
    # invites injection.  The date is stored as ISO text.
    cursor.execute(
        'REPLACE INTO meta (period_length, num_periods, season_start) VALUES (?, ?, ?)',
        (period_length, num_periods, season_start.isoformat(sep=' ')),
    )
    conn.commit()
    conn.close()
def filter_players(args):
    """
    Accepts a comma or space separated string of players you don't want to
    rank and inserts them into the single-column filtered_players table.
    """
    import re  # stdlib; imported locally to keep the module imports untouched

    # BUG fix: the original used split(" ,"), which only splits on the literal
    # two-character string " ,"; split on any run of commas/whitespace instead.
    names = [name for name in re.split(r'[,\s]+', args.players) if name]
    conn = connect_rankings()
    cursor = conn.cursor()
    # BUG fix: the original interpolated a Python list into the SQL string and
    # called executemany() with no parameter sequence at all.
    cursor.executemany('INSERT INTO filtered_players VALUES (?)',
                       [(name,) for name in names])
    conn.commit()
    conn.close()
def get_races(race_ids, filtered_players, cursor):
    """
    Fetch the result dict for each race id.

    Returns a list of (race, date) tuples where ``race`` maps racer name to
    finish time (math.nan for forfeits); players in ``filtered_players`` are
    dropped.  ``cursor`` must point at a db with ``results`` rows whose
    columns 3 and 4 are the racer name and finish time -- TODO confirm schema.
    """
    all_races = []
    for race_id in race_ids:
        # Parameterized queries instead of f-string interpolation: keeps the
        # SQL valid and injection-safe regardless of the id's type.
        cursor.execute('SELECT * FROM results WHERE race_id=?', (race_id,))
        rows = cursor.fetchall()
        # Column 3 is the racer name, column 4 the finish time (None = forfeit).
        race = {row[3]: row[4] for row in rows if row[3] not in filtered_players}
        race = {name: math.nan if time is None else time
                for name, time in race.items()}
        cursor.execute('SELECT date FROM races WHERE id=?', (race_id,))
        date = cursor.fetchone()[0]
        all_races.append((race, date))
    return all_races
def separate_periods(races, season_start, period_length):
    """
    Divide (race_dict, date) tuples, sorted ascending by date, into buckets
    of ``period_length`` weeks each starting at ``season_start``.

    Returns a list of lists of race dicts, one inner list per period (empty
    periods yield empty lists).  With glicko-2 only *which* period a race
    fell in matters, not when inside it.
    """
    # BUG fix: the module imports only the datetime class, so the original
    # ``datetime.timedelta(...)`` raised AttributeError at runtime.
    from datetime import timedelta

    period_delta = timedelta(weeks=period_length)
    period_lower = season_start
    period_upper = season_start + period_delta
    period_buf = []
    bucket = []
    for race, race_date in races:
        # Advance the window -- possibly across several empty periods -- until
        # the race's date falls inside it.  BUG fix: the original advanced
        # only one period per race, mis-bucketing races after a quiet period.
        while race_date >= period_upper:
            period_buf.append(bucket)
            bucket = []
            period_lower += period_delta
            period_upper += period_delta
        bucket.append(race)
    # Add the last (possibly empty) bucket.
    period_buf.append(bucket)
    return period_buf
def main():
    """CLI entry point: parse arguments and dispatch to the chosen subcommand."""
    args = parse_arguments()
    # These are SRL goals used to filter the different categories.  Could
    # configure Sahasrahbot to set certain goals and put them here.
    # Note: races without these exact goals will not be ranked; regular
    # expressions could be used here and in rank() to match keywords/phrases
    # instead of entire goal strings (e.g. to include tourney matches).
    # NOTE(review): rank() needs these lists but they are locals of main();
    # they should be hoisted to module level or passed explicitly.
    # No inverted or keysanity -- maybe keysanity in its own leaderboard?
    open_standard_goals = [
        'vt8 randomizer - casual open',
        'vt8 randomizer - standard',
        'vt8 randomizer - casual',
        'vt8 randomizer - open w/ boots start',
        'vt8 randomizer - fast ganon open w/ boots start',
        'vt8 randomizer - ambrosia'
    ]
    cross_keys_goals = ['vt8 randomizer - normal open keysanity + entrance shuffle']
    mystery_goals = [
        'vt8 randomizer - mystery pogchampion',
        'vt8 randomizer - mystery weighted',
        'vt8 randomizer - mystery unweighted',
        'vt8 randomizer - mystery friendly'
    ]
    # Need to figure out how the glitched community wants to do this.
    glitched_goals = []
    # BUG fix: the original never invoked the selected subcommand.  func is
    # installed by set_defaults() in parse_arguments(); it is absent when no
    # subcommand was given.
    func = getattr(args, 'func', None)
    if func is not None:
        func(args)
|
983,654 | a91c9b66fc6042b01956247cec0c58ccb0e46a3d | # Python Crash Course: Chapter 8, Eric Matthews, Functions
# PEP 8 - style coding (79 characters max)
# No spaces for default values
# After last ',' return a new line with second indent.
# Add a commit for all functions with 3 quotes per docstring format.
# import lines at start of program.
# descriptive names, lowercase letter and underscores
def make_pizza(size, *toppings):
    """Print the pizza size and the list of toppings that have been requested.

    ``*toppings`` packs any extra positional arguments into a tuple; like
    default values, arbitrary (*args) parameters are listed after positional
    parameters.
    """
    # BUG fix: the original string lacked the f prefix, so "{size}" was
    # printed literally instead of interpolating the argument.
    print(f"\n Making a {size}-inch pizza with the following toppings: ")
    for topping in toppings:
        print(f"- {topping}")
983,655 | 2e2f905cff3c503c31de5c89b39f81eeb180f183 | import numpy as np
# Build a barcode plot and a persistence diagram for a point-cloud dataset.
# BUG FIX: sys.exit() is called on the two error paths below but sys was
# never imported, turning both aborts into NameErrors.
import sys
from ripser import ripser
from persim import plot_diagrams
from sklearn import metrics
import argparse
import matplotlib as mpl
import pylab
from palettable.colorbrewer.qualitative import Set1_9
from palettable.tableau import Tableau_20
from persim import PersImage

## Command-line arguments
parser = argparse.ArgumentParser(description='Generate plots for given dataset')
parser.add_argument('--filename', type=str, help='Dataset filename', default='None')
parser.add_argument('--delimiter', type=str, help='Data delimiter', default=',')
parser.add_argument('--xlimit', help='Set a fixed limit for x-axis. (Default: use ceiling of max death)')
parser.add_argument('--nolegend', help='Do not print legends on plots.', action="store_true")
parser.add_argument('--inf', help='Value to replace infinite barcodes with (default use xlimit)')
args = parser.parse_args()

# Side-by-side figure: barcodes on the left, persistence diagram on the right.
fig = mpl.pyplot.figure(figsize=mpl.pyplot.figaspect(.4))
if (args.filename == 'None'):
    # No dataset given: fall back to 100 random 2-D points.
    data = np.random.random((100, 2))
    fig.suptitle('Random data')
else:
    data = np.genfromtxt(args.filename, delimiter=args.delimiter)
    fig.suptitle(args.filename)
orig_data = data

## Generate the persistence intervals with ripser
diagrams = ripser(data, maxdim=2, thresh=5.0)['dgms']

## Flatten the per-dimension diagrams into rows of (dimension, birth, death),
## sorted by birth time within each dimension.
persArray = []
i = 0
for dim in diagrams:
    tempDim = []
    for interval in dim:
        interval = np.insert(interval, 0, i)
        tempDim.append(interval)
    tempDim.sort(key=lambda x: x[1])
    i += 1
    persArray = persArray + tempDim
data = np.asarray(persArray)

ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)

###### BARCODES ########
# Infinite death times cannot be drawn; replace them with a finite stand-in.
indexNaN = np.isinf(data)
if (args.inf):
    data[indexNaN] = float(args.inf)
else:
    data[indexNaN] = 1

# select our color map
colorPalette = Set1_9.mpl_colors
# set the markers for the scatter plotting of the persistence diagram.
markers = ['o', '+', '<', 'd', 'x', '>', '1', '2', '3', '4', '^']

maxDeath = np.ceil(np.max(data[:, 2]))
if args.xlimit is None:
    xLimit = maxDeath
else:
    xLimit = float(args.xlimit)
# did the user request an x-axis limit below the maximum death value found in the input?
if xLimit < maxDeath:
    print('Requested xLimit (' + str(xLimit) + ') value is lower than max death value (' + str(maxDeath) + ')...aborting')
    sys.exit()
# are there more dimensions in the data than we have colors for?
if len(colorPalette) < np.max(data[:, 0]):
    print('The current colormap has insufficient colors to represent all the dimensions in the data...aborting')
    sys.exit()

# build barcode plot: one horizontal line per persistence interval
ax1.grid(True, which='both')
ax1.set_xlim(-.025, xLimit + .025)
ax1.set_xlabel('Time')
ax1.set_ylabel('Index')
for i in range(len(data)):
    ax1.hlines(i + 1, data[i, 1], data[i, 2], colors=colorPalette[int(data[i, 0])])
# build the legend (one patch per homology dimension)
dimensions = []
for i in range(int(np.max(data[:, 0])) + 1):
    dimensions.append(mpl.patches.Patch(color=colorPalette[i], label=r'$H_{}$'.format(i)))
if not args.nolegend:
    ax1.legend(handles=dimensions, loc='center right', bbox_to_anchor=(1, .65))

# build persistence diagram
axisMin = -0.025
axisMax = maxDeath + .025
ax2.grid(True, which='both')
ax2.set_xlim(axisMin, axisMax)
ax2.set_ylim(axisMin, axisMax)
ax2.set_xlabel('Birth')
ax2.set_ylabel('Death')
markerSize = 15
# plot the data
for i in range(len(data)):
    ax2.scatter(data[i, 1], data[i, 2], color=colorPalette[int(data[i, 0])], marker=markers[int(data[i, 0]) % len(markers)], s=markerSize)
# build the legend and a 45-degree birth==death reference line
dimensions = []
for i in range(int(np.max(data[:, 0])) + 1):
    dimensions.append(mpl.patches.Patch(color=colorPalette[i], label=r'$H_{}$'.format(i)))
if not args.nolegend:
    ax2.legend(handles=dimensions, loc='center right')
ax2.plot([axisMin, axisMax], [axisMin, axisMax], color='black', linestyle='-', linewidth=2)

# NOTE(review): with no --filename this writes "None_tdaPlot.pdf" -- confirm
# whether the random-data demo should pick a different output name.
pylab.savefig(args.filename + "_tdaPlot.pdf", bbox_inches='tight')
|
983,656 | 22359519558bce54da9467d9fc84f629abd41287 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from hsr_flexbe_states.hsr_joint_pose_state import hsr_JointPose
from hsr_flexbe_states.hsr_bounding_box_2d_state import hsr_BoundingBox2D
from hsr_flexbe_states.hsr_tf2_state import hsr_Tf2
from hsr_flexbe_states.hsr_pose_decision import hsr_PoseDecision
from hsr_flexbe_states.hsr_moveit_to_pose_goal_action_state import hsr_MoveitToPoseGoalAction
from hsr_flexbe_states.hsr_grasping_point_detect_state import hsr_GraspingPointDetect
from hsr_flexbe_states.hsr_pose_dummy import hsr_PoseDummy
from hsr_flexbe_states.hsr_image_capture_state import hsr_ImageCapture
from hsr_flexbe_states.hsr_veiw_marker import hsr_ViewMarker
from hsr_flexbe_states.hsr_gripping_object_state import hsr_GrippingObject
from hsr_flexbe_states.hsr_collision_box_state import hsr_CollisionBox
from hsr_flexbe_states.hsr_move_to_neutral import hsr_MoveNeutral
from hsr_flexbe_states.hsr_joint_pose_line_state import hsr_JointPoseLine
from hsr_flexbe_states.hsr_move2center import hsr_Move2Center
from hsr_flexbe_states.hsr_create_approach import hsr_CreateApproach
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Fri Jun 21 2019
@author: tfukui
'''
class hsr_manipulation_testSM(Behavior):
    '''
    hsr_manipulation_test

    FlexBE-generated manipulation behavior. Per the file header, this code is
    generated: manual changes outside the [MANUAL_*] tags are lost when the
    behavior is regenerated.
    '''

    def __init__(self):
        super(hsr_manipulation_testSM, self).__init__()
        self.name = 'hsr_manipulation_test'

        # parameters of this behavior

        # references to used behaviors

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        """Assemble and return the behavior's top-level state machine."""
        # x:225 y:613, x:1131 y:611
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        # Inner 'Group' container: capture image -> detect bounding boxes ->
        # compute grasping point -> transform to map frame -> choose grasp pose
        # -> approach -> grasp -> return to an initial joint pose. Every
        # state's failure outcome routes to hsr_MoveNeutral.
        # x:14 y:478
        _sm_group_0 = OperatableStateMachine(outcomes=['continue'])

        with _sm_group_0:
            # x:78 y:63
            OperatableStateMachine.add('hsr_JointPoseXtion',
                                       hsr_JointPose(arm_lift_joint=0.4, arm_flex_joint=-2.169, arm_roll_joint=0.0, wrist_flex_joint=-3.14/2, wrist_roll_joint=0.0, head_pan_joint=0.0, head_tilt_joint=-0.35),
                                       transitions={'continue': 'hsr_OpenHand'},
                                       autonomy={'continue': Autonomy.Off})

            # x:617 y:41
            OperatableStateMachine.add('hsr_BoundingBox2D',
                                       hsr_BoundingBox2D(output_topic="/bounding_box_2d_monitor"),
                                       transitions={'completed': 'hsr_GraspingPointDetect', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'completed': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'rgb_image': 'rgb_image', 'depth_image': 'depth_image', 'camera_info': 'camera_info', 'detection': 'detection'})

            # x:973 y:46
            OperatableStateMachine.add('hsr_tf2',
                                       hsr_Tf2(before="head_rgbd_sensor_rgb_frame", after="map"),
                                       transitions={'continue': 'hsr_PoseDummy', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'before_pose': 'grasping_point', 'after_pose': 'target_poses'})

            # x:1049 y:220
            OperatableStateMachine.add('hsr_PoseDecision',
                                       hsr_PoseDecision(),
                                       transitions={'continue': 'hsr_Move2Center', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'target_poses': 'target_poses', 'category': 'dummy_object', 'failed_objects': 'dummy_failed_objects', 'selected_pose_grasp': 'selected_pose_grasp', 'selected_object_name': 'selected_object_name', 'selected_object_status': 'selected_object_status'})

            # x:59 y:411
            OperatableStateMachine.add('hsr_MoveitToPoseGoalAction',
                                       hsr_MoveitToPoseGoalAction(move_group='whole_body', action_topic='/move_group', tolerance=0.001, orien_tolerance=True),
                                       transitions={'reached': 'hsr_GrippingObject_close', 'planning_failed': 'hsr_MoveNeutral', 'control_failed': 'hsr_MoveNeutral'},
                                       autonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
                                       remapping={'pose_goal': 'selected_pose_grasp', 'move_status': 'move_status'})

            # x:764 y:42
            OperatableStateMachine.add('hsr_GraspingPointDetect',
                                       hsr_GraspingPointDetect(output_topic="/bounding_box_2d_monitor", save=True),
                                       transitions={'completed': 'hsr_tf2', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'completed': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'rgb_image': 'rgb_image', 'depth_image': 'depth_image', 'camera_info': 'camera_info', 'detection': 'detection', 'grasping_point': 'grasping_point'})

            # x:1151 y:42
            OperatableStateMachine.add('hsr_PoseDummy',
                                       hsr_PoseDummy(),
                                       transitions={'continue': 'hsr_InitDecision'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'dummy_pose': 'dummy_pose', 'dummy_object': 'dummy_object', 'dummy_failed_objects': 'dummy_failed_objects'})

            # x:429 y:40
            OperatableStateMachine.add('hsr_ImageCapture',
                                       hsr_ImageCapture(rgb_topic="/hsrb/head_rgbd_sensor/rgb/image_rect_color", depth_topic="/hsrb/head_rgbd_sensor/depth_registered/image_rect_raw", camera_info_topic="/hsrb/head_rgbd_sensor/rgb/camera_info"),
                                       transitions={'completed': 'hsr_BoundingBox2D', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'completed': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'rgb_image': 'rgb_image', 'depth_image': 'depth_image', 'camera_info': 'camera_info'})

            # x:36 y:298
            OperatableStateMachine.add('hsr_ViewMaker_grasp_point',
                                       hsr_ViewMarker(),
                                       transitions={'continue': 'hsr_MoveitToPoseGoalAction', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'view_pose': 'selected_pose_grasp'})

            # x:59 y:489
            OperatableStateMachine.add('hsr_GrippingObject_close',
                                       hsr_GrippingObject(grasp_force=0.7, mode=True),
                                       transitions={'continue': 'hsr_JointPose_initial', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'grasp_success': 'grasp_success'})

            # x:60 y:566
            OperatableStateMachine.add('hsr_JointPose_initial',
                                       hsr_JointPose(arm_lift_joint=0.05, arm_flex_joint=0.0, arm_roll_joint=-3.14/2, wrist_flex_joint=-3.14/2, wrist_roll_joint=0.0, head_pan_joint=0.0, head_tilt_joint=0.0),
                                       transitions={'continue': 'continue'},
                                       autonomy={'continue': Autonomy.Off})

            # x:458 y:234
            OperatableStateMachine.add('hsr_CollisionBox',
                                       hsr_CollisionBox(offset_z=0.1, offset_dist=0.1, width=1.2, mode='C'),
                                       transitions={'continue': 'hsr_CreateApproach'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'box_pose': 'selected_pose_grasp'})

            # x:978 y:533
            OperatableStateMachine.add('hsr_MoveNeutral',
                                       hsr_MoveNeutral(),
                                       transitions={'continue': 'continue'},
                                       autonomy={'continue': Autonomy.Off})

            # x:217 y:72
            OperatableStateMachine.add('hsr_OpenHand',
                                       hsr_GrippingObject(grasp_force=0.7, mode=False),
                                       transitions={'continue': 'hsr_ImageCapture', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'grasp_success': 'grasp_success'})

            # x:30 y:226
            OperatableStateMachine.add('hsr_MoveitToPoseAppAction',
                                       hsr_MoveitToPoseGoalAction(move_group='whole_body', action_topic='/move_group', tolerance=0.001, orien_tolerance=True),
                                       transitions={'reached': 'hsr_ViewMaker_grasp_point', 'planning_failed': 'hsr_MoveNeutral', 'control_failed': 'hsr_MoveNeutral'},
                                       autonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
                                       remapping={'pose_goal': 'selected_pose_approach', 'move_status': 'move_status'})

            # x:604 y:239
            OperatableStateMachine.add('hsr_JointPoseLine',
                                       hsr_JointPoseLine(line_z=0.0),
                                       transitions={'continue': 'hsr_CollisionBox'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'pose_goal': 'selected_pose_grasp'})

            # x:1259 y:247
            OperatableStateMachine.add('hsr_RemoveBox',
                                       hsr_CollisionBox(offset_z=0.3, offset_dist=0.1, width=0.7, mode='R'),
                                       transitions={'continue': 'hsr_PoseDecision'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'box_pose': 'dummy_pose'})

            # x:211 y:186
            OperatableStateMachine.add('hsr_ViewMarker',
                                       hsr_ViewMarker(),
                                       transitions={'continue': 'hsr_MoveitToPoseAppAction', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'view_pose': 'selected_pose_approach'})

            # x:757 y:237
            OperatableStateMachine.add('hsr_ArmInit',
                                       hsr_MoveNeutral(),
                                       transitions={'continue': 'hsr_JointPoseLine'},
                                       autonomy={'continue': Autonomy.Off})

            # x:889 y:233
            OperatableStateMachine.add('hsr_Move2Center',
                                       hsr_Move2Center(before='map', after='base_footprint'),
                                       transitions={'continue': 'hsr_ArmInit', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'target_pose': 'selected_pose_grasp'})

            # x:320 y:214
            OperatableStateMachine.add('hsr_CreateApproach',
                                       hsr_CreateApproach(offset=0.3),
                                       transitions={'continue': 'hsr_ViewMarker', 'failed': 'hsr_MoveNeutral'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'target_pose': 'selected_pose_grasp', 'selected_pose_approach': 'selected_pose_approach', 'selected_pose_grasp': 'selected_pose_grasp'})

            # x:1243 y:154
            OperatableStateMachine.add('hsr_InitDecision',
                                       hsr_MoveNeutral(),
                                       transitions={'continue': 'hsr_RemoveBox'},
                                       autonomy={'continue': Autonomy.Off})

        # Top level: the whole pipeline runs inside the single 'Group'
        # container; its 'continue' outcome maps to 'finished'.
        with _state_machine:
            # x:444 y:346
            OperatableStateMachine.add('Group',
                                       _sm_group_0,
                                       transitions={'continue': 'finished'},
                                       autonomy={'continue': Autonomy.Inherit})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
|
983,657 | f42244df667d76ac650df0fc1b785f1106087ac5 | /**
* Fundamental bit manipulation operations you must know Time Complexity: O(1)
*
* @author Micah Stairs
*/
package com.williamfiset.algorithms.other;
/** Single-bit operations on 32-bit integers; each runs in O(1). */
public class BitManipulations {

  /** Returns {@code set} with the i'th bit turned on. */
  public static int setBit(int set, int i) {
    return (1 << i) | set;
  }

  /** Returns true if the i'th bit of {@code set} is 1. */
  public static boolean isSet(int set, int i) {
    return ((set >>> i) & 1) == 1;
  }

  /** Returns {@code set} with the i'th bit turned off. */
  public static int clearBit(int set, int i) {
    // x & ~mask == x ^ (x & mask): subtract the bit only if it is present.
    return set ^ (set & (1 << i));
  }

  /** Returns {@code set} with the i'th bit flipped (0 -> 1 or 1 -> 0). */
  public static int toggleBit(int set, int i) {
    return (1 << i) ^ set;
  }

  /** Returns a value whose lowest n bits are all 1. */
  public static int setAll(int n) {
    // ~(-1 << n) == (1 << n) - 1 for every shift amount (shifts are mod 32).
    return ~(-1 << n);
  }

  /** Returns true if n is a positive power of two. */
  public static boolean isPowerOfTwo(int n) {
    // n & -n isolates the lowest set bit; a power of two equals that bit.
    return n > 0 && (n & -n) == n;
  }
}
|
class circle:
    """A circle described by its radius and color."""

    def __init__(self, radius, color):
        self.radius = radius
        self.color = color

    def __str__(self):
        # BUG FIX: print(example) previously showed the default object repr
        # (<__main__.circle object at 0x...>) instead of a description.
        return "A {} circle with radius {}".format(self.color, self.radius)

    def getdesc(self):
        """Print a human-readable description of the circle."""
        print("A", self.color, "circle with radius", self.radius)


example = circle(12, "red")
print(example)
class Vozel:
    '''
    A single node carrying one value in a linked list.
    '''
    def __init__(self, podatek, naslednji=None):
        # BUG FIX: the constructor stored to self.podatek / self.naslednji
        # while the accessors below read self._podatek / self._naslednji,
        # so get_podatek() and get_naslednji() raised AttributeError.
        # Backing fields plus properties keep both access styles working.
        self._podatek = podatek
        self._naslednji = naslednji

    @property
    def podatek(self):
        '''The value stored in this node.'''
        return self._podatek

    @podatek.setter
    def podatek(self, val):
        self._podatek = val

    @property
    def naslednji(self):
        '''Reference to the next node (None at the end of the chain).'''
        return self._naslednji

    @naslednji.setter
    def naslednji(self, val):
        self._naslednji = val

    def get_podatek(self):
        '''reference to the stored value'''
        return self._podatek

    def get_naslednji(self):
        '''reference to the next node'''
        return self._naslednji

    def set_naslednji(self, val):
        self._naslednji = val
class VerizniSeznam:
    '''
    A singly linked list that keeps references to its head and tail.
    '''
    def __init__(self):
        self._zacetek = None  # head node
        self._konec = None    # tail node
    def __str__(self):
        '''Render the chain as "a -> b -> •", where • marks the end.'''
        niz = ''
        vozel = self._zacetek
        while vozel is not None:
            niz += '{} -> '.format(repr(vozel.podatek))
            vozel = vozel.naslednji
        return niz + '•'
    def vstavi_na_zacetek(self, podatek):
        '''Insert podatek at the front of the list and return the new head.

        NOTE(review): _konec (the tail reference) is never updated here,
        not even for the first insertion -- confirm whether that is intended.
        '''
        vozel = Vozel(podatek)
        if self._zacetek is None:
            self._zacetek = vozel
        else:
            prvi = self._zacetek
            self._zacetek = vozel
            vozel.naslednji = prvi
        return self._zacetek
def izpisi_liha(vozel):
    '''Return the head of a new chain containing only the odd values.

    Walks the chain starting at ``vozel`` and builds a brand-new chain of
    nodes holding the odd values, in their original order.

    BUG FIX: the original returned the *last* odd node it created (so all
    but one element were unreachable) and it also overwrote the input
    node's ``podatek`` on every iteration. This version leaves the input
    chain untouched and returns the head of the new chain (None if there
    are no odd values).
    '''
    glava = Vozel(None)  # sentinel that sits in front of the new chain
    konec = glava
    kje_smo = vozel
    while kje_smo is not None:
        if kje_smo.podatek % 2 == 1:
            konec.naslednji = Vozel(kje_smo.podatek)
            konec = konec.naslednji
        kje_smo = kje_smo.naslednji
    return glava.naslednji
# Split a chain of 0/1/2 values into three chains (all zeros, all ones,
# all twos) and then re-link them into one sorted chain 0...0 1...1 2...2.
# Each chain starts with a throw-away sentinel node (41/40/42) that is
# skipped when the chains are joined at the end.
zacetekEna = Vozel(41)
konecEna = zacetekEna
zacetekNic = Vozel(40)
konecNic = zacetekNic
zacetekDva = Vozel(42)
konecDva = zacetekDva
# NOTE(review): verizni_seznam is not defined anywhere in this file as
# shown -- this script raises NameError unless it is provided elsewhere.
kje_smo = verizni_seznam._zacetek  # first node of the input linked list
while kje_smo is not None:
    trenutni_podatek = kje_smo.podatek  # value of the node we are visiting
    if trenutni_podatek == 0:
        konecNic.naslednji = Vozel(trenutni_podatek)  # append a zero node to the zeros chain
        konecNic = konecNic.naslednji  # keep konecNic pointing at the last node of that chain
    elif trenutni_podatek == 1:
        konecEna.naslednji = Vozel(trenutni_podatek)
        konecEna = konecEna.naslednji
    elif trenutni_podatek == 2:
        konecDva.naslednji = Vozel(trenutni_podatek)
        konecDva = konecDva.naslednji
    else:
        print('slaba veriga')
    kje_smo = kje_smo.naslednji
# now join all three chains together
# attach the first real ones node after the last zeros node;
# .naslednji skips the sentinel node
konecNic.naslednji = zacetekEna.naslednji
# likewise attach the first real twos node after the last ones node
konecEna.naslednji = zacetekDva.naslednji
983,660 | c5425721b9e9ea5ce7a131fd4a9c0f9b97e34d41 | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import numpy as np
from pyiron_base import Settings
from sklearn.cluster import AgglomerativeClustering
from scipy.sparse import coo_matrix
from scipy.spatial import Voronoi
from pyiron_atomistics.atomistics.structure.pyscal import get_steinhardt_parameter_structure, analyse_cna_adaptive, \
analyse_centro_symmetry, analyse_diamond_structure, analyse_voronoi_volume
__author__ = "Joerg Neugebauer, Sam Waseda"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Sam Waseda"
__email__ = "waseda@mpie.de"
__status__ = "production"
__date__ = "Sep 1, 2017"
s = Settings()
def get_average_of_unique_labels(labels, values):
    """
    Return the average of the elements of `values` that share a label.

    Labels are first compressed to contiguous ids 0..n-1 via np.unique,
    then the values are summed per label and divided by the label counts.
    1-D results come back flattened; multi-column values keep shape (n, d).

    Example:

    >>> labels = [0, 1, 0, 2]
    >>> values = [0, 1, 2, 3]
    >>> print(get_average_of_unique_labels(labels, values))
    array([1., 1., 3.])
    """
    compressed = np.unique(labels, return_inverse=True)[1]
    n_labels = len(np.unique(compressed))
    stacked = np.asarray(values).reshape(len(compressed), -1)
    # Accumulate per-label sums, then normalise by each label's population.
    totals = np.zeros((n_labels, stacked.shape[1]))
    np.add.at(totals, compressed, stacked)
    counts = np.bincount(compressed).reshape(-1, 1)
    mean_values = totals / counts
    if np.prod(mean_values.shape).astype(int) == n_labels:
        return mean_values.flatten()
    return mean_values
class Analyse:
    """ Class to analyse atom structure. """
    def __init__(self, structure):
        """
        Args:
            structure (:class:`pyiron.atomistics.structure.atoms.Atoms`): reference Atom structure.
        """
        self._structure = structure
    def get_layers(self, distance_threshold=0.01, id_list=None, wrap_atoms=True, planes=None):
        """
        Get an array of layer numbers.

        Args:
            distance_threshold (float): Distance below which two points are
                considered to belong to the same layer. For detailed
                description: sklearn.cluster.AgglomerativeClustering
            id_list (list/numpy.ndarray): List of atoms for which the layers
                should be considered.
            wrap_atoms (bool): Whether to wrap the positions back into the box
                before clustering (ignored when `planes` is given).
            planes (list/numpy.ndarray): Planes along which the layers are calculated. Planes are
                given in vectors, i.e. [1, 0, 0] gives the layers along the x-axis. Default planes
                are orthogonal unit vectors: [[1, 0, 0], [0, 1, 0], [0, 0, 1]]. If you have a
                tilted box and want to calculate the layers along the directions of the cell
                vectors, use `planes=np.linalg.inv(structure.cell).T`. Whatever values are
                inserted, they are internally normalized, so whether [1, 0, 0] is entered or
                [2, 0, 0], the results will be the same.

        Returns: Array of layer numbers (same shape as structure.positions)

        Example I - how to get the number of layers in each direction:

        >>> structure = Project('.').create_structure('Fe', 'bcc', 2.83).repeat(5)
        >>> print('Numbers of layers:', np.max(structure.analyse.get_layers(), axis=0)+1)

        Example II - get layers of only one species:

        >>> print('Iron layers:', structure.analyse.get_layers(
        ...       id_list=structure.select_index('Fe')))
        """
        if distance_threshold <= 0:
            raise ValueError('distance_threshold must be a positive float')
        if id_list is not None and len(id_list)==0:
            raise ValueError('id_list must contain at least one id')
        if wrap_atoms and planes is None:
            # Extend the cell by a margin of one threshold so that clusters
            # crossing the periodic boundary are detected.
            positions, indices = self._structure.get_extended_positions(
                width=distance_threshold, return_indices=True
            )
            if id_list is not None:
                id_list = np.arange(len(self._structure))[np.array(id_list)]
                id_list = np.any(id_list[:,np.newaxis]==indices[np.newaxis,:], axis=0)
                positions = positions[id_list]
                indices = indices[id_list]
        else:
            positions = self._structure.positions
            if id_list is not None:
                positions = positions[id_list]
            if wrap_atoms:
                positions = self._structure.get_wrapped_coordinates(positions)
        if planes is not None:
            # Project positions onto the (normalized) requested plane normals.
            mat = np.asarray(planes).reshape(-1, 3)
            positions = np.einsum('ij,i,nj->ni', mat, 1/np.linalg.norm(mat, axis=-1), positions)
        layers = []
        # Cluster each coordinate axis (or projected axis) independently.
        for ii,x in enumerate(positions.T):
            cluster = AgglomerativeClustering(
                linkage='complete',
                n_clusters=None,
                distance_threshold=distance_threshold
            ).fit(x.reshape(-1,1))
            first_occurrences = np.unique(cluster.labels_, return_index=True)[1]
            # Renumber the cluster labels so layer 0 is the lowest coordinate.
            permutation = x[first_occurrences].argsort().argsort()
            labels = permutation[cluster.labels_]
            if wrap_atoms and planes is None and self._structure.pbc[ii]:
                # Keep only the clusters whose mean position lies inside the
                # box, then map the result back onto the original atoms.
                mean_positions = get_average_of_unique_labels(labels, positions)
                scaled_positions = np.einsum(
                    'ji,nj->ni', np.linalg.inv(self._structure.cell), mean_positions
                )
                unique_inside_box = np.all(np.absolute(scaled_positions-0.5+1.0e-8)<0.5, axis=-1)
                arr_inside_box = np.any(
                    labels[:,None]==np.unique(labels)[unique_inside_box][None,:], axis=-1
                )
                first_occurences = np.unique(indices[arr_inside_box], return_index=True)[1]
                labels = labels[arr_inside_box]
                labels -= np.min(labels)
                labels = labels[first_occurences]
            layers.append(labels)
        if planes is not None and len(np.asarray(planes).shape)==1:
            return np.asarray(layers).flatten()
        return np.vstack(layers).T
    def pyscal_steinhardt_parameter(self, neighbor_method="cutoff", cutoff=0, n_clusters=2,
                                    q=(4, 6), averaged=False, clustering=True):
        """
        Calculate Steinhardts parameters

        Args:
            neighbor_method (str) : can be ['cutoff', 'voronoi']
            cutoff (float) : can be 0 for adaptive cutoff or any other value
            n_clusters (int) : number of clusters for K means clustering
            q (list) : can be from 2-12, the required q values to be calculated
            averaged (bool) : If True, calculates the averaged versions of the parameter
            clustering (bool) : If True, cluster based on the q values

        Returns:
            list: calculated q parameters
        """
        return get_steinhardt_parameter_structure(
            self._structure, neighbor_method=neighbor_method, cutoff=cutoff, n_clusters=n_clusters,
            q=q, averaged=averaged, clustering=clustering
        )
    def pyscal_cna_adaptive(self, mode="total", ovito_compatibility=False):
        """
        Use common neighbor analysis

        Args:
            mode ("total"/"numeric"/"str"): Controls the style and level
                of detail of the output.
                - total : return number of atoms belonging to each structure
                - numeric : return a per atom list of numbers- 0 for unknown,
                  1 fcc, 2 hcp, 3 bcc and 4 icosa
                - str : return a per atom string of structures
            ovito_compatibility(bool): use ovito compatibility mode

        Returns:
            (depends on `mode`)
        """
        return analyse_cna_adaptive(atoms=self._structure, mode=mode, ovito_compatibility=ovito_compatibility)
    def pyscal_centro_symmetry(self, num_neighbors=12):
        """
        Analyse centrosymmetry parameter

        Args:
            num_neighbors (int) : number of neighbors

        Returns:
            list: list of centrosymmetry parameter
        """
        return analyse_centro_symmetry(atoms=self._structure, num_neighbors=num_neighbors)
    def pyscal_diamond_structure(self, mode="total", ovito_compatibility=False):
        """
        Analyse diamond structure

        Args:
            mode ("total"/"numeric"/"str"): Controls the style and level
                of detail of the output.
                - total : return number of atoms belonging to each structure
                - numeric : return a per atom list of numbers- 0 for unknown,
                  1 fcc, 2 hcp, 3 bcc and 4 icosa
                - str : return a per atom string of structures
            ovito_compatibility(bool): use ovito compatibility mode

        Returns:
            (depends on `mode`)
        """
        return analyse_diamond_structure(atoms=self._structure, mode=mode, ovito_compatibility=ovito_compatibility)
    def pyscal_voronoi_volume(self):
        """ Calculate the Voronoi volume of atoms """
        return analyse_voronoi_volume(atoms=self._structure)
    def get_voronoi_vertices(self, epsilon=2.5e-4, distance_threshold=0, width_buffer=10):
        """
        Get voronoi vertices of the box.

        Args:
            epsilon (float): displacement to add to avoid wrapping of atoms at borders
            distance_threshold (float): distance below which two vertices are considered as one.
                Agglomerative clustering algorithm (sklearn) is employed. Final positions are given
                as the average positions of clusters.
            width_buffer (float): width of the layer to be added to account for pbc.

        Returns:
            numpy.ndarray: 3d-array of vertices

        This function detect octahedral and tetrahedral sites in fcc; in bcc it detects tetrahedral
        sites. In defects (e.g. vacancy, dislocation, grain boundary etc.), it gives a list of
        positions interstitial atoms might want to occupy. In order for this to be more successful,
        it might make sense to look at the distance between the voronoi vertices and their nearest
        neighboring atoms via:

        >>> voronoi_vertices = structure_of_your_choice.analyse.get_voronoi_vertices()
        >>> neigh = structure_of_your_choice.get_neighborhood(voronoi_vertices)
        >>> print(neigh.distances.min(axis=-1))
        """
        # Shift by epsilon so vertices do not land exactly on the periodic
        # boundary, then undo the shift before returning.
        voro = Voronoi(self._structure.get_extended_positions(width_buffer)+epsilon)
        xx = voro.vertices
        if distance_threshold > 0:
            # Merge vertices closer than distance_threshold into their mean.
            cluster = AgglomerativeClustering(
                linkage='single',
                distance_threshold=distance_threshold,
                n_clusters=None
            )
            cluster.fit(xx)
            xx = get_average_of_unique_labels(cluster.labels_, xx)
        # Keep only the vertices that fall inside the original box.
        xx = xx[np.linalg.norm(xx-self._structure.get_wrapped_coordinates(xx, epsilon=0), axis=-1)<epsilon]
        return xx-epsilon
|
983,661 | 607c438de8684c9859a04713d74b486a0c2f8e7d | import unittest
import datetime
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import testbed
from ndbtestcase import NdbTestCase
from core.models import Post
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
class AdminTestCase(NdbTestCase):
    """Exercises the admin add/edit/delete flows for blog posts."""

    def setUp(self):
        super(AdminTestCase, self).setUp()
        # BUG FIX: the dates were written as (2013,07,04) -- leading-zero
        # integer literals are a SyntaxError on Python 3; plain 7 and 4 are
        # the same values on Python 2 as well.
        self.post = Post(
            title='Test Post',
            brief='To infinity ... and beyond.',
            content='Vestibulum id ligula porta felis euismod semper.',
            is_active=True,
            comments_enabled=False,
            date_published=datetime.date(2013, 7, 4),
        )
        self.client = Client()

    def test_if_add_page_is_reachable(self):
        """The admin 'add post' page responds with 200."""
        response = self.client.get(reverse('admin_add_post'))
        self.assertEqual(response.status_code, 200)

    def test_add_new_post(self):
        """A freshly stored post is reachable at its dated, slugged URL."""
        new_post = Post(
            title='I Am New',
            brief='The Good, The Bad, and The Ugly.',
            content='Cras justo odio, dapibus ac facilisis in, egestas eget quam.',
            is_active=True,
            comments_enabled=False,
            date_published=datetime.date(2012, 11, 22),
        )
        post = new_post.put().get()
        response = self.client.get(reverse('post_view', args=[
            post.date_published.strftime("%Y"),
            post.date_published.strftime("%m"),
            post.date_published.strftime("%d"),
            slugify(post.title)])
        )
        self.assertEqual(response.status_code, 200)

    def test_if_edit_page_is_reachable(self):
        """The admin 'edit post' page for a stored post responds with 200."""
        post = self.post.put().get()
        response = self.client.get(
            reverse('admin_edit_post', args=[
                post.date_published.strftime("%Y"),
                post.date_published.strftime("%m"),
                post.date_published.strftime("%d"),
                slugify(post.title)])
        )
        self.assertEqual(response.status_code, 200)

    def test_edit_exisiting_post(self):
        """Editing a post's title keeps its edit page reachable at the new slug."""
        post = self.post.put().get()
        post.title = 'Cool New Title'
        edited_post = post.put().get()
        response = self.client.get(
            reverse('admin_edit_post', args=[
                edited_post.date_published.strftime("%Y"),
                edited_post.date_published.strftime("%m"),
                edited_post.date_published.strftime("%d"),
                slugify(edited_post.title)])
        )
        self.assertEqual(response.status_code, 200)

    def test_delete_method(self):
        """Deleting a stored post returns None."""
        deleted_post = self.post.put().delete()
        self.assertEqual(deleted_post, None)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
983,662 | 77dd80921d5ef39e4d408418a665b73c2d49fd77 | import collections
import string
class Phrase(object):
    """A piece of text whose word frequencies can be counted."""

    def __init__(self, word):
        self.word = word

    def word_count(self):
        """Strip punctuation, then return a Counter of lower-cased words.

        Side effect (kept from the original): ``self.word`` is replaced by
        its punctuation-free version, which is also printed.
        """
        exclude = set(string.punctuation)
        self.word = ''.join(ch for ch in self.word if ch not in exclude)
        # BUG FIX: the original used the Python 2 print statement, which is
        # a SyntaxError on Python 3.
        print(self.word)
        # BUG FIX: .replace(" ", " ") was a no-op, and .split(" ") produced
        # bogus '' "words" for consecutive spaces; split() with no argument
        # collapses runs of whitespace instead.
        return collections.Counter(self.word.lower().split())
|
983,663 | 2f471bb3624247d3fae3dd42adfcb260cc5d39a1 | # © Copyright 2021 HP Development Company, L.P.
from pymisp import ExpandedPyMISP, MISPEvent, MISPAttribute, MISPObject
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import requests, logging
import matplotlib.pyplot as plt
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
logging.getLogger("pymisp").setLevel(logging.CRITICAL)
misp = ExpandedPyMISP("MISP_URL_GOES_HERE", "MISP_API_KEY_GOES_HERE", False)
# 64-hex digests (SHA-256-sized) of the samples whose related MISP events
# are collected and tallied below.
hashes = [
    "06e1bc999bb5df10b2472be1822312e7fe229394cdaf652b41bcebd171d63f2f",
    "f61151f81c42799bd857298ecb606d19f91b955bdb9ee3aef13bbff40c02ff44",
    "b8a9e3b4a0ebdaf111c4499c8e465b30ad2077bc4a62448eeaee5f77cf3a2a66",
    "3938bbfdc2befe11089d2a2c3e6fb1b9070b70917f2adb803248398a07e44c73",
    "4b812428a3112be107627ca7db82c89f7e7a3f5cbe7b663c2af2f6e20599c67b",
    "eff23a6b3184b9032dcd3599c3a636a827399ccad79e7bfc9e22ff70cd5b67cb",
    "825ae6835c175c1eed83c2ee4aa2f4065ca87b93d97b2854af55c863b0decddc",
    "ca3b5a666dc87c49b31e49193b844fb8f0070f0588f7b9c5572b88f0156d6e40"
]
# Collect every MISP event index entry that references one of the hashes.
events = []
for sample_hash in hashes:
    events += misp.search_index(pythonify=True, attribute=sample_hash)

# Count each event's "Webserver" attribute value, at most once per event.
webservers = dict()
for index_entry in events:
    matched = misp.search(eventid=index_entry.id, pythonify=True)
    if len(matched) > 0:
        event = matched[0]
        web_counted = False
        for attribute in event.attributes:
            if attribute.comment == "Webserver" and not web_counted:
                web_counted = True
                webservers[attribute.value] = webservers.get(attribute.value, 0) + 1

# Order the tallies by descending count for plotting.
webserver_name = []
webserver_count = []
for name, count in sorted(webservers.items(), key=lambda item: item[1], reverse=True):
    webserver_name.append(name)
    webserver_count.append(count)

# Bar chart of webserver frequency, saved to disk and shown on screen.
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.bar(webserver_name, webserver_count)
plt.xticks(rotation='vertical')
plt.savefig('webserver.png', bbox_inches='tight')
plt.show()
983,664 | a5ecdd16e4678b8b51c5aa0637e6669f408dbd7a | from romutils import *
import opcodes
import datetime
from flags import *
from cpubase import *
from ppu import *
from aipu import *
import sys
from mapper.nrom128 import *
class NESCPU(CPUBase):
#This is the CPU base plus the memory mapped peripherals
    def __init__(self):
        """Build the CPU core plus its memory-mapped peripherals (PPU, APU/input, mapper)."""
        CPUBase.__init__(self)
        self.cycleSpeed = 1770000 #NTSC and 1790000 for PAL, to be configurable
        # Microseconds of wall time per CPU cycle, derived from cycleSpeed.
        self.microsPerCycle = (1/self.cycleSpeed) * 1000000
        self.cycleDeltaSum = 0
        # No cartridge loaded yet; populated by loadROM().
        self.ROMData = None
        self.PPU = PPU()
        self.APUAndInputManager = AIPU()
        # Default cartridge mapper; NROM-128.
        self.mapper = NROM128Mapper()
        self.flags = Flags()
    def loadROM(self, filename):
        """Load a ROM file: reset the processor state, then populate the
        mapper (PRG data) and the PPU from the file contents.

        Args:
            filename: path of the ROM file on disk.

        Raises:
            Exception: if the header reports the NES2.0 format (unsupported).
        """
        self.__powerUpState__()
        (self.ROMData, romfile) = loadROM(filename, True)
        if self.ROMData.NESVersion == "NES2.0":
            raise Exception("NES2.0 ROMs are not yet supported.")
        romfile.seek(0,0)
        if (self.ROMData.hasTrainer):
            #Skip the trainer: 528 = 16-byte header + 512-byte trainer
            romfile.seek(528)
        else:
            #Skip just the 16-byte header
            romfile.seek(16)
        #Populates our PRG rom data
        self.mapper.populate(self.ROMData, romfile)
        self.PPU.populate(self.ROMData, romfile)
        romfile.close()
    def __powerUpState__(self):
        """Set the processor's initial (power-up) state.

        NOTE(review): currently a stub -- loadROM() calls this before mapping
        the ROM, so the power-up register/flag values still need implementing.
        """
        pass
def writeByte(self, location, value):
#This writebyte function is important
#because we don't want to access memory directly
#since the NES has memory mapping, we may want specific effects when reading or
#writing data. If the location is less than 0x4020, the specific location is
#delegated - whether APU, PPU, work memory etc - above 0x4020, the mapper
#handles
#Remember that the Python range function is inclusive of the first,
#but exclusive of the last value
if location in range(0x0, 0x800):
#Work memory
self.RAM[location] = value
elif location in range(0x800, 0x2000):
#Work memory mirror
self.RAM[location % 0x800] = value
elif location in range(0x2000, 0x2008):
#PPU memory map
self.PPU.writeByte(location, value)
elif location in range(0x2008, 0x4000):
#PPU memory map mirror
self.PPU.writeByte(0x2000 + (location % 8), value)
elif location in range(0x4000, 0x4018):
#Input and APU
self.APUAndInputManager.writeByte(location, value)
elif location in range(0x4018, 0x4020):
#The test location, unsure of what to do here
pass
else:
#This means it's definitely referring to the memory map
self.mapper.writeByte(location, value)
def readByte(self, location):
#The explanation works as above
if location in range(0x0, 0x800):
#Work memory
return self.RAM[location]
elif location in range(0x800, 0x2000):
#Work memory mirror
return self.RAM[location % 800]
elif location in range(0x2000, 0x2008):
#PPU memory map
return self.PPU.readByte(location)
elif location in range(0x2008, 0x4000):
#PPU memory map mirror
return self.PPU.readByte(0x2000 + (location % 8))
elif location in range(0x4000, 0x4018):
#Input and APU
return self.APUAndInputManager.readByte(location)
elif location in range(0x4018, 0x4020):
#The test location, unsure of what to do here
pass
else:
#This means it's definitely referring to the memory map
self.mapper.readByte(location)
def readWord(self, location):
#The explanation works as above
if location in range(0x0, 0x800):
#Work memory
return (self.RAM[location+1] << 8) + self.RAM[location]
elif location in range(0x800, 0x2000):
#Work memory mirror
return (self.RAM[(location % 800)+1] << 8) + self.RAM[location % 800]
elif location in range(0x2000, 0x2008):
#PPU memory map
return self.PPU.readWord(location)
elif location in range(0x2008, 0x4000):
#PPU memory map mirror
return self.PPU.readWord(0x2000 + (location % 8))
elif location in range(0x4000, 0x4018):
#Input and APU
return self.APUAndInputManager.readWord(location)
elif location in range(0x4018, 0x4020):
#The test location, unsure of what to do here
pass
else:
#This means it's definitely referring to the memory map
self.mapper.readWord(location)
def execute(self):
#This command fetches and runs the current opcode based off where the program
#counter is, increments the program counter accordingly, then returns the
#duration of the command that was just executed in cycles
currentOp = opcodes.code_dict[self.readByte(self.pc)]
advance, duration = self._executeOpcode(currentOp)
self.pc += advance
return duration
def run(self):
#This is the simplest event loop possible ignoring input
#It gets the starting time initially, then gets a time for every iteration
#of the loop. The difference between those is the delta. The duration of the
#last instruction executed (in cycles) is stored. If the delta time between two
#iterations of the loop is more than or equal to the length of the last instruction
#the last instruction should have taken, then you can move on, otherwise wait
start = datetime.now()
while True:
newtime = datetime.now()
delta_us = newtime - start()
if delta_us >= (self.LIDC * self.microsPerCycle):
start = newtime
#Need to check for any pending interrupts here
#The RESET interrupt has first priority, because its a system reset
#The NMI would come from the PPU and has next priority
#BRK has third priority
#IRQ has last priority
#NMI is non-maskable, so will be called whether or not interrupts
#are disabled, and can actually intrude in other interrupts
if self.pendingRST:
self.RESET()
self.LIDC = 7
if self.pendingNMI:
self.NMI()
self.LIDC = 7
if self.flags.getInterruptDisable() == False:
if self.pendingBRK:
self.BRK()
self.LIDC = 7
if self.pendingIRQ:
self.IRQ()
self.LIDC = 7
LIDC = self.execute()
else:
pass
def disassemble(self, fname, ofname):
GameSession = NESCPU()
GameSession.loadROM(fname)
printHeader(GameSession.ROMData)
if ofname == None:
ofname = "disassembly.asm"
i = 0
disasm = open(ofname, "w+")
while i < GameSession.ROMData.PRGRomSize:
if i == GameSession.ROMData.PRGRomSize - 6:
#Handle the interrupt vectors
veclist = GameSession.mapper.romdata[i: i+6]
nmislice = [hex(u) for u in veclist[0:2]]
nmislice.reverse()
tempstr = f"{hex(i)} : NMI Vector - {nmislice} \n"
disasm.write(tempstr)
resetvector = [hex(u) for u in veclist[2:4]]
resetvector.reverse()
tempstr = f"{hex(i+2)} : Reset Vector - {resetvector} \n"
disasm.write(tempstr)
irqslice = [hex(u) for u in veclist[4:6]]
irqslice.reverse()
tempstr = f"{hex(i+4)} : IRQ Vector - {irqslice} \n"
disasm.write(tempstr)
break
try:
currentOp = opcodes.code_dict[GameSession.mapper.readByte(i)]
arglist = []
tempstr = ""
if currentOp.name == "BRK":
#BRK is a 2 byte instruction but only takes up one
tempstr = f"{hex(i)} : {currentOp.name} \n"
disasm.write(tempstr)
i -= 1
elif currentOp.length == 1:
tempstr = f"{hex(i)} : {currentOp.name} \n"
disasm.write(tempstr)
else:
j = 1
templist = []
while j != currentOp.length:
templist.append(GameSession.mapper.readByte(i + j))
j += 1
arglist = [hex(u) for u in templist]
arglist.reverse()
tempstr = f"{hex(i)} : {currentOp.name} {str(arglist)} \n"
disasm.write(tempstr)
i += currentOp.length
except KeyError:
disasm.write(f"{hex(i)} : .DB {hex(GameSession.mapper.readByte(i))} \n")
i += 1
disasm.close()
if __name__ == "__main__":
    # When invoked from the command line this module acts as a disassembler.
    cpu = NESCPU()
    try:
        argc = len(sys.argv)
        if argc == 3:
            fname, ofname = sys.argv[1], sys.argv[2]
        elif argc == 2:
            fname, ofname = sys.argv[1], None
        else:
            # Wrong argument count: fall through to the usage message.
            raise EnvironmentError
        cpu.disassemble(fname, ofname)
    except EnvironmentError:
        print("To call the disasembler requires two arguments - the name of the file to be disassembled, and where you want the file to go. You can omit the second argument and it will put it in a default file called disassembly.asm")
#The NES memory map is as follows:
#0x0000 - 0x07FF - 2kb internal ram
#0x0800 to 0x1FFF - mirror of the 2kb
#0x2000 to 0x2007 - NES PPU registers
#0x2008 to 0x3FFF - Mirrors of the PPU register
#0x4000 to 0x4017 - Input and APU
#0x4018 to 0x401F - CPU Test
#0x4020 to 0xFFFF - cartridge space and mapper registers
#The written byte needs to be checked and hijacked appropriately |
983,665 | e75170fe305210007e842c6559e4fcf6724b91eb | from dataclasses import dataclass
from reinvent_scoring import ScoringFuncionParameters
from running_modes.configurations import ReactionFilterConfiguration
@dataclass
class ScoringConfiguration:
    """Top-level configuration bundle for a scoring running mode."""
    input: str  # scoring input (presumably a path to the molecules file -- confirm with caller)
    output_folder: str  # directory where scoring results are written
    reaction_filter: ReactionFilterConfiguration  # reaction-filter settings passed through
    scoring_function: ScoringFuncionParameters  # scoring-function parameters passed through
|
983,666 | 6bca7f9d23065e28b14b7b3bae5910bece7e1170 | # -*- coding: utf-8 -*-
from odoo.tools.translate import _
from odoo import models, fields, api
import datetime
from odoo.exceptions import ValidationError
now = datetime.datetime.now()
class payment_installment_type(models.Model):
    """Installment plan definition: how many payments, spread over how long."""
    _name = 'payment.installment.type'
    name = fields.Char('Payment Installment',required=True)
    no_of_installment = fields.Integer('Number of Installment')
    duration = fields.Integer('Duration')
    days_to_pay = fields.Integer('Day(s) to Pay')
    # week_of_issue = fields.Selection([('one', 'One Week'), ('two', 'Two Weeks'), ('three', 'Three Weeks')], 'Week(s) of Issue')
    @api.one
    @api.constrains('duration', 'no_of_installment', 'days_to_pay')
    def validation_durations_digit(self):
        """Reject non-positive values for the three numeric plan fields."""
        if self.no_of_installment <= 0:
            raise ValidationError(_('Warning! \n Number of Installment must be a value.'))
        if self.duration <= 0:
            raise ValidationError(_('Warning! \n Duration must be a value.'))
        if self.days_to_pay <= 0:
            raise ValidationError(_('Warning! \n Day(s) to Pay must be a value.'))
    # @api.model
    # def get_week_of_issue_integer(self):
    #     week_of_issue = 0
    #     if self.week_of_issue == 'one':
    #         week_of_issue = 1
    #     if self.week_of_issue == 'two':
    #         week_of_issue = 2
    #     if self.week_of_issue == 'three':
    #         week_of_issue = 3
    #     return week_of_issue
class payment_installment(models.Model):
    """An individual installment due against a customer invoice."""
    _name = 'payment.installment'
    _inherit = 'mail.thread'
    _description = 'Payment Installment'
    number = fields.Char('Payment Installment No',required=True)
    name = fields.Char('Name')
    type = fields.Many2one('payment.installment.type',string="Type")
    invoice_id = fields.Many2one('account.invoice',string='Invoice no',required=True)
    currency_id = fields.Many2one('res.currency',related='invoice_id.currency_id')
    amount_due = fields.Monetary('Amount Due')
    origin = fields.Char('Origin')
    sale_order_id = fields.Many2one('sale.order', string="Sale Order")
    @api.model
    def create_payment_installment_monthly(self):
        """Cron entry point: for every installment type, create the next
        installment for each invoice whose scheduled creation date is today,
        and log a follow-up activity on the new record."""
        types = self.env['payment.installment.type'].search([])
        for type in types:
            # Only invoices that still have installments left in their plan.
            invoices = self.env['account.invoice'].search([
                ('installment_type_id', '=', type.id),
                ('installment_count', '<', int(type.duration))
            ])
            for invoice in invoices:
                # NOTE(review): `now` is captured at module import time, so a
                # long-running worker keeps comparing against the start date.
                day_now = now.date().strftime('%Y-%m-%d')
                if invoice.next_date_create_installment == day_now:
                    payment_installment = self.create_payment_installment(invoice.id)
                    # Reuse (or lazily create) the dedicated activity type.
                    activity_type_id = self.env['mail.activity.type'].search([('name', '=', 'Payment Installment')])
                    if not activity_type_id:
                        activity_type_id = self.env['mail.activity.type'].create(
                            {'name': 'Payment Installment', 'summary': 'Follow up Payment Installment Entries'})
                    model_id = self.env['ir.model'].search([('model', '=', 'payment.installment')])
                    activity_vals = {
                        'user_id': payment_installment.invoice_id.user_id and payment_installment.invoice_id.user_id.id or False,
                        'date_deadline': datetime.datetime.today(),
                        'activity_type_id': activity_type_id and activity_type_id[0].id,
                        'note': "<p>Payment Installment: " + str(payment_installment.name)+ " </p>",
                        'res_id': payment_installment.id,
                        'res_model': 'payment.installment',
                        'res_model_id': model_id.id,
                        # NOTE(review): `.summary` is read on the whole search
                        # result while `.id` uses only the first record --
                        # confirm the search can never return multiple rows.
                        'summary': activity_type_id.summary
                    }
                    self.env['mail.activity'].create(activity_vals)
    @api.model
    def create_payment_installment(self,invoice_id):
        """Create one installment record for `invoice_id`, bump the invoice's
        installment counter and schedule the next creation date.

        Returns the newly created payment.installment record.
        """
        invoice = self.env['account.invoice'].browse(invoice_id)
        # Zero-padded sequence within the invoice, e.g. 'ST01/INV123'.
        st_count = str(invoice.installment_count + 1) if invoice.installment_count >= 9 else '0'+str(invoice.installment_count + 1)
        number = 'ST' + st_count + '/' + invoice.number
        vals = {
            'name': number,
            'number': number,
            'type': invoice.installment_type_id.id,
            'invoice_id': invoice.id,
            # Equal split of the invoice total over the plan's duration.
            'amount_due': (invoice.amount_total / int(invoice.installment_type_id.duration)),
            'origin': invoice.origin,
            'sale_order_id': invoice.sale_order_id and invoice.sale_order_id.id or False,
        }
        payment_installment = self.env['payment.installment'].create(vals)
        invoice.installment_count += 1
        invoice.next_date_create_installment = invoice.get_next_date_create_installment(now.date().strftime('%Y-%m-%d'))
        return payment_installment
class paymentinstallment(models.Model):
    """Re-registration of payment.installment that mixes in mail activities."""
    _name = "payment.installment"
    _inherit = ['payment.installment', 'mail.activity.mixin']
class sale_order(models.Model):
    """Sale order extension linking orders to their payment installments."""
    _inherit = "sale.order"
    # Installment plan selected for this order.
    payment_installment_type = fields.Many2one('payment.installment.type', string="Payment Installment")
    # Number of payment.installment records that reference this order.
    payment_installment_count = fields.Integer(string='number payment', compute='_compute_count_payment_installment')
    @api.depends('payment_installment_type')
    def _compute_count_payment_installment(self):
        for order in self:
            order.payment_installment_count = self.env['payment.installment'].sudo().search_count([('sale_order_id', '=', order.id)])
    @api.multi
    def action_installment(self):
        # Bug fix: the original body was the bare expression `True`, a no-op
        # that made the action return None; button/action handlers are
        # expected to return True.
        return True
class sale_advance_payment_inv(models.TransientModel):
    """Advance-payment wizard extension that carries the sale order's
    installment plan into the invoicing step."""
    _inherit = 'sale.advance.payment.inv'
    payment_installment_type = fields.Many2one('payment.installment.type', string="Payment Installment")
    @api.model
    def default_get(self, fields):
        """Prefill the wizard from the active sale order: when the order has
        an installment plan, force the 'delivered' method and copy the plan.

        NOTE(review): assumes the wizard is opened with an `active_id` in the
        context pointing at a sale.order -- confirm callers always provide it.
        """
        result = super(sale_advance_payment_inv, self).default_get(fields)
        active_id = self.env.context.get('active_id')
        sale_order = self.env['sale.order'].browse(active_id)
        if sale_order.payment_installment_type:
            result.update({
                'advance_payment_method': 'delivered',
                'payment_installment_type' : sale_order.payment_installment_type.id
            })
        return result
983,667 | 3a7633cfadb1e6be7e0dbf0a9d02dee287d6fed1 | from picamera import PiCamera
import time
import requests
import base64
# API subscription key for the Azure endpoint, loaded from a local secrets file.
key=""
with open('key.password', 'r') as file:
    key = file.read().replace('\n', '')
# Wall-clock timer for the whole capture-and-classify round trip.
start = time.time()
camera = PiCamera()
camera.resolution = (640, 480) # Model Resolution
camera.start_preview()
camera.capture('/home/pi/Desktop/image.jpg')
camera.stop_preview()
# Base64-encode the captured frame so it can be POSTed as a text body.
encoded_string = ""
with open("/home/pi/Desktop/image.jpg", "rb") as image_file:
    encoded_string = base64.b64encode(image_file.read())
url = "https://facemask-apim.azure-api.net/tensorpython37/HttpTrigger1?flag=read"
payload=encoded_string
headers = {
    'Host': 'facemask-apim.azure-api.net',
    'Ocp-Apim-Subscription-Key': key,
    'Ocp-Apim-Trace': 'true',
    'Content-Type': 'text/plain'
}
# Send the image to the face-mask inference API, then report the response
# body and the elapsed time.
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
print(time.time() - start)
|
983,668 | a871e8bfb8829a9be931d8e2af3b26841934b153 | from djblets.extensions.base import Extension, ExtensionManager
# Module-level singleton, created lazily on first request.
_extension_manager = None


def get_extension_manager():
    """Return the shared ExtensionManager, creating it on first call."""
    global _extension_manager
    if _extension_manager:
        return _extension_manager
    _extension_manager = ExtensionManager("reviewboard.extensions")
    return _extension_manager
|
983,669 | 9d898b5c328b932716d18076acaae4aed2428c54 | #inheritance
class Mammal:
    """Base class for mammals; provides the shared walk behaviour."""

    def walk(self):
        # Announce the walking action on stdout.
        print("walk")
class Dog(Mammal):
    """Dog inherits walk() from Mammal and adds no behaviour of its own."""
    pass
class Cat(Mammal):
    """Cat inherits walk() from Mammal and announces itself on creation."""

    def __init__(self):
        # Print a greeting whenever a Cat instance is constructed.
        print("I am a cat")
# Demonstrate inheritance: Dog and Cat both reuse walk() from Mammal.
dog1=Dog()
dog1.walk()
cat1=Cat()
cat1.walk()
obj=Mammal()
obj.walk()
983,670 | c9ad6774f8de428c1be37310b692285fdaa94ac2 | # Generated by Django 3.1.6 on 2021-02-18 00:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates Pessoa plus the multi-table
    inheritance children PessoaFisica and PessoaJuridica.

    Do not hand-edit the operations; generate a follow-up migration instead.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Pessoa',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('endereco', models.CharField(blank=True, max_length=100, null=True, verbose_name='endereço')),
                ('numero', models.IntegerField(blank=True, null=True, verbose_name='número')),
                ('complemento', models.CharField(blank=True, max_length=100, null=True, verbose_name='complemento')),
                ('bairro', models.CharField(blank=True, max_length=100, null=True, verbose_name='bairro')),
                ('cep', models.CharField(blank=True, max_length=9, null=True, verbose_name='CEP')),
            ],
            options={
                'verbose_name': 'Pessoa',
                'verbose_name_plural': 'Pessoas',
            },
        ),
        migrations.CreateModel(
            name='PessoaFisica',
            fields=[
                ('pessoa_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='crm.pessoa')),
                ('nome', models.CharField(max_length=100)),
                ('sobrenome', models.CharField(blank=True, max_length=100, null=True)),
                ('cpf', models.CharField(blank=True, max_length=11, null=True, unique=True, verbose_name='CPF')),
                ('rg', models.CharField(blank=True, max_length=11, null=True, verbose_name='RG')),
            ],
            options={
                'verbose_name': 'pessoa física',
                'verbose_name_plural': 'pessoas físicas',
                'ordering': ('nome',),
            },
            bases=('crm.pessoa',),
        ),
        migrations.CreateModel(
            name='PessoaJuridica',
            fields=[
                ('pessoa_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='crm.pessoa')),
                ('razao_social', models.CharField(max_length=100, verbose_name='razão social')),
                ('nome_fantasia', models.CharField(blank=True, max_length=100, null=True, verbose_name='nome fantasia')),
                ('cnpj', models.CharField(blank=True, max_length=14, null=True, unique=True, verbose_name='CNPJ')),
                ('inscricao_estadual', models.CharField(blank=True, max_length=25, null=True, verbose_name='inscrição estadual')),
                ('inscricao_municipal', models.CharField(blank=True, max_length=25, null=True, verbose_name='inscrição municipal')),
            ],
            options={
                'verbose_name': 'pessoa jurídica',
                'verbose_name_plural': 'pessoas jurídicas',
                'ordering': ('razao_social',),
            },
            bases=('crm.pessoa',),
        ),
    ]
983,671 | b55eb916226bb90e6e23b558c3b2913d631f3e49 | import logging
import random
# Default request headers; User-Agent and Referer are filled in per request
# by setheaders().
headers = {
    'Accept': 'application/json, text/plain, */*',
    'Accept-Language': 'zh-CN,zh;q = 0.8',
    'Connection': 'keep-alive',
    'Host': 'space.bilibili.com',
    'Origin': 'https://space.bilibili.com',
    'Referer': '',
    'User-Agent': ''
}
# Hard-coded fallback proxies; setproxy() adds entries read from
# proxies/ipproxy.txt.
proxies = {
    'http': 'http://122.193.14.103:81',
    'https': 'https://119.23.63.152:8118'
}
def setheaders():
    """Fill the shared headers dict with a random User-Agent and a randomised
    space.bilibili.com Referer, then return it."""
    user_agents = setagent()
    headers['User-Agent'] = random.choice(user_agents)
    headers['Referer'] = f"http://space.bilibili.com/{random.randint(9000, 10000)}/"
    return headers
def setproxy():
    """Pick a random line from proxies/ipproxy.txt and merge it into the
    module-level `proxies` dict, which is returned.

    NOTE(review): the key is taken from the text before the first ':' of the
    first whitespace-separated token, which only yields the expected
    'http'/'https' keys if each line starts with the scheme -- confirm the
    actual file format.
    """
    proxies_pool = []
    with open('proxies/ipproxy.txt', 'r') as f:
        for line in f:
            proxies_pool.append(line.rstrip())
    # Uniformly random entry from the pool.
    item = proxies_pool[random.randint(0, len(proxies_pool)-1)]
    key = item.split()[0].split(':')[0]
    value = item.split()[1]
    proxies[key] = value
    return proxies
def setagent():
    """Load user-agent strings from proxies/useragent.txt and return them
    shuffled.

    NOTE(review): the slice [1:-1 - 1] (i.e. [1:-2]) drops the first and the
    last two characters of each stripped line -- presumably a surrounding
    quote plus a trailing quote-and-comma; confirm against the file format.
    """
    uas = []
    with open('proxies/useragent.txt', 'r') as f:
        for line in f.readlines():
            # NOTE(review): `line is not None` is always true when iterating
            # a file; an empty-line check may have been intended here.
            if line is not None:
                uas.append(line.strip()[1:-1 - 1])
    random.shuffle(uas)
    return uas
def setlogger():
    """Configure the root logger to append DEBUG-level records, with a
    timestamp/file/line format, to log/example.log."""
    root = logging.getLogger('')
    root.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
    file_handler = logging.FileHandler('log/example.log')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    root.addHandler(file_handler)
|
983,672 | a66364a73c9dd565af57b8fc5cb1541ada86f66c | from django.conf.urls import patterns, url
from miniURL.views import URLCreate, URLUpdate, URLDelete
# URL routes for the miniURL app (legacy Django `patterns()` syntax with
# string view names resolved against the 'miniURL.views' prefix).
urlpatterns = patterns('miniURL.views',
    #url(r'^accueil$', 'home', name='url_liste'),
    #url(r'^nouvelle-url/$', 'nouvelle_url', name='url_nouveau'),
    url(r'^redirection/(?P<mini_URL>.+)', 'redirection', name='url_redirection'),
    url(r'^$', 'home', name='url_liste'),
    url(r'^(?P<page>\d+)$', 'home', name='url_liste'),
    url(r'^nouveau$', URLCreate.as_view(), name='url_nouveau'),
    url(r'^edition/(?P<small_url>\w{6})$', URLUpdate.as_view(), name='url_update'),
    url(r'^supprimer/(?P<small_url>\w{6})$', URLDelete.as_view(), name='url_delete'),
)
|
983,673 | 8b1e63ff819c85c5a75144d0d3456c50e8f973b0 | import time
import RPi.GPIO as io
io.setmode(io.BCM)  # use Broadcom (BCM) pin numbering
pir_pin = 18  # GPIO pin wired to the PIR motion sensor's output
io.setup(pir_pin, io.IN) # activate input
# Poll the sensor roughly once per second forever; print an alert whenever
# the PIR output reads high (motion detected).
while True:
    if io.input(pir_pin):
        print("PIR ALARM!")
    time.sleep(1)
|
983,674 | 4f30a6b3dac5dbfce9c63604701bc32e12327fd3 | #@author sebastian
class UnifromeD() :
    """Discrete uniform distribution over the integers 1..n.

    Every outcome x in {1, ..., n} has the same probability 1/n.
    (Class name typo kept for backward compatibility with callers.)
    """

    def __init__(self, n):
        # Number of equally likely outcomes.
        self.__n = n

    # Getters and setters kept for interface compatibility.
    def getN(self):
        return self.__n

    def setN(self, valor):
        self.__n = valor

    def funcionProbabilidad(self, x):
        """Return P(X = x) = 1/n (constant for every outcome)."""
        return 1 / (self.__n)

    def funcionDistribucion(self, i, f):
        """Return the accumulated probability P(i <= X <= f), inclusive."""
        resultado = 0.0
        for x in range(i, f + 1):
            resultado = resultado + self.funcionProbabilidad(x)
        return resultado

    def media(self):
        """Return the mean E[X] = (1/n) * sum(1..n) = (n + 1) / 2."""
        # Bug fix: range(1, n) omitted the outcome n; use n + 1 so the sum
        # covers 1..n inclusive.
        acumulador = 0
        for x in range(1, self.__n + 1):
            acumulador += x
        return (1 / self.__n) * acumulador

    def varianza(self):
        """Return the variance (1/n) * sum((x - mean)^2 for x in 1..n)."""
        # Bug fix: include the outcome n (range end n + 1); also hoist the
        # mean out of the loop instead of recomputing it every iteration.
        mu = self.media()
        acumulador = 0
        for x in range(1, self.__n + 1):
            acumulador += (x - mu) ** 2
        return (1 / self.__n) * acumulador

    def desviacionEstandar(self):
        """Return the standard deviation, sqrt(variance)."""
        return self.varianza() ** (0.5)
|
983,675 | 26105a8f6743c9f5bded525cb34543200a374a61 | from django.urls import path
from app1 import views
# URL namespace used when reversing, e.g. reverse('app1:index').
app_name='app1'
urlpatterns = [
    path('index/', views.index, name='index')
]
983,676 | b7311717f121e0c62783d7db48813eb46a1591bf | from django.apps import AppConfig
class ServeupConfig(AppConfig):
    """Django application configuration for the ServeUp app."""
    name = 'ServeUp'
|
983,677 | 9715bd3889fea736c3d67506c9ca0c99b2367c14 | #!/usr/bin/env python
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import unittest
import numpy as np
import test_util as tu
import tritonclient.grpc as tritongrpcclient
import tritonclient.http as tritonhttpclient
from tritonclient.utils import InferenceServerException
from tritonclient.utils import cuda_shared_memory as cudashm
class QueryTest(tu.TestResultCollector):
def test_http(self):
triton_client = tritonhttpclient.InferenceServerClient("localhost:8000")
inputs = []
inputs.append(tritonhttpclient.InferInput("INPUT", [1], "UINT8"))
inputs[0].set_data_from_numpy(np.arange(1, dtype=np.uint8))
try:
triton_client.infer(model_name="query", inputs=inputs)
self.assertTrue(False, "expect error with query information")
except InferenceServerException as ex:
self.assertTrue("OUTPUT0 CPU 0" in ex.message())
self.assertTrue("OUTPUT1 CPU 0" in ex.message())
def test_http_shared_memory(self):
triton_client = tritonhttpclient.InferenceServerClient("localhost:8000")
inputs = []
inputs.append(tritonhttpclient.InferInput("INPUT", [1], "UINT8"))
inputs[0].set_data_from_numpy(np.arange(1, dtype=np.uint8))
# Set up CUDA shared memory for outputs
triton_client.unregister_system_shared_memory()
triton_client.unregister_cuda_shared_memory()
shm_op0_handle = cudashm.create_shared_memory_region("output0_data", 4, 0)
shm_op1_handle = cudashm.create_shared_memory_region("output1_data", 4, 0)
triton_client.register_cuda_shared_memory(
"output0_data", cudashm.get_raw_handle(shm_op0_handle), 0, 4
)
triton_client.register_cuda_shared_memory(
"output1_data", cudashm.get_raw_handle(shm_op1_handle), 0, 4
)
outputs = []
outputs.append(
tritonhttpclient.InferRequestedOutput("OUTPUT0", binary_data=True)
)
outputs[-1].set_shared_memory("output0_data", 4)
outputs.append(
tritonhttpclient.InferRequestedOutput("OUTPUT1", binary_data=True)
)
outputs[-1].set_shared_memory("output1_data", 4)
try:
triton_client.infer(model_name="query", inputs=inputs, outputs=outputs)
self.assertTrue(False, "expect error with query information")
except InferenceServerException as ex:
self.assertTrue("OUTPUT0 GPU 0" in ex.message())
self.assertTrue("OUTPUT1 GPU 0" in ex.message())
cudashm.destroy_shared_memory_region(shm_op0_handle)
cudashm.destroy_shared_memory_region(shm_op1_handle)
triton_client.unregister_system_shared_memory()
triton_client.unregister_cuda_shared_memory()
def test_http_out_of_shared_memory(self):
triton_client = tritonhttpclient.InferenceServerClient("localhost:8000")
inputs = []
inputs.append(tritonhttpclient.InferInput("INPUT", [1], "UINT8"))
inputs[0].set_data_from_numpy(np.arange(1, dtype=np.uint8))
# Set up too small CUDA shared memory for outputs, expect query
# returns default value
triton_client.unregister_system_shared_memory()
triton_client.unregister_cuda_shared_memory()
shm_op0_handle = cudashm.create_shared_memory_region("output0_data", 1, 0)
shm_op1_handle = cudashm.create_shared_memory_region("output1_data", 1, 0)
triton_client.register_cuda_shared_memory(
"output0_data", cudashm.get_raw_handle(shm_op0_handle), 0, 1
)
triton_client.register_cuda_shared_memory(
"output1_data", cudashm.get_raw_handle(shm_op1_handle), 0, 1
)
outputs = []
outputs.append(
tritonhttpclient.InferRequestedOutput("OUTPUT0", binary_data=True)
)
outputs[-1].set_shared_memory("output0_data", 1)
outputs.append(
tritonhttpclient.InferRequestedOutput("OUTPUT1", binary_data=True)
)
outputs[-1].set_shared_memory("output1_data", 1)
try:
triton_client.infer(model_name="query", inputs=inputs, outputs=outputs)
self.assertTrue(False, "expect error with query information")
except InferenceServerException as ex:
self.assertTrue("OUTPUT0 CPU 0" in ex.message())
self.assertTrue("OUTPUT1 CPU 0" in ex.message())
cudashm.destroy_shared_memory_region(shm_op0_handle)
cudashm.destroy_shared_memory_region(shm_op1_handle)
triton_client.unregister_system_shared_memory()
triton_client.unregister_cuda_shared_memory()
def test_grpc(self):
triton_client = tritongrpcclient.InferenceServerClient("localhost:8001")
inputs = []
inputs.append(tritongrpcclient.InferInput("INPUT", [1], "UINT8"))
inputs[0].set_data_from_numpy(np.arange(1, dtype=np.uint8))
try:
triton_client.infer(model_name="query", inputs=inputs)
self.assertTrue(False, "expect error with query information")
except InferenceServerException as ex:
self.assertTrue("OUTPUT0 CPU 0" in ex.message())
self.assertTrue("OUTPUT1 CPU 0" in ex.message())
def test_grpc_shared_memory(self):
triton_client = tritongrpcclient.InferenceServerClient("localhost:8001")
inputs = []
inputs.append(tritongrpcclient.InferInput("INPUT", [1], "UINT8"))
inputs[0].set_data_from_numpy(np.arange(1, dtype=np.uint8))
# Set up CUDA shared memory for outputs
triton_client.unregister_system_shared_memory()
triton_client.unregister_cuda_shared_memory()
shm_op0_handle = cudashm.create_shared_memory_region("output0_data", 4, 0)
shm_op1_handle = cudashm.create_shared_memory_region("output1_data", 4, 0)
triton_client.register_cuda_shared_memory(
"output0_data", cudashm.get_raw_handle(shm_op0_handle), 0, 4
)
triton_client.register_cuda_shared_memory(
"output1_data", cudashm.get_raw_handle(shm_op1_handle), 0, 4
)
outputs = []
outputs.append(tritongrpcclient.InferRequestedOutput("OUTPUT0"))
outputs[-1].set_shared_memory("output0_data", 4)
outputs.append(tritongrpcclient.InferRequestedOutput("OUTPUT1"))
outputs[-1].set_shared_memory("output1_data", 4)
try:
triton_client.infer(model_name="query", inputs=inputs, outputs=outputs)
self.assertTrue(False, "expect error with query information")
except InferenceServerException as ex:
self.assertTrue("OUTPUT0 GPU 0" in ex.message())
self.assertTrue("OUTPUT1 GPU 0" in ex.message())
cudashm.destroy_shared_memory_region(shm_op0_handle)
cudashm.destroy_shared_memory_region(shm_op1_handle)
triton_client.unregister_system_shared_memory()
triton_client.unregister_cuda_shared_memory()
def test_grpc_out_of_shared_memory(self):
    """Undersized shared-memory outputs fall back to the default (CPU) placement."""
    triton_client = tritongrpcclient.InferenceServerClient("localhost:8001")
    inputs = []
    inputs.append(tritongrpcclient.InferInput("INPUT", [1], "UINT8"))
    inputs[0].set_data_from_numpy(np.arange(1, dtype=np.uint8))
    # Set up too small CUDA shared memory for outputs, expect query
    # returns default value
    triton_client.unregister_system_shared_memory()
    triton_client.unregister_cuda_shared_memory()
    # 1-byte regions are intentionally too small to hold the outputs.
    shm_op0_handle = cudashm.create_shared_memory_region("output0_data", 1, 0)
    shm_op1_handle = cudashm.create_shared_memory_region("output1_data", 1, 0)
    triton_client.register_cuda_shared_memory(
        "output0_data", cudashm.get_raw_handle(shm_op0_handle), 0, 1
    )
    triton_client.register_cuda_shared_memory(
        "output1_data", cudashm.get_raw_handle(shm_op1_handle), 0, 1
    )
    outputs = []
    outputs.append(tritongrpcclient.InferRequestedOutput("OUTPUT0"))
    outputs[-1].set_shared_memory("output0_data", 1)
    outputs.append(tritongrpcclient.InferRequestedOutput("OUTPUT1"))
    outputs[-1].set_shared_memory("output1_data", 1)
    try:
        triton_client.infer(model_name="query", inputs=inputs, outputs=outputs)
        self.assertTrue(False, "expect error with query information")
    except InferenceServerException as ex:
        # Falls back to CPU since the undersized regions could not be used.
        self.assertTrue("OUTPUT0 CPU 0" in ex.message())
        self.assertTrue("OUTPUT1 CPU 0" in ex.message())
    # Clean up so later tests start without leftover registrations.
    cudashm.destroy_shared_memory_region(shm_op0_handle)
    cudashm.destroy_shared_memory_region(shm_op1_handle)
    triton_client.unregister_system_shared_memory()
    triton_client.unregister_cuda_shared_memory()
if __name__ == "__main__":
unittest.main()
|
983,678 | 3f933b8401083966625e5c37f89cc46639c6f7f0 | bZ&?&iuaKoRwlu^TomBmsh<<s0z0A
z1?2gSExgA2lpY?-_=@cSyY#d`wS!>@o?SKp9(n5y;-e<o=-mra_RkWDoS`^tg4;`(
z|33g~K$X80UTja`#r8ygO#56oGn-mFnb3hxzGEq|;B;<EB9}{C1(?&m+F@?91D8rH
z&ZLuj68ZV%q*J;>^}{$iNp7#*3yHj-J(%w+&de`nW(KNj@2{7Wx!ntyE6)+0$2-Dn
z?hA)_(S7L%FO)wm+z&j$%jJ)y|Fm%bFY?=j`*DZ(vGmz0JRf<47wW%NxL<aJ*S*iQ
z_8n4kw_q<npIppK-e<KqP5Nux+H6zHN$z8<@1^SIQ`UD?#reNb_<dKpQQVo6f2TD%
zvQp-(ujKt+lhc2{(c%o5g7r5;-)%xWY#Y-K+r}t2lUEn6^m1pe7(Q)CD_6x?`$^v4
zHaS^&Y#XyYXh-7A$=ikGy>E*X^a~MTzYrOtUr>0#d6X(ISjX#|?KCoLXZsz&dkDkd
zWu^te_D`fBIJuhy?O`B~f|~Rta`(=sW$z&*3L?(@9<^RS^ZUyp4!#%dY{#MRN;{UL
z^8INCy9|8?+gQ4VLrT;&YK?sMhg!XuU-EX;-;Y?|yVjQ<d>7j}pRjt-pZ?{q$|g4(
zZ(}>=&xW5_UYs#b0@AKS!}04oc5W4|8`I<3owy6rLnq?c-k0E+`!m^$<Qx^1pV_TD
zU!L&!1<g6u>RI^eCkZ%y=<I&w2OMvl$v^V30|$M~GyJzi>2K`#9ZBc&xOOJy7Yv#G
zdN`$?>caGMeV($mdghY-`^s|*v&pRS9=x`_^z}^TGFRIEz*dii#mLZ+=PWm!$}BDP
zb<OWc8ea@Z4j+7ETR)Qdkq2Y8_wVrCd0qPW9edsL<pC#nZee}zUbCKTal+@&7O&ek
zGB+oB4m03n`4&B|!E^=ro*XmZlahQdF6|PP_ka`R2j%JVV{v-@MCCHzWcrEhZE+g?
z_AV{R&UH69+54;_J7c=Kd{~^Y-dMao-?DS(4NjJCQTbuIf_#tFUMyad@45Mr<Ewa1
zkROz%%a6tB^%IrLfRpJbvbV+Q`q^EJvhf8tZNIo{QPyt*r<0dX?h)ySoIsvPKjgIi
z(mQf_$@ys}rxq~BNzP9*IlVk{KY{*!6*%oVzsoiPc`m4Z9*y#bS7IFH4S{xCCcT)Q
zPhXjs*UVRU?@fs6L7x-VzrpuZlCu*Lja>%M9g}+|`m7^ooHKQi)BTHa4|&nBR@{6y
zyRd$ve*c%johH~PW^fxf$hdyG+I>Edu88ZWE8;c?zAI&N>H;9g1%>j0VtM*@X4+4G
zP84w3_Jf?PJY?ziIZ?U>Cn`6=Ic+8<w6jp&`i=VY#2{T!xdnw>KV6YsCL*%@ZV;Sb
zH_%VWZHNf-E954&2=gK2f`a{qCMT1(exv@o9Kea{eLuff>(;>_zalOu<R&JCb{3r1
zZ*s!&5XuV*<@NI+v!9d`e3xXPpO7QNk%K%L*Ow=&2SUyme}_S*@mIo42*<$*;rO;e
z$o1vP%2&t*g`6>7itHjBXC{Q>LZ6f6SI7m0+=Oskn2`4O5n(=r+=Q^d-yq~Br2U`y
zz%98vwAa*$501$j?Q|G+;_L)FwQGr7gBwS#F%Or>-N+bDbH6ZF{>I_q{$VWMsF?Hz
z4*icpI^TDUQJ;izgJa4y`Zrm<vlr|%-<Q?9@#LEKP2=Tny!tdgKUV#i5|*QoGwvTG
zb~f)@#;9*&@^BwBMmwJx&u$T+-GrQZUo(b1#^lZWwXx*enZxNmKv+Mv2;29T@ycUT
zSRQgtod4WnVs38m9>M6Rb8Y$M{{)_B#_F|U%yj;Hd5K=e?}B5dLwRXBM#FaaA#d-<
zn06SG501eP>X-c|pLzYq;0N_&O!?#RMs}GmR&%`3{>QVA{imyW`;5tN7?Tf<!7uJz
zS{SQ87?U^TkJYaK0Di3Y+~-HyrT)Ih%=b9$%*e->{8;VM&}XQgYQ`y3|8eTIq5nAT
z@(<vdhUrjl?6>FHd=Kn2PW$--__5ljk#9M_toc5|AYSl%)a3O0yDd(0zR9xYdkcej
zp<dHMy{3hFIeiyl+59k;NH2O&z9T2l%jtUz%jO53M0(K!_Z>NbUQYXimu)uO@tjOA
zy3vT^CN+9V_602ChBzsw(`#{Qmt%W0;w5?wIDuY$&as^t@xt^(?acTfh9*5x`@s)u
z2sp=fVSac+l%A+v7#sUE>51z5fRm-?SdVQs5b~Tpz5eJzs~^W1%Zu85Fw1M;Ok!Do
z|8DTcI^;OKv934{@7NxUc!@m+oS^)W)Ab9+hf6eiiP{Z*07c3P^b)ln{Gf@H6Qt|d
z&TKZO@|;XB(YRu4q}1poYLB>aRLbe}GClwrR4u}C8wC5!j18Gi`Lx+6%5#Ez8XGJ%
z>51$*;3WO(=te^aPWya)*=D1m11FG|n%|wD)qXGD;<WP);57SJO-{cb4LI#NgUGEH
zaq7-GM^2PqgLD3F#u~RadT2Spdz&i1UdS6GpF|Jf#Cjkv)x#K3C3*lS)&qH|9{PQA
zzzOT&w6GrP_pbpbOm|9{uKs)#;DqT$W)ivid`dJwB>d>(7UsTZ?Ts=nUNWB?aDsDM
zk<-0zv7E^{xBE<PWC4idB<G3&r!SX6#__xwBVJ^$!l+_SoYr26U5m1Hpk0fyb(z5j
z9z~pN-D20G@lJxiewdsjzkP0l?mb_N)2=rRxJjXYlUn_3Hp=syV7_CsL7(Sj^U?%-
zK+!m|Y*F)mCgGJ^H3xp}Co+A8lF*b_Hj|yrETxCLdOMDOw|Oy@O!vE>Od|D%{%%b%
z{qMH@f$44g1Jbj;<7=CyK3(g(zGMCV>XEmB9k`2=i7TSccDQ;o|Dj<GKk^}O39oC%
zSI0K5eJIx<e_Io?eQW8@Cp~-J=5R8R-M;Jc(7M0|KHYQX5?SNyvFZ7%lhMq={OncE
z>7TcG`*JduN+#xZCKvO`9A~dlxjX0gCP#lO*K0bPn6(#R|NA+|^6bYuw)^QN=kLp%
zv$p)t#XFvtw>9DXe17YE+EM=I#9}@+q5qzIWLh>~yl}_#$b^=$i=$rCYs;**@tLK3
zb}2ud&m|Lkoy-67%zS!2z304SI+;rtPp}r9_(*1tB~&VJOC}di_N!6GXerF^OyHvV
zTz)CBFsOoyA&D$)zEqJRj#t|889cWmxjVEqnNKVl@-{DI7MF64<#_hGXl{P*ywSwY
zG0yulet#}2!BLKJ+D0;GN$Cd-{@)iDvKMGupZj%Y(?Vt!Ti^TL>HI=+t}if^nRDpR
zw(m4V4V40)9zt5XpQcHVsK#)S`CYu6y?sN?CGv@x9m$2v?4V9*^fwbV&n_`e_;BvG
zw+&91G`Nd%iEMVzcRI<9F62i<+wpzQPBhDJn$K@dWYy|rYp>jFsxLK~&-H(?*Z4&;
zkp|JWdL(o6nYsDdor%ROoYLQ%T1sCL&14pa1ln=C`$kZEDW;!>a%1~>Yyd`0a?tCo
z#tCbV_UJdMVTj?*Ue`}3Ih!A}MBuq*ZTI`B-OlrrtxF5}`AxY*dN$=eei@v@y3jP9
z)1#TM8`a8joHgvSIg`CgBWF{;$Q{SiXfAWPgT9;hW*zgp^QvspN&b%H9(qg;WB(yV
z9Jwutg~g<HT%X##Gq)r-|23MLcg$Zu|IYJ~iF0QA1KA=Uzdf5@Ls&h|nT}3xUzqEU
ztQviXC{E5L{rk2(Lz3*}P|BVgjB)A2!pxPK+#Kno?f8qf&TP-Z<iIKYKGz?J)A^mL
zeqS@U$l4#d^S4bo+ON-TnNQD6_JtWIak1sm=g!+qROC5hD4a?!W|OnJ{G5}|_wP{j
zaxb1wpEh9!GrT!24X_=_#mrJ}s7JM(4^Hjq=S~-&>yIza<!*4B`}4az(=QnD{keWl
zSFg?V0<1mE_()<`azP+xy+)4NK|9Wu2i%0id`wv<(Don3ZTGZsHbUQ6bMr<qB^>)(
zi`Vu4qw5Zi<I`l)ILCThBA3ft*)Lpe{|LO6J2yR^T$2~vpY`qFc%P_w-+9=Ew(*<$
z63CFk=@E63W27TkpBTt7P76$G?ym>(Y$^-GtMyU;+)?HA&xcfA|6E7q_4n5f%SV#-
zr{#?$FV03XetJxPYE0hw`vjwWbWDDm^ZaF`A91$NDDP~aQQkSfqr9^{9hNJl{iY*-
zUNYZj&sp09tmCGIoMzqJ;vy1mI++*g*XKm~^*ND#eNL*MX5HE1wmEbC`L^Twr8!^h
zceF!2r^C9Q_Inw+^_lIN?8urB!%gLl`P|UGrtSS$VsU=f4rb(T+A=Ll-?WSVJ~y05
z&n>1BR}81j%zrm6EnKl9xtzHoxoJMJ$mGWawx^Nd((TJTH=WqEFyN9#0bVhm-oxkH
z2DX+sZ0%&s`kltnE$5xPkldS0=bi0%UM8PW1|5bwd)+yUv&r<_&RjCNIhkHuT10~m
zmOge3)Y*&c4<Tc9a7!PJ%=c&1_JnacrT0G~C8zWTw_PadKQ&FZ{I>7jy_n4JG)kVU
zmqVPn+RAX#^Lx^Xd|zX;p=G#rzcntsJL9+xwP(qwJ;rkxV~knSJjdzZtSv3%jnxt3
zwv=CQ+jLcaFca7HL&yzE#bG_Lf47>?EzN4zQ7=j?EF}%~7`sfR^AO9WlONn3CWac6
z{k*u}a6u3kPX*a=gHIYd=sSoff^6@52A?{#vZmowfwukrVAWACZ%|ns^oR94t^U(S
z+e+HeET2zKXBXx*{ob%JTm1%0SX<_k3v-(E`u8^~<{-~%!WwSM_?%U8&e1LzqYV#Z
z?0V(k)Ofbu8)leIZr?5Fug*=+arQFC5hp#SH1el*k7~lU<BR4O=glsT<p%u*f$i(T
zTyi?U#6FMOVN9t921JTWFF7|q+pl|D66WI?gc*KY@=DCG#WLLgOWm8mM^=^T!?(BK
zf)W+Q1vP4e8$!B4Kt)MXSvrt(iX`0)J!n#$x=D(z)UB>1oyIL7n|p8&H`E{q45GxA
zO~<7~1O$PoC=7xIltpERfQZYWe$P4YTUGZ~ovj*X{@>*HtA5_T_c_Zw%X{|sh%A%^
z#}rTSsA@)-`tbXMj7SwKZcI1ux`Yxog&$it(w`X11?~B~KZP2rOK(|@9M&yEdc7vM
zwewuON^33`?c3mQVywXHU)pvmvM%^$L;v04PdpchC8kbZI?~5WWE-=T{v>jVDY5XJ
z7sVmsosvJ6(!0>>b-VsA7us+OVB*>IY<XSH=3h)KaT(eZ`QD*Dp~YK!`$ar^dhncZ
z#6trw;-R*Fbodh~%Q}S|5nAT?%FU9ty!H8+iNd5Y-ol@nW=5lbCZPNVwCn8UIcRPA
zh<(N=t{5z4P31t2a+#uz??lB5!+gnrZXXCTXxh)|^Jil*oi??nvb{~@wdH*vFLY^6
z+mEkYoIw@R)Gm~(n5~^LjvK=5iz(fs?TfxXfG3xP!yYQrS)?H+l)fLtt&gcbhuSAo
z{L8$Vl)3+iem;5*`P7(h{IoMwD6hCE^IvpV!z2x_F9^05vr(dYT!q0}#t@#kZN-p2
z9f`bAuMW!l)8qPirHr^~M_;e${a|9}fO#Jd@p}9MsHXT3FJ5%xQIRyQ|5d`Z)Tc_g
zPQ1$OUWjdM|05fB3_QwiLp#U3-i(-rGK4!=6i2)4;r9De9f_Qwf0M|d=Af-foow8P
zIaEZ)Dpe?!7jDzj=K3hR<{OJs+x(o|B$v0;^C-V-d>qMcntzK=riM~e7zd80jPc%-
zM;E1-DTL=Zl<9!>NhCv8aw0Utq5r;c1{ub`*>G1~)2|&F$hN1^EpS)|KUCi?R{b%y
z2h1zS&f?#uY*I|>4#o~$uPLv*-d&xnH2N1Ft(thz6(LhO>+}OOr&B1s=JA^>EY*9i
ztm8KxW?>Ls??xGq)O5!#aci3;zk5ZQ9ZMA|{R|5~FE{Uv<iV=jQfO%hjq!~q&@iE<
zg%iS*AJtxyi5G5djQ&~oITBA?%~s_a!W|C#sOLqswUuWYj|ipW=LX8xJ=@9?rC>f%
z^pxdvEXev&8QRAOa_}6Yn&!cJdA3BW|ClG{FUv#|Zy?BSfy(r4;rEJEnogv*B+}b`
zLp#)$%J_-g&{!f<Wvb_q%;Sex*GN|y`XG}qw>NSe*6-&gLW<rv9;oxx!Vfih=6R>e
z%#zXH&1e_LQ(XJ2jw7*By5neQoM^)nr8GR(<HQq_Z9RUbc7adluD8?dTIH@M9c)Xa
zqrC<5crwTp#0{bY+)P;7b@VID`4FU%ruqNgbQ&Y&v~|894!t4j+E7BYOB?*OWxsVy
z9x!07V>oAk`dLU#6oX=3Izaa)S@B%m|4XN9C)RYl(0!TqeJ|kG5_P=ly`A=bNmBph
z5-IfPFt~K!m&c)T^FE8-Od1+Ul^VMCGne;ksOo-!xck%HUp`KvbfDCmP#n6roB914
zYF(K5Lt@}O;aXm*?mB{*2Hb5)<fdHWMOw4qClwy+`G{1qG6Ue0zMC>*!PHcc8O|lL
zF7c8*a0Fv7_xVDv!@URL%b+#o&}iSIvOgb9YdVpm@vLEgAPrFM@#1@69rQphSv_8*
z&!1Y3$-nY~x_Uk&m&1q<LwB=3{XufjACIz);VC>ehhN)z+W4_`rt%WvtvB&%zst5>
zqxinMh`WSSjlFX^R-8EbzrJz**BktoO~U%)t>w4hpQ0llmi<MszD}ooO+$|7`YPWu
zIAre^?urdN`<wQ~)!UgdacoKi5<2Upsh4G6g&3fFBebsdT;7I6zI|-0C{F2vj@%Rz
zHA##b^5*`Xj2{vcE;ePN$!_}o^4D1RpOw=~(M{EslRk%gU23`*S|0CHmnXVRdYkv1
zTY3FO^S;5M;<j$%e(<V!L*4sM)BE;qd1Lr^Z}cDUjsC}`vVFrfH+`#k#|Js{xh9Oi
z*E`%x)n7Hsr;cyNd|o_0S)Gtq`kxq^9LlCJNUWZ9)A|#eOr#1a^SLL%gNBm*4&!{O
z@GnTD{rs3Iev;QdF(DQ+#c9y63l)DXB~~CJ)8ran_=Kg2SARlM&8t$>@2f*F)qA;i
z>Yy*jQsrehb9gj{>Bhi1ExoqwYm@20C!h`eAs-sEKVew^DD&cpl<1y^cKzwvGZCuG
z{(^d@nm6c=`6wwlw|xsLwqbtFy04-mRj6<*<0)5O-G^xUzefL59;tZ86sNMLbf6Pp
z_H+%M=QYNwJtxbE_4|gh{+MXHkym2qW97>hxN8POb3wK5+a62lL(q)h#EO721dU#*
zlnnLz`Vyw*g>y*%{h}V2!>i9pO}(qjNu8do%}LF7)|EsxPhSJ9d}BP(Hq@weEsxs}
z^?Nb1>e9~CZJRKp%TTId<zWP{&ChL2Oj+6q(wE{qLfX*olzF!OP-CGT7Up!d#lLR*
zhps;En%Wkm_32MD1K~5WmhdVWuGSX`vHdn;oI%TrCkm65T{UW6WItc4o>%d&GgaBB
zW8`g3Wu17vQ;7*b6iH3^GEp4*9Xw76GKEAclaHR-QMX4>z<FQgy?TC59_^H|_w${B
zHvQ4R=!{Jz%f|(@JV7#jk9fZ&RhX3XriOXCqt`})8`)?|cd>cR&uzePKam@o+*OW)
z`g=i5@-9L-h&7Kx$!nothsF{adwZse&*pyn`T3AL(S^+0+O=AKZ5YP*)r4ReCuH(s
z)vgd70qFaar1kRB*q5I(+5hP%j*nLv`uOtxmIzVVPeG?Q?H7Vfh^g%NM5CtVZI<T|
zoa1L*_pR(tkXdB+H$2~LO2^8s1j~1e`QNOV|F!UX^P4hh6e}cWhd-H^P6b8tdK8r+
zlQX^k>-4iRd6reL2f`G+ezfbwu4!K^YENj#S+dz6f?6MyeULlt!0$)%mzeNF2f)<h
zm-?A9efUGgsi{QHw4V~a5MEj^uUCj(fh$im(qXb%4^Hq0Q_a<-<I!uYOtHrWt?&ST
zv5okrJ@T{6qgCyLRLmHv>%CYv73`|DLltir%Gghe)1>*y0K@XIX+#x22(nK5Y^0v-
z*}Ty>4<whV&Gtkj50yQW9}jX<`tZcOQF-r$D2cZ9rNJO5nC7X(lDKi+uaL-@)`?`i
zdy{@5EqpTcw*rg(!ysd{Gbrpe+2#jL?+0?`^ZOWW8Qr&M%=RA@$FlY5IWRP6T^9^8
z)4l8M{?+QGqt|w${3HsrN?WaqsB#_c?Q~pK?N8WUMO&6#>EpRRd@{!LJ(c>*T3F4S
znMxb&K5}RkKMej1`!i9)lq%jdjb2B_Fbb@#@S1*(oV~R5&o(6TUDH7t&6l4^`WdbM
zqVw%TlZj+t+y@;Q*R0d0pBSGe$x~yw*%)NRMvDkjA3tNg9xLW@C@wv*nqYN&qa6gX
z&TV@iSq#yKV_a{*`<h$pb?F|mzgJeg!rq|X3}1u(mFIu;-f7|=ipn0uKZb_HH-CiH
zznUrWM(h%*?3MOBJ@!g1v2pmW@)U^13!T31TW_~BBzMNrA?$v!0BCJ5#5FV~JBHPL
zn1iT%ksYI&{vN(Ey0KK*-W3Z$M=+zJ8{<KrX7``ZAzI6cu=7~URgMFKRP8d3Iz8(~
z@tXJr*?ey%G|Copn*%xPzQnQ}6gaM1TGodXrK)({iF~0p?&@)bu*<DqZ^HW=%Cr9b
zLy+A?N1LllNXmX+E|>~LOIb_S@H_qSjsDK)?jChLL=S0fyX?%A4V|m!xu$|?AAOdc
ziCof8vJ$YT_n@DbmdY>=@N?6CvOkgAo);Eax@P>bqQRcVWCG&{7uT9{H@qdBHQM-z
zJQlXFpog6HC2)GR#k=H~rPI%kwUrZ2J-%;Gi`g9ZTa_*9@w)G4Z7y}?*2S&v`-J<h
z>BLx39J$9>I@syw@_XP<tgo}K>Z^EUJv3BFG&~AALR0dN8P5&*xs>%>Riu9xZ`04O
zR?fMIvgapl`ats0GUS7dQ#>TUU7p8x;+I!XZ1Xgoj`p6)c2T*Xe_YWww`Uc;(A1R~
zSHpZ#Y%1BE7(-Px&(Dj^(x!Rjn)zu|zfSXfA|3h&Pmk-yH&Y3WHB%YC!d2^Ev6pjK
zz96kZX8l|t<>%RjmcO~bJDr%wo4#wvXI3bt%5uHAf4En)iJf9uR3EP7wNJ?5WFi^)
zd`*7x?()9WHsodXqqO`@qCWVxb%)LUa*@;;J|$^S%;w)<F(Ws5@Pw{4U&-QbGzLn2
zN1oD>gCSjof1#w;`p5MG&DKONF=hY$>iU2=-J|u*?(c9=#CU^k>gOH!ax5ABLS^~t
zKVD9$i2gvqxBFYWu5^s9+_JyY{(`A|3!dnEhl1DpS19H!^(w6QO!ZV8k`ZRftobi&
zaEYOwHC(zsp&i9<S0Od#7|$G?Ye3tcmm0=Jzty;-o}B$VYTdOblZN!J=gS?X*7$Y$
z+lmtd8PH_*TlwXgGN*bNTCcP5sho04B`4BDO_H%6QRgnw^qAYTh`YL`$ZdY3&L2Zl
zrLWgIe<Dp-W9d-SwAFr%dwOb`D9ikU!k=){rJa{mxU04e+Wvv+`y)-8dw9yGu7PqC
zZTj59RWyYSm6vD}=^n1C$!sK@Yn#wU%LV0AXG(U$6S-Y`82?Dq+*mmgvtM%kJNf~x
z?&T7~p1ZG1x=Gg&Tbm8}6IIRb>8I+lHj;jdu5m-<wzlijX!%h$H4T;@<(8Vp^lg1t
zrcw9dE=~jSk9K)n`h(H*S9Dbys{iV))E>7t#;%rq{ldY&N>|EdzTLsUaF@=yp5WkL
zsO#21yQ3N!G*pl4hX4)LW5v**arf0BL&N1**_G9$-{$@ip>d*R-!xBDEsT|&e9Cdp
z(7;AxzqlVI#jw87nVRkolJ@gD#Ivuz5YN7@-d{``*AMaDcwPzbH>4cSi}lrC)2N<r
zaOBDKbU9Bfcuwcp3YD9|l=Dh4c{<VI{4e}*xVI#Rwy)rK&Di!ws8;6G^I1HJY2isD
zCJy^?H)T>gioW~0;Lv13Oj^f+tguhT0|&kgucMesCoSiKtL}GoO>Og&NwEkq&Kng1
zvyaO0YSj}m>SXXj>3)Xp&DvA<yig-2ulFQ9Lh-lyp>p4oC`|gfV`;y$uq!1yyZMIG
zk#m4|!wm<+iQI&5upfH#T3SI8{laSQ(H3>xympY4r)D{7LOqioL)0Bsr%yJYN(UM1
zxm@N&PP41)*%TU-@Gb-0{0#4NR5&n%A7Lx=tFpKgwRvLkSLsZhFPeQ%vG(@bE}PoD
z^Zu)tY5!Ga`)p{Vs=EI1e&2j}N2@7*%Kez_s_mGKlv~^WP1XL6&Ia~VsN9bn>wf<%
zU8Ue~Kc*-g*_^PS+%4=V+x)HaE0f3rK{jeBP?>MyM2HNbj&7R2R0g7~C+p-6Ub0bO
z_}>0vLF8aPS;OyH*AuHHE8D4JZaOs`<m%a){7EP4sG9F0l}xn$kXqA}o1S%ayGp;X
zXON^5Pb1p>xe9j0l&Ud4b-Syz{?{1{vGH8&ZH4(U!+yXBZ@q~Z&sA(@80^1;7vu|3
zqO$z13;U=x2&Q#c4MFKp<?i?(XWXCLvrgo&FhC?%sgq)2!8hC&+_O%akr0hXUhh)>
z9ijoQH5g(GhiqP$`>A5TP-lRSv@n~D!k3w<UZAb4@8~2U?x6z`2r;0$xn1@_^{m@S
zAyk7jez(eR6K^<B-9)ALU-lpK(89rn!A@vlIZ0Y}&*neU>1_#{9a2j7%9T7WyoU0>
zeE&dCqI$uTT3?B`a*j+~a#qaxIc}y8GsdgVmA-r_ZMEq9h5i&DM;C9G0}$PsvW3ga
z^ESDcoxTmRmHivp!>L5wlrCt`MTx50JEhNuYK>|yh+D*Kb-b?sqG}H|)q1DxmsM#(
zfeIsX=5{{9H|VPfk6Ro~{`J@F*{Worjz3hS%X@;(RDKM@{M;_P{_I)TmGm<dBh$Q3
zs6NOpjdefbI=6F8&@;8$snq=_+6$)Kc&PSwXn2bz0kwQf0;=6rIw;z<LNm^*RMGBY
zT6^vq`NO;5IFBl~#yRpu84J15x!X{j3NjNxF+b>!r4y;CSR#{5iE)0uO{+>Y{e(nL
zyKzq+@A7`esB$r+ds)AVO_McklzQBrOyvr@FesW3HUo7<SsOpmH<4DUWy8bPT^#x~
z=<n*(h+0pEd%ZPoEcsUFLfv1V5;j4t<(ltvjBLSn>DS1Vsk4|C{ZqKxL`K??FO{-#
z^tz*u7Kdn1t47A~VkA?XsxOPjb+xyT9DDp$UHZ^DQHs!@KQ-iA_LGa=tK5btL*W#4
z(;sr=-{~idS=k?{-tef?8#N^|Z|X1ftgG8iTS-@4`=8x~I4GQH^<SqjtSTFM2fYWr
zrZIQn182<bLfPU=Me!T&%dGHO?NxidjrvnoUISUW(Arh+iVGtn#l{$pWQWRDqwa}c
zQ~MqC5yr9GpxT3sp@b{@onj>}*qN!Q8H*i(L4TW=OI6V|{0Y%!<~Gt2RAg|Vo;S-G
zT<E`_<Aus9FKSHs;-IWUx*fe%YA9i1E83}0_cRRoCg+$d&1-$>YRFx+_}INwkH<t~
zUq0L@jfK;w(e*xG7pI2tsd7lFY`5BnJ7~_`?r%q`V4W`-_H%3ny(*dV{{%;SaglS8
zeC$KlUw4V8;F;TnvOf*abM)6%88r_*7>EA3oY$-2nZ8Rq-mHBrDp=(|Y+pw`?9_gz
zrrB}fThI3hex-?R&m?6N>ZFHb{gTTSLH2$f-BrsNJ2v(+#P?tjw!Nwg4E-L`8-4he
zjJ&3FJh13(^kbkvS<SngQ(GrzyQs*T<ITLOM0UeqXcDW=xzhc{py1P)d%0d%^+Ed`
zNT<k`*wTxZL8(tE3jTI!17$oAT@cyESlVx^`?f~HXe^%X<IvINV9L(5{Zu(1?3&6J
zb}5?`cX!oXG<vG--$|pz^_p0txA?|OwAu~L1H}4OZH?(WW$lui_v%b<Y2BIn6ZJDc
zrtgeu1KYWMo_8oa(a+Pe*3i`#Xd#gqMa?~no49)^Edz>?Tp=hV(jz&4Je85R){lsb
zjc9J69G5})QVi)j+Tp`sg)4V6wykfYQJy^=$YnJpl;hMwu5!jq?O&%Dor!T-rFAN2
zs>weauhw5p_?x(wP9K3WVt7=Xpm!tbU_zvzBBEkce0({%H2ISe+ZfRA@s(kWRo_3A
zaA-b=_5}|4>2W@|i`25zchXv7d0ke-e5a3pbG|k|RY5=lSoMU;U8J$l<#%6u2bD`*
z`z6MaWritz>Xp+|oS&~f5?t3+daCpPW!q7vH30qhirLPdm}#oj<SyLwn0P!_SJP7I
z-I?^US{rNhVqa(7(w1_bFTFFdD?gkoTIXfjlP4AP1$)1WzMI(gabMYA)x<$#>7)1(
zOM9933zdlr*YD~N^<dRJ!9WK*Ud&HA^`mKW+O^+ZnV+sJ{I+yfS*nZvsl$WrBP&M3
z{!~`%2jiO72kcdvRWqd@&w@GE2eIChmiC!_-ntj+y=lDEhp(QyG{tvN=zBji=JVXX
z*dbNBUTBVg-KfvmYoq=N4RO^xBupYlX5BFU&&w{iX&f%v2_;?07rO!F4zQZBnJzxE
zzc=J(lGTMr;Zk~GUhk9Zcd3(+IuWCJx_J2oxh}U!Y&dhaZ!1Pz^?hbjJt|Mxsp^x9
zJJ<aW(jJ5`#zET%t>u<OiQEFC=amQ8cKemAJ+_^M!MMx%3-Ngs$)O#p_>ac8HxpTX
za<kjO>nn-mE_9&CeM+R9<9u`^zN~57zf&DiZ9kM88OP;ef5jY{ZQfec6GS<fKz^b*
z6P3njw^HnKGTEzC_(!Hm{LE0<B!}8v9ZlEY+%OOqcBONbow~a9EODbnJzL+ONMyyf
zO;`W>`!|J^odd5tVAAj^TZ+y5V#Iz2`@Uv;Kksa;a<%iLJy9Pu?SghkvpGNA_A{5`
zp$bCH7rW+qGf97ju1K`@!$Pxr#(gWOXJY9wbfu+sKBpi~9a-mn=vr7dm{<s1l&X(s
zc?~Za83JnkPo&d+x@OT<>p$(*i>#hiS6##;n<Za)-B+&oMyCuM`IdTE$2ab4m51Dp
z6H(B+)P*B-<ZvKQCG=M=5i`V#_@v+JU76kG`@}(u-0&m{VK6n7Dhx?eNbc^3rbI5A
zJuvt~ZqOkZ6E%x=lezk>8E-50r}f(EU*w82J#TObjd6Ja+j+l0bpEWqhowtjUg@2x
zT}U?7pYjeIS=;0!s@TwosE2B}{`)oz;`^NL<>Pb<S64@O@^Wa|YZjeU`@DnXi|>l8
z>g#=}{%OV=X^dp6<K$W$WANG#zsiJKWUGzFFNgeYU_2RdSHDf#;IX<sM(3A{a=x%y
zlFAzo+e0&yDt|T%4i3p#y553Cf7M%((~sw9qe*CMf%YC0T0aZ3^z~D{dt38el?0}f
zZ!UYmZQm1(rCzUb51{OIo#A(9cdahxjTG+I?V-E&>fJGo##48mPZvKEzc-UAq!MW{
zp;bxpit*=0zsGVftL}T&HTXRyzWRGwjo(8Xbi(rk=6Uq?bh?hLX%E_mJ496KmTT1U
zP_T9LgO&Y%)K{g`_>ql)LerS%kL!HVq-%(0S;tZ__2Da`YYU%QwsBN-kGX!niTjAV
zvVPgFuwlF#bKynyS@ZcfligDAHEe_b!?|5l#tR}oWBrD8Qy$lGE0sVI5kn5U-FBrr
zm;PC-JT9kshw#8q&3EJV=F2A)Y5QH}9IN9#Jj9DnIGXD<pX-f~HS-Tn{$Yq*M^y33
zqrxy*Sr4S0fvA^C-lU%qtG0<rw|H)?rzv^zM7zU2ozQLwIrl53gq`*`gzu-3oBYiC
z<mlcLJ5Nk@BaI6wxt(4(%G16<+j*dHd_rHM`)=uVm7?`GQt!)~8Xf9?th=Xj0g9%)
z)t#PDmXBaujJxDal4*XBdDeSugkGuyU9ZX<-O<4m&3E<Y(H}+*Oq37PX?VO&gh-Sx
z>vkBAs@K2P@%mRx-YvADfqR7z53ynuq=Si~%ld@mcR|6q^Sd&FB+TL@wx!akLfwaI
z+=;3%Fy5EQ7kXm$@iX+7T)3@WmD8s`FNK@u4aAIXYNCjdrkt6v+Hoki>W#XqPEnfY
zRi({=nj@WBp3oYh4X>K{n`{xyvoKp-U@r|kxzV&tRPx{v-vMX5Uu |
983,679 | 0b814aac745be13c070dfdc06172141e19b113dc | #! /usr/bin/env python3
import yaq_serial # type: ignore
from yaqd_core import ContinuousHardware
class WlMotor(ContinuousHardware):
    """yaq daemon driving a wavelength motor over a serial port."""

    _kind = "wl-motor"
    defaults = {"baudrate": 57600}

    def __init__(self, name, config, config_filepath):
        super().__init__(name, config, config_filepath)
        self.limit_status = False
        self._port = yaq_serial.YaqSerial(
            config["com_port"], baudrate=config["baudrate"]
        )
        # Wide fallback limits when the config does not provide any.
        self._limits = config.get("limits", [(-100_000, 100_000)])

    def _set_position(self, position):
        # Controller accepts a plain ASCII position terminated by newline.
        self._port.write(str(position).encode() + b"\n")

    async def update_state(self):
        async for line in self._port.areadlines():
            # Strip the b"S"/b"E" marker bytes framing the reported value.
            line = line.replace(b"S", b"").replace(b"E", b"")
            try:
                self._position = float(line)
                self._busy = False
                # BUGFIX: attribute was misspelled `_destintation`, so the
                # daemon's destination never tracked the settled position.
                self._destination = self._position
            except ValueError:
                # Malformed/partial line: ignore and resync on the next one.
                pass

    def home(self):
        self._busy = True
        self._port.write(b"H")
if __name__ == "__main__":
WlMotor.main()
|
983,680 | e339206c9a53ac8858ca94be6601ec974fa8bd4b | def getInfo(request):
data = request.session
return {
'code':0,
'data':data['data']
} |
983,681 | 74c5a3a74f07ba3a1f81966e2094b732cd3d1c70 | from django.contrib import admin
from tt.models import Aktivnost,ZavrseneAktivnosti,Student,Module,Updates
# Expose models in the Django admin. Aktivnost is deliberately not
# registered on its own — it is edited inline on the Module page.
admin.site.register(Student)
#admin.site.register(Aktivnost)
admin.site.register(ZavrseneAktivnosti)
admin.site.register(Updates)
class AktivnostInline(admin.TabularInline):
    """Inline editor so a Module's activities are managed on its admin page."""
    model = Aktivnost
    extra = 2


class ModuleAdmin(admin.ModelAdmin):
    """Admin for Module with its activities editable inline."""
    # BUGFIX: Django reads `fieldsets` (plural); the old `fieldset`
    # attribute was silently ignored by ModelAdmin.
    fieldsets = [
        (None, {'fields': ['title']}),
    ]
    inlines = [AktivnostInline]


admin.site.register(Module, ModuleAdmin)
983,682 | 01740c7845dc18d772365fe57e4021594cc64a9c | # 1. 查询城市 ID
# 2. 退出
# 1. 河北省
# 2. 陕西
# 请输入省份
import json
def load_cities():
    """Read ./day9/cities.json and return its 'result' list of city records."""
    with open('./day9/cities.json', 'r', encoding='utf-8', newline='') as fp:
        payload = json.loads(fp.read())
    return payload['result']
def find_id_with_city():
    """Interactively narrow province -> city -> district; return the match's id."""
    candidates = load_cities()
    for level in ['province', 'city', 'district']:
        # Show each distinct value once, in first-seen order.
        seen = []
        for record in candidates:
            if record[level] not in seen:
                print(record[level], end=' ')
                seen.append(record[level])
        print()
        choice = input(f'请输入{level}:')
        # Keep only records matching the user's choice for the next level.
        candidates = [record for record in candidates if record[level] == choice]
    return candidates[0]['id']
# NOTE(review): `id` shadows the builtin id() for the rest of the module.
id = find_id_with_city()
print(f'id为{id}')
|
983,683 | 651377c7104e73d4aa880f84b803ddc060e772c9 | /home/runner/.cache/pip/pool/b6/95/c4/7d6e3c414127cce2d533d8255c8e8c2d7dd55fafdaf8399bce3221bc5d |
983,684 | 0192cc4dcd0585e9849a73e55c23c9997595d5d2 | import csv
# Print each row of transactions.csv as comma-joined text.
# BUGFIX: the original used Python 2 `print` statement syntax (a syntax
# error on Python 3) and never closed the file; print() plus a context
# manager works on both 2.7 and 3.
with open('transactions.csv') as csvfile:
    for row in csv.reader(csvfile, delimiter=','):
        print(','.join(row))
|
983,685 | 198521dd934de1539d367df37e9de78884535ac4 | a,b,x=map(int,input().split())
ax=(a-1)//x
bx=b//x
print(bx-ax) |
983,686 | a8db3189f5ece04d841321006107725daf4e0045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from btsbots.BTSBotsClient import BTSBotsClient
import json
class TradeBots(BTSBotsClient):
    """Market-making bot for BTSBots.

    Tracks balances, open orders and price feeds from the DDP stream and
    places/cancels limit orders per market according to `bots_config`
    (delivered via the user's profile).
    """

    def __init__(self, *args, **argv):
        super().__init__(*args, **argv)
        self.bots_config = {}
        self.bots_limit = {}
        self.local_price = {}
        # keep the cancel orders id to check if it's already done.
        # Maps order id -> head block at which the cancel was issued.
        # BUGFIX: this was a *list* of one-entry dicts, but init_bots_data()
        # indexes it like a dict (`self.cancel_orders[e['id']]`), which
        # never matched and would raise TypeError; keep it a dict throughout.
        self.cancel_orders = {}
        self.update_price_time = {}
        self.my_balance = {}
        self.prices = {}
        self.asset_blacklist = []
        self.bots_factory = {}
        self.register_bots('mm1', self.run_bots_mm1)

    def register_bots(self, name, coro):
        """Register a strategy coroutine under the short name used as 't' in config."""
        self.bots_factory[name] = coro

    async def build_cancel_order(self, order_id):
        # Record when the cancel was issued so a failed cancel can be
        # retried after 20 blocks (see init_bots_data).
        self.cancel_orders[order_id] = self.head_block
        return await super().build_cancel_order(order_id)

    def onProfile(self, id, fields):
        """DDP callback: refresh bot configuration from the user's profile."""
        print('update profile')
        p = fields['profile']
        if 'bots_config' in p:
            self.bots_config = json.loads(p['bots_config'])
        if 'bots_limit' in p:
            self.bots_limit = json.loads(p['bots_limit'])
            for _key in self.bots_limit:
                self.bots_limit[_key] = float(self.bots_limit[_key])
        if 'local_price' in p:
            self.local_price = json.loads(p['local_price'])
            for _key in self.local_price:
                self.local_price[_key][0] = float(self.local_price[_key][0])

    def get_my_balance(self, a, t):
        """Return balance slot t (0 total / 1 free / 2 bot-usable) for asset a."""
        if a not in self.my_balance:
            return 0.0
        return self.my_balance[a][t]

    def add_my_balance(self, a, b0, b1):
        """Adjust the free (slot 1) and bot-usable (slot 2) balances of asset a."""
        if a not in self.my_balance:
            self.my_balance[a] = [0.0, 0.0, 0.0]
        self.my_balance[a][1] += float(b0)
        self.my_balance[a][2] += float(b1)

    def get_orders_mine(self, a_s, a_b):
        """Return my open orders selling a_s for a_b (empty list if none)."""
        _key = (a_s, a_b)
        if _key not in self.orders_mine:
            return []
        return self.orders_mine[_key]

    async def cancel_all_order(self):
        """Cancel every open limit order of mine in a single transaction."""
        ops = []
        for market in self.orders_mine:
            for _e in self.orders_mine[market]:
                if _e['t'] == 7:
                    _op = await self.build_cancel_order(_e['id'])
                    ops.append(_op)
        await self.build_transaction(ops)

    def _get_price(self, a_s):
        """Raw feed price for a_s; POLONIEX quotes are converted via USD/BTC."""
        if a_s in self.prices:
            if a_s.find("POLONIEX:USD") != -1:
                return self.prices[a_s]*self.prices['USD']
            if a_s.find("POLONIEX:BTC") != -1:
                return self.prices[a_s]*self.prices['BTC']
            return self.prices[a_s]
        return 0

    def get_price(self, a_s):
        """Price for a_s, following local_price aliases (cycle-safe) to a feed asset."""
        scale = 1.0
        asset_ref = a_s
        asset_refs = []
        while asset_ref in self.local_price and self.local_price[asset_ref][1] not in asset_refs:
            scale *= self.local_price[asset_ref][0]
            asset_ref = self.local_price[asset_ref][1]
            asset_refs.append(asset_ref)
        return scale*self._get_price(asset_ref)

    def init_bots_data(self):
        """Rebuild balances, order books and prices from the DDP collections."""
        self.orders_mine = {}
        self.orders_all = {}
        # there are 3 balance for each asset:
        # 0. total balance include: free, in order, in colle
        # 1. free balance, can use directely
        # 2. usable balance by bots, include: free, in order.
        self.my_balance = {}
        self.prices = {}
        cancel_done = True
        for e in self.ddp_client.find('price', selector={}):
            self.prices[e['a']] = float(e['p'])
        for e in self.ddp_client.find('balance', selector={'u': self.account}):
            b = float(e['b'])
            self.my_balance[e['a']] = [b, b, b]
        for e in self.ddp_client.find('order', selector={}):
            # settlement
            if e['t'] == 4:
                if e['u'] == self.account:
                    self.add_my_balance(e['a'], -e['b'], -e['b'])
            # in order book
            elif e['t'] == 7:
                _key = (e['a_s'], e['a_b'])
                if _key not in self.orders_all:
                    self.orders_all[_key] = []
                self.orders_all[_key].append(e)
                if e['u'] == self.account:
                    if _key not in self.orders_mine:
                        self.orders_mine[_key] = []
                    # if a cancel fail, wait 20 blocks, then clear it, try again
                    if e['id'] in self.cancel_orders and \
                            self.head_block - self.cancel_orders[e['id']] < 20:
                        cancel_done = False
                    self.orders_mine[_key].append(e)
                    self.add_my_balance(e['a_s'], -e['b_s'], 0.0)
            # debt
            elif e['t'] == 8:
                if e['u'] == self.account:
                    self.add_my_balance(e['a_c'], -e['b_c'], -e['b_c'])
                    self.add_my_balance(e['a_d'], e['b_d'], e['b_d'])
        for _key in self.orders_all:
            self.orders_all[_key].sort(key=lambda x: x['p'])
        if cancel_done:
            self.cancel_orders = {}

    async def check_asset_invalid(self, a_s):
        """Return the asset's price, or 0 if it is blacklisted/unpriced/unknown."""
        # in black list
        if a_s in self.asset_blacklist:
            return 0
        p_s = self.get_price(a_s)
        # no price
        if p_s == 0:
            return 0
        if a_s in self.ai:
            return p_s
        ret = await self.get_asset([a_s])
        if not ret[0]:
            self.asset_blacklist.append(a_s)
            print('[warnning] can not get asset info of %s, blacklist it' % a_s)
            return 0
        self.ai[a_s] = ret[0]
        return p_s

    async def trade_asset(self, ops_bots, a_s):
        """Run the configured strategy for every market that sells asset a_s."""
        bots_config = self.bots_config
        p_s = await self.check_asset_invalid(a_s)
        if p_s == 0:
            return
        bUsable = self.get_my_balance(a_s, 1)
        bUsable2 = self.get_my_balance(a_s, 2)
        if a_s == 'BTS':
            keep_fees = 50/p_s  # keep 50 cny fees
            bUsable -= keep_fees
            bUsable2 -= keep_fees
        if bUsable < 0:
            bUsable = 0
        if bUsable2 < 0:
            bUsable2 = 0
        controller = {'b_usable': bUsable, 'price': p_s, 'market': {}}
        balance_total_order = 0.0
        # the first loop, calculate how many balance need to buy
        for a_b in bots_config[a_s]:
            p_b = await self.check_asset_invalid(a_b)
            if p_b == 0:
                continue
            if a_b in bots_config and a_s in bots_config[a_b]:
                # Both directions configured: reject spreads that would
                # let the bot trade against itself at a loss.
                sp1 = 1+float(bots_config[a_s][a_b]['spread'])/100.0
                sp2 = 1+float(bots_config[a_b][a_s]['spread'])/100.0
                if sp1*sp2 < 1.0:
                    print('[warnning] wrong spread for market %s/%s' % (a_s, a_b))
                    continue
            balance_limit_buy = float('inf')
            if a_b in self.bots_limit:
                balance_limit_buy = float(self.bots_limit[a_b])
            if 'balance_limit' in bots_config[a_s][a_b]:
                balance_limit_buy = float(bots_config[a_s][a_b]['balance_limit'])
            balance_limit_buy -= self.get_my_balance(a_b, 0)*p_b
            balance_limit_buy = max(balance_limit_buy, 0.0)
            balance_limit_order = min(
                balance_limit_buy, float(bots_config[a_s][a_b]["balance_cny"]))
            balance_total_order += balance_limit_order
            controller['market'][a_b] = {
                'price': p_b, 'cancel': [], 'balance_limit_buy': balance_limit_buy,
                'balance_limit_order': balance_limit_order}
        # the second loop, calculate how many balance have for sell
        b_scale = 1.0
        if balance_total_order > bUsable2*p_s:
            # Not enough usable balance to fund all orders: scale down pro rata.
            b_scale *= bUsable2*p_s/balance_total_order
        for a_b in controller['market']:
            balance_limit_sell = controller['market'][a_b]["balance_limit_order"]*b_scale
            balance_limit_sell = max(balance_limit_sell, 10.0)
            p_b = controller['market'][a_b]['price']
            # Convert the CNY-denominated limits into asset units.
            controller['market'][a_b]["balance_limit_order"] /= p_b
            controller['market'][a_b]["balance_limit_buy"] /= p_b
            controller['market'][a_b]["balance_limit_sell"] = balance_limit_sell/p_s
            default_bots = 'mm1'
            if 't' not in bots_config[a_s][a_b]:
                bots_config[a_s][a_b]['t'] = default_bots
            bots_type = bots_config[a_s][a_b]['t']
            if bots_type in self.bots_factory:
                await self.bots_factory[bots_type](ops_bots, controller, a_s, a_b)

    async def trade_bots(self):
        """One trading pass: refresh state, then place/cancel orders as needed."""
        self.init_bots_data()
        if self.get_my_balance('BTS', 1) < 1:
            print('[warnning] need more BTS for fees, try cancel all orders')
            await self.cancel_all_order()
            return
        # Wait for pending cancels to settle before issuing new orders.
        if len(self.cancel_orders) > 0:
            return
        ops_bots = {'cancel': [], 'new': []}
        for a_s in self.bots_config:
            await self.trade_asset(ops_bots, a_s)
        await self.build_transaction(ops_bots['cancel'] + ops_bots['new'])

    async def bots_cancel_order(self, ops_bots, e, controller, a_s, a_b):
        """Queue a cancel for order e and release its balance back to the pool."""
        print('[cancel order] %s/%s, id: %s' % (a_s, a_b, e['id']))
        controller['market'][a_b]['cancel'].append(e['id'])
        controller['b_usable'] += e['b_s']
        ops_bots['cancel'].append(await self.build_cancel_order(e['id']))

    async def bots_new_order(self, ops_bots, controller, amount, price, a_s, a_b):
        """Queue a new limit order and reserve its balance."""
        print('[new order] %s/%s, %s %s at price %s' % (
            a_s, a_b, amount, a_s, price))
        controller['b_usable'] -= amount
        ops_bots['new'].append(
            await self.build_limit_order(amount, price, a_s, a_b))

    async def check_order(
            self, ops_bots, controller, a_s, a_b, price,
            freq=60, max_price=float('inf'), max_change=0.003):
        """Keep exactly one well-sized, well-priced order per market.

        Cancels duplicates, badly sized or stale-priced orders (price checks
        are throttled to once per `freq` seconds) and places a replacement
        when no valid order remains.
        """
        found = False
        price_in_cny = controller['price']
        amount = min(
            controller['market'][a_b]['balance_limit_buy']/price,
            controller['market'][a_b]['balance_limit_sell'])
        orders = self.get_orders_mine(a_s, a_b)
        for e in orders:
            # already in canceled list
            if e['id'] in controller['market'][a_b]['cancel']:
                continue
            if found:
                await self.bots_cancel_order(ops_bots, e, controller, a_s, a_b)
                print('reason: extra order')
                continue
            # Cancel when the order size drifted >10% from the target.
            if amount <= 0.0 or \
                    float(e['b_s'])/amount > 1.1 or \
                    float(e['b_s'])/amount < 0.9 and controller['b_usable'] > 1.0/price_in_cny:
                await self.bots_cancel_order(ops_bots, e, controller, a_s, a_b)
                print('reason: balance %s change to %s' % (e['b_s'], amount))
                continue
            _key = (a_s, a_b)
            if _key not in self.update_price_time:
                self.update_price_time[_key] = 0
            if self.head_time-self.update_price_time[_key] > freq:
                if abs(float(e['p'])/price-1) > max_change or \
                        e['p'] > max_price:
                    await self.bots_cancel_order(ops_bots, e, controller, a_s, a_b)
                    print('reason: price %s change to %s' % (e['p'], price))
                    self.update_price_time[_key] = self.head_time
                    continue
            found = True
        if found:
            return
        # not valid order exit, make a new orders
        amount = min(amount, controller['b_usable'])
        if amount*price_in_cny < 1.0:  # too small, less than 1 CNY, don't sell
            return
        await self.bots_new_order(ops_bots, controller, amount, price, a_s, a_b)

    async def run_bots_mm1(self, ops_bots, controller, a_s, a_b):
        """Simple market-maker: sell a_s at feed price plus the configured spread."""
        spread = float(self.bots_config[a_s][a_b]['spread'])/100.0
        price = controller['price']/controller['market'][a_b]['price']*(1+spread)
        await self.check_order(ops_bots, controller, a_s, a_b, price, 30)
if __name__ == '__main__':
    # Fall back to trollius on interpreters without asyncio.
    try:
        import asyncio
    except ImportError:
        import trollius as asyncio
    # import getpass
    # SECURITY NOTE(review): a live-looking WIF private key is hard-coded
    # here; credentials should come from getpass/environment, not source.
    account = 'test.iauth'
    wifkey = "5HvPnGfqMDrrdBGrtn2xRy1MQGbVgW5m8EWmXUNHBX9W4DzVGyM"
    # account = input('account name: ').strip()
    # wifkey = getpass.getpass('active private key for %s:' % account)
    client = TradeBots('wss://btsbots.com/websocket', debug=False)
    # client = TradeBots('ws://localhost:3000/websocket', debug=False)
    client.login(account, wifkey)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(client.run())
    loop.run_forever()
|
983,687 | e855253cf8edbfbc3828022ed9b604cfdf704f54 | from celery import chain
from django import forms
from apps.uploadfile.models import Fileupload
from apps.uploadfile.task import getname, generate_report,sendfile, deletefile
class UploadForm(forms.ModelForm):
    """Upload form that, on commit, kicks off the report-generation task chain."""

    class Meta:
        model = Fileupload
        fields = ['filename', 'docfile']

    def save(self, commit=True):
        # BUGFIX: ModelForm.save() must return the saved instance; the
        # original returned None, breaking callers (e.g. generic views).
        # Also renamed `file` to avoid shadowing the builtin name.
        instance = super(UploadForm, self).save(commit=False)
        if commit:
            instance.save()
            # Chained celery tasks: resolve the file name, build the
            # report, then send the result.
            (getname.s(instance.id) | generate_report.si() | sendfile.s())()
        return instance
983,688 | bb079db05af05614c1e13e8efb55d718a22fbf1b | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import ShuffleSplit, cross_validate
def crossvalidate_pipeline_scores(X, y, pipelines, n_splits, random_state):
    """crossvalidates all pipelines in the provided dictionary and returns scores (R2, neg-MAE, neg-MSE)

    :param X: Dataframe with the predictors
    :type X: dataframe
    :param y: Pandas series with the target values
    :type y: series
    :param pipelines: dictionary with the name of the model as key and pipeline as value
    :type pipelines: dict
    :param n_splits: how many splits to do in crossvalidation
    :type n_splits: int
    :param random_state: random state for splitting
    :type random_state: int
    :return: Dataframe in long format with columns model, metric, fold, value
    :rtype: dataframe
    """
    cv = ShuffleSplit(n_splits=n_splits, random_state=random_state)
    scores = {}
    for modelname, pipeline in pipelines.items():
        print("Crossvalidating", modelname)
        score = cross_validate(
            pipeline,
            X,
            y,
            cv=cv,
            scoring=("r2", "neg_mean_absolute_error", "neg_mean_squared_error"),
        )
        scores.update({modelname: score})
    # opening the nested dictionary to a dataframe:
    # rows become (model, metric), columns become folds.
    scores = pd.concat({k: pd.DataFrame(v).T for k, v in scores.items()}, axis=0)
    scores.index.names = "model", "metric"
    scores.reset_index(inplace=True)
    # Melt to long format (one row per model/metric/fold); folds renumbered from 1.
    scores = pd.melt(scores, id_vars=["model", "metric"], var_name="fold")
    scores = scores.assign(fold=scores.fold + 1)
    return scores
def plot_scores(scores, show_costs=False, save=False, plotname=None):
    """Generates BoxPlots for all metrics

    :param scores: Dataframe with columns model, metric, fold and value (output from crossvalidate_pipeline_scores)
    :type scores: dataframe
    :param show_costs: Plot the computation cost metrics (fit/score times) too
    :type show_costs: boolean
    :param save: Save created plots to reports/figures/
    :type save: boolean
    :param plotname: filename prefix used when save=True
    :type plotname: str
    """
    for metric in scores.metric.drop_duplicates():
        # By default only the predictive-quality metrics are plotted.
        if not show_costs:
            if metric not in [
                "test_r2",
                "test_neg_mean_absolute_error",
                "test_neg_mean_squared_error",
            ]:
                continue
        sns.boxplot(x="model", y="value", data=scores[scores.metric == metric])
        plt.title(metric)
        plt.tight_layout()
        if save:
            plt.savefig(
                os.path.join("reports", "figures", plotname + "_" + metric + ".png")
            )
        plt.show()
def train_and_plot_prediction_metrics(X_train, y_train, X_test, y_test, pipelines):
    """Trains the pipelines with train data, predict test data with trained
    models and the plots MAE, MSE and R2 metrics

    :param X_train: Training data features
    :type X_train: dataframe
    :param y_train: Training data target
    :type y_train: array
    :param X_test: Test data features
    :type X_test: dataframe
    :param y_test: Test data target
    :type y_test: array
    :param pipelines: dictionary with the name of the model as key and pipeline as value
    :type pipelines: dict
    """
    # BUGFIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # accumulate rows in a list and build the frame once instead.
    rows = []
    for modelname, pipeline in pipelines.items():
        pipeline.fit(X_train, y_train)
        y_pred = pipeline.predict(X_test)
        rows.append(
            {
                "Model": modelname,
                "MAE": mean_absolute_error(y_test, y_pred),
                "MSE": mean_squared_error(y_test, y_pred),
                "R2": r2_score(y_test, y_pred),
            }
        )
    scores = pd.DataFrame(rows, columns=["Model", "MAE", "MSE", "R2"])
    for metric in ["MAE", "MSE", "R2"]:
        ax = sns.barplot(x="Model", y=metric, data=scores)
        ax.set_ylim(bottom=0)
        plt.title("Test data: " + metric)
        plt.show()
983,689 | 63cad887af3ca0381571d70c3e90d38bd9c545d3 | """
Pair Problem - 2/2/2016
Ozzie & Emily
Generating Fibonacci numbers is a classic programming exercise.
Each Fibonacci number is the sum of the two previous. Starting with 1 and 1, we have fib(1) == 1, fib(2) == 1, fib(3) == 2, fib(4) == 3, fib(5) == 5, and fib(6) == 8. A little further on, fib(12) == 144.
Write a function definition skeleton with doctests that cover all the examples given above. Run the doctests to confirm that your tests fail.
Fill in the function definition to calculate Fibonacci numbers. Run your doctests to confirm that the tests pass.
If you have time, think about edge cases, and try to make your implementation more efficient.
"""
fib_dict = {1: 1, 2: 1}  # memo of already-computed Fibonacci values


def fib(n):
    """
    >>> fib(1)
    1
    >>> fib(2)
    1
    >>> fib(3)
    2
    >>> fib(4)
    3
    >>> fib(5)
    5
    >>> fib(6)
    8
    >>> fib(12)
    144
    """
    # Memoized recursion: cached answers keep repeated calls cheap and
    # bound the recursion depth of later, larger calls.
    try:
        return fib_dict[n]
    except KeyError:
        pass
    value = fib(n - 1) + fib(n - 2)
    fib_dict[n] = value
    return value
# Run the doctests above when executed as a script (verbose prints each case).
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose = True)
|
983,690 | e9de2ee14d05a7a7020a16b76cada8541bf348dd | # -*- coding: utf-8 -*-
"""
Created on Fri May 1 2020
@author: Daniel Hryniewski
Pig Latin - Pig Latin is a game of alterations played on the English language game. Read Wikipedia for more information on rules.
"""
def piglatin(l):
    """Convert a single word to pig latin.

    Words starting with a vowel (a, e, i, o, u, y) get '-way' appended;
    otherwise the first letter moves to the end as '-<letter>ay'.  The
    vowel test is case-insensitive now, so capitalised words work too,
    and empty input is returned unchanged instead of raising IndexError.
    """
    if not l:
        return l
    if l[0].lower() in 'aeiouy':
        l = l + '-way'
    else:
        l = l[1:] + '-' + l[0] + 'ay'
    return l
def main():
    """Prompt for a word and print its pig-latin form."""
    word = input('Enter your string: ')
    result = piglatin(word)
    print(result)


main()
983,691 | 32eddfa90863c3a21418d0ae9929e85472ce11c0 | """[5/7/2014] Challenge #161 [Medium] Appointing Workers"""
# seems like a backtracking problem
s = """6
GUI
Documentation
Finances
Frontend
Backend
Support
Alice GUI,Backend,Support
Bill Finances,Backend
Cath Documentation,Finances
Jack Documentation,Frontend,Support
Michael Frontend
Steve Documentation,Backend"""
s = s.split("\n")
count = int(s[0])
jobs = s[1 : count + 1]
workers_jobs = s[count + 1 :]
workers_jobs = {
wj.split(" ")[0]: {"ava": wj.split(" ")[1].split(","), "picked": None, "recent": 0}
for wj in workers_jobs
}
# theory:
"""
start with the person with least capabilities
if (no capabalities):
- revert the last action in the queue
- remove the job from his list == BAD
- remove his picked attirbute
- increase his recent counter, so that he picks a different job on the next loop
- whoever called the revert action, gets his recent counter set to 0
pick him a job (based on his recent visit)
- push this action into a queue
- make the job unvailable
if all has picked one job: break
and repeat
"""
unavailable_jobs = []
def revert(caller):
    """Undo the most recent assignment.

    Pops the last taken job, unassigns the worker that held it and bumps
    that worker's retry offset; the caller's own offset is reset so it
    starts again from its first option.
    """
    last_job = unavailable_jobs.pop()
    holders = [name for name, state in workers_jobs.items()
               if state["picked"] == last_job]
    worker = holders[0]
    state = workers_jobs[worker]
    state["picked"] = None
    state["recent"] += 1
    # whoever calls needs to have its recent (visited) index set to zero
    workers_jobs[caller]["recent"] = 0
def check_all_picked():
    """Return True once every worker has an assigned job."""
    return all(state["picked"] for state in workers_jobs.values())
def pick_a_job():
    """Greedy assignment with backtracking.

    Repeatedly picks the unassigned worker with the fewest still-available
    options and assigns the job at its "recent" offset; when a worker has
    run out of options, the last assignment is reverted and retried.
    """
    while not check_all_picked():
        # Most-constrained worker first: fewest capabilities still free.
        worker_job = min(
            filter(lambda x: not x[1]["picked"], workers_jobs.items()),
            key=lambda x: len(set(x[1]["ava"]).difference(set(unavailable_jobs))),
        )
        # TO SEE revert in action,,, disable ABOVE `.difference`
        worker, jobs = worker_job
        try:
            picked = list(filter(lambda x: x not in unavailable_jobs, jobs["ava"]))[
                workers_jobs[worker]["recent"]
            ]
            unavailable_jobs.append(picked)
            workers_jobs[worker]["picked"] = picked
        # Only the "no option left at this offset" failure should trigger
        # backtracking; the previous bare `except:` also hid genuine bugs
        # (NameError, KeyboardInterrupt, ...).
        except IndexError:
            print(f"{worker} wants to revert previous action", end="\t^^^^^^^^^^^^\n")
            try:
                revert(caller=worker)
            except IndexError:
                # Undo stack empty: no assignment can satisfy everyone.
                print("There is no solution!")
                break
# Run the solver and print the final (worker, job) pairs that were assigned.
pick_a_job()
print(
    [
        (each[0], each[1]["picked"])
        for each in filter(lambda x: x[1]["picked"], workers_jobs.items())
    ]
)
983,692 | f01a27a27327ed2cd48afee7d040b91efa437d1e | #!/usr/bin/python
import pytumblr
import yaml
import os
import urlparse
import code
import oauth2 as oauth
# Load OAuth tokens from ~/.tumblr, or run the first-time OAuth flow.
yaml_path = os.path.expanduser('~') + '/.tumblr'
if not os.path.exists(yaml_path):
    # NOTE(review): new_oauth() is not defined or imported anywhere in this
    # script, so the first run (no ~/.tumblr) raises NameError — confirm
    # where it was meant to come from.
    tokens = new_oauth(yaml_path)
else:
    yaml_file = open(yaml_path, "r")
    tokens = yaml.safe_load(yaml_file)
    yaml_file.close()
# REST client authenticated with the stored consumer/oauth credentials.
client = pytumblr.TumblrRestClient(
    tokens['consumer_key'],
    tokens['consumer_secret'],
    tokens['oauth_token'],
    tokens['oauth_token_secret']
)
# Make the request
# First call only fetches the follower count; the API pages 20 at a time.
followers = client.followers(tokens['blog_url']+'.tumblr.com', limit=1, offset=0)
print followers['total_users']
#print followers['users'][0]['name']
#print followers['users']
total_followers = followers['total_users']
followersdiv = total_followers // 20   # number of full pages of 20
followersmod = total_followers % 20    # size of the final partial page
#print followersdiv
#print followersmod
#print ""
current_offsetgroup = 0
counted_followers = 0
countdown_followers = total_followers
# Page through all followers, printing each name.
while 1 :
    followersoffset = current_offsetgroup * 20
#    print "Followers offset: {}".format(followersoffset)
    if current_offsetgroup < followersdiv:
        rangelimit = 20
    else:
        rangelimit = followersmod
#    print "Rangelimit: {}".format(rangelimit)
#    followers = client.followers('lazytechsupport.tumblr.com', limit=rangelimit, offset=followersoffset)
    followers = client.followers(tokens['blog_url']+'.tumblr.com', limit=rangelimit, offset=followersoffset)
    for x in range(0, rangelimit):
        counted_followers += 1
#        print "Group: {}".format(current_offsetgroup)
#        print "Follower: {}".format(x)
        # NOTE(review): bare except — a page shorter than requested ends
        # the inner loop, but it also hides any other failure.
        try:
            print followers['users'][x]['name']
        except:
            break
#        countdown_followers -= 1
#        print countdown_followers
    current_offsetgroup += 1
    # After the last (partial) page, sanity-check the running count.
    if current_offsetgroup == (followersdiv+1):
        if counted_followers != followers['total_users']:
            print ""
            print "There's a bug somewhere, pester James"
            print ""
            print "Reported : {}".format(total_followers)
            print "Counted : {}".format(counted_followers)
        break
|
983,693 | c81ce38ee8e4c0528919bb2305bb444d24e132ca | from iris.coords import DimCoord
import iris.plot as iplt
import time
import numpy as np
import glob
import iris
import iris.coord_categorisation
import iris.analysis
import subprocess
import os
import iris.quickplot as qplt
import matplotlib.pyplot as plt
import numpy.ma as ma
import running_mean as rm
import running_mean_post as rmp
from scipy import signal
import scipy
import scipy.stats
import numpy as np
import statsmodels.api as sm
import running_mean_post
from scipy.interpolate import interp1d
from datetime import datetime, timedelta
import cartopy.crs as ccrs
import iris.analysis.cartography
import numpy.ma as ma
import scipy.interpolate
import gc
import pickle
import biggus
import seawater
import cartopy.feature as cfeature
import statsmodels.api as sm
import iris.analysis.stats
###
#Filter
###
def butter_lowpass(lowcut, fs, order=5):
    # NOTE(review): despite the name, this builds a HIGH-pass Butterworth
    # filter (btype='high'); butter_highpass below builds a low-pass one.
    # The names look swapped — confirm before renaming, callers below rely
    # on the current names.
    nyq = fs
    low = lowcut/nyq
    b, a = scipy.signal.butter(order, low , btype='high',analog = False)
    return b, a
def butter_highpass(highcut, fs, order=5):
    # NOTE(review): despite the name, this builds a LOW-pass Butterworth
    # filter (btype='low') — see the matching note on butter_lowpass above.
    nyq = fs
    high = highcut/nyq
    b, a = scipy.signal.butter(order, high , btype='low',analog = False)
    return b, a
# Second-order filter coefficients used throughout the analysis below:
# b1/a1 uses cutoff 1/100 cycles per step, b2/a2 uses 10/100 (given the
# btype inversion noted above, verify which band each actually passes).
b1, a1 = butter_lowpass(1.0/100.0, 1.0,2)
b2, a2 = butter_lowpass(10.0/100.0, 1.0,2)
def unique_models(directory, var):
    """Return the sorted unique model names having piControl files for
    *var* in *directory* (pattern ``*_<var>_piControl*.nc``); the model
    name is the leading underscore-delimited token of the file name."""
    pattern = directory + '/*_' + var + '_piControl*.nc'
    names = [path.split('/')[-1].split('_')[0] for path in glob.glob(pattern)]
    return np.unique(names)
# Data directories for the three CMIP5 variables used in the analysis.
tos_dir = '/data/NAS-ph290/ph290/cmip5/reynolds_data/'
sos_dir = '/data/NAS-ph290/ph290/cmip5/reynolds_data/'
uo_dir = '/media/usb_external1/cmip5/gulf_stream_analysis/regridded/'
models1 = unique_models(tos_dir,'tos')
models2 = unique_models(sos_dir,'sos')
models3 = unique_models(uo_dir,'uo')
# Keep only the models for which all three variables are available.
models = np.intersect1d(models1,models2)
models = np.intersect1d(models,models3)
#main section
def time_space_correlations(cube,timeseries,cube_year,ts_year):
    """Correlate a 1-D timeseries against every grid point of a cube.

    The cube and timeseries are first restricted to their common years,
    the cube data are copied onto a fresh 1x1-degree template cube, the
    timeseries is broadcast to the same (time, lat, lon) shape, and a
    per-grid-point Pearson correlation over the time dimension is
    returned as a 2-D (lat, lon) cube.
    """
    # Restrict both inputs to the years present in each record.
    years = np.intersect1d(cube_year,ts_year)
    years = np.intersect1d(ts_year,years)
    ind1 = np.in1d(cube_year,years)
    ind2 = np.in1d(ts_year,years)
    cube1 = cube[ind1]
    ts = timeseries[ind2]
    # Fresh template cube so the correlation is done on clean coordinates.
    time = iris.coords.DimCoord(range(0, np.size(ts), 1), standard_name='time', units='seconds')
    latitude = iris.coords.DimCoord(range(-90, 90, 1), standard_name='latitude', units='degrees')
    longitude = iris.coords.DimCoord(range(0, 360, 1), standard_name='longitude', units='degrees')
    new_cube = iris.cube.Cube(np.zeros((np.size(ts),180, 360), np.float32),standard_name='sea_surface_temperature', long_name='Sea Surface Temperature', var_name='tos', units='K',dim_coords_and_dims=[(time,0), (latitude, 1), (longitude, 2)])
    new_cube.data = cube1.data
    # Broadcast the timeseries to every grid cell of a zeroed copy.
    analysis_cube = new_cube.copy()
    analysis_cube = iris.analysis.maths.multiply(analysis_cube, 0.0)
    ts2 = np.swapaxes(np.swapaxes(np.tile(ts,[analysis_cube.shape[1],analysis_cube.shape[2],1]),1,2),0,1)
    analysis_cube = iris.analysis.maths.add(analysis_cube, ts2)
    # Pearson correlation collapsed over the time coordinate.
    return iris.analysis.stats.pearsonr(new_cube, analysis_cube, corr_coords=[str(new_cube.coord(dimensions = 0).standard_name)])
# Gulf Stream box (degrees): longitudes -60..-50, latitudes 30..45 N.
gs_west = -60
gs_east = -50
gs_south = 30
gs_north = 45
models = list(models)
#removing duplicate models from an individual modelling centre
models.remove('CMCC-CESM')
models.remove('CMCC-CMS')
models.remove('CNRM-CM5-2')
models.remove('IPSL-CM5A-LR')
models.remove('IPSL-CM5B-LR')
models = np.array(models)
# Per-model results: Gulf Stream strength timeseries plus the four
# filtered correlation maps computed below.
data = {}
for model in models:
    print model
    data[model] = {}
    ###########################
    #   eastward velocities   #
    ###########################
    cube = iris.load_cube(uo_dir+model+'_uo_piControl*.nc')
    # Use the top level when a depth dimension is present.
    if np.size(cube.shape) == 4:
        cube = cube[:,0,:,:]
    cube = cube.intersection(longitude = (gs_west, gs_east))
    cube = cube.intersection(latitude = (gs_south, gs_north))
    # Strength proxy: maximum |eastward velocity| inside the box per year.
    max_gulf_stream_strength = np.abs(np.ma.max(np.ma.max(cube.data,axis = 1),axis = 1))
    data[model]['gulf_stream_strength'] = max_gulf_stream_strength
    coord = cube.coord('time')
    dt = coord.units.num2date(coord.points)
    year = np.array([coord.units.num2date(value).year for value in coord.points])
    data[model]['gulf_stream_year'] = year
    ###########################
    #           tos           #
    ###########################
    # Filter both the SST field and the strength timeseries with b1/a1,
    # correlate, then apply the second filter b2/a2 on top and repeat.
    cube = iris.load_cube(tos_dir+model+'_tos_piControl*.nc')
    cube.data = scipy.signal.filtfilt(b1, a1, cube.data,axis = 0)
    coord = cube.coord('time')
    dt = coord.units.num2date(coord.points)
    year_tos = np.array([coord.units.num2date(value).year for value in coord.points])
    gs = scipy.signal.filtfilt(b1, a1, data[model]['gulf_stream_strength'],axis = 0)
    data[model]['annual_gs_tos_correlation'] = time_space_correlations(cube,gs,year_tos,data[model]['gulf_stream_year'])
    cube.data = scipy.signal.filtfilt(b2, a2,cube.data ,axis = 0)
    gs = scipy.signal.filtfilt(b2, a2, gs,axis = 0)
    data[model]['decadal_gs_tos_correlation'] = time_space_correlations(cube,gs,year_tos,data[model]['gulf_stream_year'])
    ###########################
    #           sos           #
    ###########################
    # Same two-stage filtering and correlation for sea-surface salinity.
    cube = iris.load_cube(sos_dir+model+'_sos_piControl*.nc')
    cube.data = scipy.signal.filtfilt(b1, a1, cube.data,axis = 0)
    coord = cube.coord('time')
    dt = coord.units.num2date(coord.points)
    year_sos = np.array([coord.units.num2date(value).year for value in coord.points])
    gs = scipy.signal.filtfilt(b1, a1, data[model]['gulf_stream_strength'],axis = 0)
    data[model]['annual_gs_sos_correlation'] = time_space_correlations(cube,gs,year_sos,data[model]['gulf_stream_year'])
    cube.data = scipy.signal.filtfilt(b2, a2,cube.data ,axis = 0)
    gs = scipy.signal.filtfilt(b2, a2, gs,axis = 0)
    data[model]['decadal_gs_sos_correlation'] = time_space_correlations(cube,gs,year_sos,data[model]['gulf_stream_year'])
def mean_cubes(data,models,variable):
cube = data[models[0]][variable].copy()
cube_data = cube.data.copy() * 0.0
i = 0
for model in models:
i += 1
print model
cube_data += data[model][variable].data
cube.data = cube_data
return cube / i
# Plot and save the multi-model mean of each of the four correlation
# maps, with coastlines, on a fixed -0.5..0.5 colour scale.
plt.close('all')
x = 'decadal_gs_tos_correlation'
qplt.contourf(mean_cubes(data,models,x),np.linspace(-0.5,0.5,31))
plt.gca().coastlines()
plt.title(x)
plt.savefig('/home/ph290/Documents/figures/'+x+'.png')
plt.close('all')
x = 'decadal_gs_sos_correlation'
qplt.contourf(mean_cubes(data,models,x),np.linspace(-0.5,0.5,31))
plt.gca().coastlines()
plt.title(x)
plt.savefig('/home/ph290/Documents/figures/'+x+'.png')
plt.close('all')
x = 'annual_gs_sos_correlation'
qplt.contourf(mean_cubes(data,models,x),np.linspace(-0.5,0.5,31))
plt.gca().coastlines()
plt.title(x)
plt.savefig('/home/ph290/Documents/figures/'+x+'.png')
plt.close('all')
x = 'annual_gs_tos_correlation'
qplt.contourf(mean_cubes(data,models,x),np.linspace(-0.5,0.5,31))
plt.gca().coastlines()
plt.title(x)
plt.savefig('/home/ph290/Documents/figures/'+x+'.png')
|
983,694 | a65238c3cfded5318374e1db49f96231239b4387 |
#############################
## Plotting Functions ##
#############################
"""This script provides a variety of useful functions for plotting.
AUTHOR:
Don Kuettel <don.kuettel@gmail.com>
Univeristy of Colorado-Boulder - ORCCA
1) get_cmap
2) labelLine
3) labelLines
4)
"""
# Import Modules
import matplotlib.pyplot as plt
import numpy as np
from math import atan2,degrees
# Define Functions
# =====================================================================
# 1)
def get_cmap(n, name='jet'):
    """Returns a function that maps each index in 0, 1, ..., n-1
    to a distinct RGB color; the keyword argument name must be a
    standard mpl colormap name."""
    # NOTE(review): plt.cm.get_cmap(name, lut) was deprecated in
    # matplotlib 3.7 and removed in 3.9; on newer matplotlib use
    # matplotlib.colormaps[name].resampled(n) instead.
    return plt.cm.get_cmap(name, n)
# =====================================================================
# =====================================================================
# 2)
def labelLine(line, x, label=None, align=True, **kwargs):
    """Label a single Line2D in-place at x-position *x*.

    :param line: matplotlib Line2D to annotate
    :param x: x location for the label; must lie inside the line's x-range
    :param label: text to draw; defaults to the line's own label
    :param align: when True, rotate the text to follow the local slope
    :param kwargs: forwarded to Axes.text (color, ha/va, zorder, ...)
    """
    # Line2D.get_axes() was removed in matplotlib 3.0; .axes is the
    # long-standing replacement.
    ax = line.axes
    xdata = line.get_xdata()
    ydata = line.get_ydata()

    if (x < xdata[0]) or (x > xdata[-1]):
        print('x label location is outside data range!')
        return

    # Find the segment bracketing x and linearly interpolate y on it.
    ip = 1
    for i in range(len(xdata)):
        if x < xdata[i]:
            ip = i
            break
    y = ydata[ip-1] + (ydata[ip]-ydata[ip-1])*(x-xdata[ip-1])/(xdata[ip]-xdata[ip-1])

    if not label:
        label = line.get_label()

    if align:
        # Compute the slope and convert it to a screen-space angle so the
        # text follows the drawn line regardless of axis scaling.
        dx = xdata[ip] - xdata[ip-1]
        dy = ydata[ip] - ydata[ip-1]
        ang = degrees(atan2(dy,dx))
        pt = np.array([x,y]).reshape((1,2))
        trans_angle = ax.transData.transform_angles(np.array((ang,)),pt)[0]
    else:
        trans_angle = 0

    # Fill in styling defaults without overriding caller-supplied values.
    if 'color' not in kwargs:
        kwargs['color'] = line.get_color()
    if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):
        kwargs['ha'] = 'center'
    if ('verticalalignment' not in kwargs) and ('va' not in kwargs):
        kwargs['va'] = 'center'
    if 'backgroundcolor' not in kwargs:
        # Axes.get_axis_bgcolor() was removed in matplotlib 2.2;
        # get_facecolor() is the replacement.
        kwargs['backgroundcolor'] = ax.get_facecolor()
    if 'clip_on' not in kwargs:
        kwargs['clip_on'] = True
    if 'zorder' not in kwargs:
        kwargs['zorder'] = 2.5

    ax.text(x,y,label,rotation=trans_angle,**kwargs)
# =====================================================================
# =====================================================================
# 3)
def labelLines(lines, align=True, xvals=None, **kwargs):
    """Place an in-plot label on each line of *lines*.

    :param lines: list of Line2D objects; lines whose label contains
        "_line" (matplotlib's auto labels) are skipped
    :param align: rotate labels to follow their line when True
    :param xvals: x-positions for the labels; when None they are spread
        evenly across the axes' x-range
    :param kwargs: forwarded to labelLine / Axes.text
    """
    # Line2D.get_axes() was removed in matplotlib 3.0; use .axes instead.
    ax = lines[0].axes
    labLines = []
    labels = []

    # Take only the lines which have labels other than the default ones.
    for line in lines:
        label = line.get_label()
        if "_line" not in label:
            labLines.append(line)
            labels.append(label)

    if xvals is None:
        xmin,xmax = ax.get_xlim()
        xvals = np.linspace(xmin,xmax,len(labLines)+2)[1:-1]

    for line,x,label in zip(labLines,xvals,labels):
        labelLine(line,x,label,align,**kwargs)
# =====================================================================
# =====================================================================
# 4)
# =====================================================================
|
983,695 | 5bc32afb0c6cc8f446c0ad623dea6bd493386e5a | import sys
from mainwindow_ui import Ui_MainWindow
from popup_ui import Ui_Dialog
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QSpinBox, QWidget, QDialog
class mainWindow(QMainWindow, Ui_MainWindow):
    """Main window: wires the generated Ui_MainWindow widgets to handlers."""

    def __init__(self):
        super(mainWindow, self).__init__()
        self.setupUi(self)
        # Wire widget signals to their handlers.
        self.pushButton_schreib_was.clicked.connect(self.shreib_endlich_was)
        self.pushButton_clear.clicked.connect(self.clear)
        self.spinBox.valueChanged.connect(self.spinbox_value)
        self.label.setText('Hallo 0')

    def shreib_endlich_was(self):
        # Write a fixed greeting into the text browser.
        self.textBrowser.setPlainText('Hallo Uli!')

    def spinbox_value(self):
        # Mirror the spin box value into the label.
        val = str(self.spinBox.value())
        self.label.setText('Hallo ' + val)

    def clear(self):
        # NOTE(review): `popup` is not defined in this module (only
        # Ui_Dialog is imported), so clicking "clear" raises NameError.
        # Also: `app` is never used, no event loop is started, and the
        # handler does not actually clear anything — confirm intent.
        app = QApplication(sys.argv)
        form = popup()
        form.show()
|
983,696 | 1eeffd17dc6304be84e73fe1bad68bb50174ba4e | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 11:06:11 2021
@author: Vitamin-C
"""
# import os
# os.chdir('D:\\Projects\\python_work\\tensorflow\\testing_creation_nn')
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras.preprocessing.image import ImageDataGenerator as idg
from sklearn.model_selection import train_test_split
import numpy as np
import json
from contextlib import redirect_stdout
# Import custom classes and modules
from MyModel import MyModel
from results import results
from predict_image_number import predict_number
"""
An attempted implementaion of Savita Ahlawat et al. Improved Handwritten Digit Recognition Using
Convolutional Neural Networks (CNN). Sensors (2020), 20, pg. 3344 to the MNIST dataset.
Set the appropriate dir using greyed import commands.
"""
# Let us use the MNIST Database of numerical images
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Reshape for datagen (requires rank 4)
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
# Split the training values into a training and an evaluation set
x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                  y_train,
                                                  test_size = 0.05,
                                                  random_state = 0)
# Pre-process the data
# NOTE(review): horizontal_flip=True mirrors digit images, which changes
# their identity for most digits (e.g. 2, 3, 5, 7) — confirm this
# augmentation is intended for MNIST.
datagen = idg(rotation_range=15,
              width_shift_range=0.1,
              height_shift_range=0.1,
              horizontal_flip=True,
              featurewise_center=True,
              featurewise_std_normalization = True,
              zca_whitening = True,
              )
# Define an empty scaler for the data
# Define data normalisation here, since the def is short we won't create a new module
def normalize(x_train, x_val, x_test):
    """Cast the three image arrays to float32 and scale pixel values
    from [0, 255] down to [0, 1]."""
    scaled = tuple(arr.astype("float32") / 255.0 for arr in (x_train, x_val, x_test))
    return scaled
# Scale the images, then fit the generator statistics (featurewise
# centering / std normalisation / ZCA need a fit pass).
x_train, x_val, x_test = normalize(x_train, x_val, x_test)
datagen.fit(x_train)
datagen.fit(x_val)
# One-hot encode the results
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)
y_val = tf.keras.utils.to_categorical(y_val, 10)
# Generate the model with or without (default) dropout
model = MyModel(use_dp = True)
model = model.model()
options = tf.keras.optimizers.Adam(learning_rate = 0.001, beta_1 = 0.9, beta_2 = 0.999,
                                   epsilon = 1e-7)
model.compile(optimizer=options, loss = tf.losses.categorical_crossentropy,
              metrics = ['accuracy'])
# Train and evaluate (results() handles fitting and reporting).
results(model, x_train, y_train,
        x_val, y_val,
        x_test, y_test)
# Save model features
tf.keras.utils.plot_model(model, to_file='mnist_model.png', show_shapes=True)
model.save('mnist_model.h5')
# Save individual config
config = model.get_config()
weights = model.get_weights()
with open('modelsummary.json', 'w') as f:
    with redirect_stdout(f):
        model.summary()
with open("config.json", "w") as file:
    json.dump(config, file)
# NOTE(review): saving weights via an object-dtype array and savetxt
# with fmt='%s' is lossy (string repr only) — not reloadable as weights.
with open('weights.txt', 'w') as we:
    weights = np.asarray(weights)
    np.savetxt(we, weights, fmt = '%s')
# Make some predictions
predict_number('noisy_3.jpg', model)
predict_number('clean_7.png', model)
|
983,697 | 2bf2e50182b816eaed96c2384ee75bb0935eb0b1 | """
A module defining a list of fixture functions that are shared across all the skabase
tests.
"""
from __future__ import absolute_import
# from unittest import mock
import pytest
import logging
import importlib
import os
import sys
import time
import json
import tango
from tango import DevState
from tango import DeviceProxy
from tango.test_context import MultiDeviceTestContext, get_host_ip
import socket
from ska_tango_base.control_model import LoggingLevel, ObsState, AdminMode
#TODO clean up file path navigation with proper packaging
from ska_mid_cbf_mcs.dev_factory import DevFactory
from ska_mid_cbf_mcs.vcc.vcc_device import Vcc
def pytest_addoption(parser):
    """
    Pytest hook; implemented to add the `--test-context` option, used to
    indicate that a test Tango subsystem is available; otherwise there is no
    need for a :py:class:`tango.test_context.MultiDeviceTestContext`.

    :param parser: the command line options parser
    :type parser: :py:class:`argparse.ArgumentParser`
    """
    help_text = (
        "Tell pytest that you have a true Tango context and don't "
        "need to spin up a Tango test context"
    )
    parser.addoption(
        "--test-context", action="store_true", default=False, help=help_text
    )
@pytest.fixture
def tango_context(devices_to_load, request):
    """Yield a MultiDeviceTestContext when no real Tango subsystem is
    available (i.e. --test-context was NOT passed), otherwise yield None.

    Also flips DevFactory/Vcc test-context flags so device code knows
    which mode it is running in.
    """
    test_context = request.config.getoption("--test-context")
    logging.info("test context: %s", test_context)
    if test_context:
        with MultiDeviceTestContext(devices_to_load, process=False) as context:
            DevFactory._test_context = context
            Vcc.TEST_CONTEXT = True
            yield context
    else:
        Vcc.TEST_CONTEXT = False
        yield None
#TODO: mocker patch may allow for DeviceProxy workaround in test context usage
# @pytest.fixture(scope="module")
# def devices_to_test(request):
# yield getattr(request.module, "devices_to_test")
# @pytest.fixture(scope="function")
# def multi_device_tango_context(
# devices_to_test # pylint: disable=redefined-outer-name
# ):
# """
# Creates and returns a TANGO MultiDeviceTestContext object, with
# tango.DeviceProxy patched to work around a name-resolving issue.
# """
# def _get_open_port():
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.bind(("", 0))
# s.listen(1)
# port = s.getsockname()[1]
# s.close()
# return port
# HOST = get_host_ip()
# PORT = _get_open_port()
# _DeviceProxy = tango.DeviceProxy
# mock.patch(
# 'tango.DeviceProxy',
# wraps=lambda fqdn, *args, **kwargs: _DeviceProxy(
# "tango://{0}:{1}/{2}#dbase=no".format(HOST, PORT, fqdn),
# *args,
# **kwargs
# ),
# )
# with MultiDeviceTestContext(
# devices_to_test, host=HOST, port=PORT, process=True
# ) as context:
# yield context
@pytest.fixture(name="proxies", scope="session")
def init_proxies_fixture():
    """Session-scoped fixture bundling DeviceProxy handles for every CBF
    device used by the tests (VCCs, FSPs and their function-mode /
    subarray devices, controller, subarrays, TM leaf node), plus helpers
    to wait for state transitions and to reset devices between tests.
    """
    class Proxies:
        def __init__(self):
            # NOTE: set debug_device_is_on to True in order
            # to allow device debugging under VScode
            self.debug_device_is_on = False
            if self.debug_device_is_on:
                # Increase the timeout in order to allow time for debugging
                timeout_millis = 500000
            else:
                timeout_millis = 60000
            # VCC proxies, keyed 1..4.
            self.vcc = {}
            for i, proxy in enumerate([DeviceProxy("mid_csp_cbf/vcc/" + str(j + 1).zfill(3)) for j in range(4)]):
                proxy.loggingLevel = LoggingLevel.DEBUG
                self.vcc[i + 1] = proxy
            band_tags = ["12", "3", "4", "5"]
            # Per-VCC band devices and search-window (Tdc) devices.
            self.vccBand = [[DeviceProxy("mid_csp_cbf/vcc_band{0}/{1:03d}".format(j, k + 1)) for j in band_tags] for k in range(4)]
            self.vccTdc = [[DeviceProxy("mid_csp_cbf/vcc_sw{0}/{1:03d}".format(j, i + 1)) for j in ["1", "2"]] for i in range(4)]
            self.fspSubarray = {} # index 1, 2 = corr (01_01, 02_01); index 3, 4 = pss (03_01, 04_01); index 5, 6 = pst (01_01, 02_01)
            self.fspCorrSubarray = {}
            for i, proxy in enumerate([DeviceProxy("mid_csp_cbf/fspCorrSubarray/" + str(j + 1).zfill(2) + "_01") for j in range(2)]):
                proxy.loggingLevel = LoggingLevel.DEBUG
                self.fspSubarray[i + 1] = proxy
                self.fspCorrSubarray[i] = proxy
            self.fspPssSubarray = {}
            for i ,proxy in enumerate([DeviceProxy("mid_csp_cbf/fspPssSubarray/" + str(j + 3).zfill(2) + "_01") for j in range(2)]):
                proxy.loggingLevel = LoggingLevel.DEBUG
                self.fspSubarray[i + 3] = proxy
                self.fspPssSubarray[i] = proxy
            self.fspPstSubarray = {}
            for i ,proxy in enumerate([DeviceProxy("mid_csp_cbf/fspPstSubarray/" + str(j + 1).zfill(2) + "_01") for j in range(2)]):
                proxy.Init()
                self.fspSubarray[i + 5] = proxy
                self.fspPstSubarray[i] = proxy
            # Function-mode devices (corr/pss/pst/vlbi) for each of the 4 FSPs.
            self.fsp1FunctionMode = [*map(DeviceProxy, ["mid_csp_cbf/fsp_{}/01".format(i) for i in ["corr", "pss", "pst", "vlbi"]])]
            self.fsp2FunctionMode = [*map(DeviceProxy, ["mid_csp_cbf/fsp_{}/02".format(i) for i in ["corr", "pss", "pst", "vlbi"]])]
            self.fsp3FunctionMode = [*map(DeviceProxy, ["mid_csp_cbf/fsp_{}/03".format(i) for i in ["corr", "pss", "pst", "vlbi"]])]
            self.fsp4FunctionMode = [*map(DeviceProxy, ["mid_csp_cbf/fsp_{}/04".format(i) for i in ["corr", "pss", "pst", "vlbi"]])]
            self.fsp = {}
            for i, proxy in enumerate([DeviceProxy("mid_csp_cbf/fsp/" + str(j + 1).zfill(2)) for j in range(4)]):
                proxy.loggingLevel = LoggingLevel.DEBUG
                proxy.Init()
                self.fsp[i + 1] = proxy
            # self.sw = {}
            # for i, proxy in enumerate([DeviceProxy("mid_csp_cbf/sw" + str(j + 1) + "/01") for j in range(2)]):
            #     proxy.Init()
            #     self.sw[i + 1] = proxy
            # Controller: wait for STANDBY, then read the receptor->VCC map.
            self.controller = DeviceProxy("mid_csp_cbf/sub_elt/controller")
            self.controller.loggingLevel = LoggingLevel.DEBUG
            self.controller.set_timeout_millis(timeout_millis)
            self.wait_timeout_dev([self.controller], DevState.STANDBY, 3, 0.05)
            self.receptor_to_vcc = dict([*map(int, pair.split(":"))] for pair in self.controller.receptorToVcc)
            self.subarray = {}
            for i, proxy in enumerate([DeviceProxy("mid_csp_cbf/sub_elt/subarray_" + str(i + 1).zfill(2)) for i in range(3)]):
                proxy.loggingLevel = LoggingLevel.DEBUG
                self.subarray[i + 1] = proxy
                self.subarray[i + 1].set_timeout_millis(timeout_millis)
            self.tm = DeviceProxy("ska_mid/tm_leaf_node/csp_subarray_01")
            self.tm.Init()
        def clean_proxies(self):
            # Walk subarray 1 back through the obsState machine to EMPTY,
            # then switch subarray, VCC and FSP devices OFF.
            self.receptor_to_vcc = dict([*map(int, pair.split(":"))] for pair in self.controller.receptorToVcc)
            for proxy in [self.subarray[i + 1] for i in range(1)]:
                if proxy.obsState == ObsState.SCANNING:
                    proxy.EndScan()
                    self.wait_timeout_obs([proxy], ObsState.READY, 3, 0.05)
                if proxy.obsState == ObsState.READY:
                    proxy.GoToIdle()
                    self.wait_timeout_obs([proxy], ObsState.IDLE, 3, 0.05)
                if proxy.obsState == ObsState.IDLE:
                    proxy.RemoveAllReceptors()
                    self.wait_timeout_obs([proxy], ObsState.EMPTY, 3, 0.05)
                if proxy.obsState == ObsState.EMPTY:
                    proxy.Off()
                    self.wait_timeout_dev([proxy], DevState.OFF, 3, 0.05)
            for vcc_proxy in [self.vcc[i + 1] for i in range(4)]:
                if vcc_proxy.State() == DevState.ON:
                    vcc_proxy.Off()
                    self.wait_timeout_dev([vcc_proxy], DevState.OFF, 1, 0.05)
            for fsp_proxy in [self.fsp[i + 1] for i in range(4)]:
                if fsp_proxy.State() == DevState.ON:
                    fsp_proxy.Off()
                    self.wait_timeout_dev([fsp_proxy], DevState.OFF, 1, 0.05)
        def wait_timeout_dev(self, proxygroup, state, time_s, sleep_time_s):
            # Poll each proxy's State() until it matches or time_s elapses.
            #time.sleep(time_s)
            timeout = time.time_ns() + (time_s * 1_000_000_000)
            while time.time_ns() < timeout:
                for proxy in proxygroup:
                    if proxy.State() == state: break
                time.sleep(sleep_time_s)
        def wait_timeout_obs(self, proxygroup, state, time_s, sleep_time_s):
            # Same polling loop for the obsState attribute.
            #time.sleep(time_s)
            timeout = time.time_ns() + (time_s * 1_000_000_000)
            while time.time_ns() < timeout:
                for proxy in proxygroup:
                    if proxy.obsState == state: break
                time.sleep(sleep_time_s)
    return Proxies()
@pytest.fixture(name="input_test_data", scope="class", \
    params = [
        ([1, 3, 4, 2], "/../data/ConfigureScan_basic.json") ] )
#params = [
#    ([4, 1, 2], "/test_json/Configure_TM-CSP_v2.json") ] )
# params = [
#     ([1, 3, 4, 2], "/test_json/ConfigureScan_basic.json"),
#     ([4, 1, 2], "/test_json/Configure_TM-CSP_v2.json") ] )
def input_test_data(request):
    """Parametrized test input.

    NOTE(review): despite the local name, each param is a
    (receptor_id_list, config_json_path) tuple, not a file name.
    """
    file_name = request.param
    yield file_name
@pytest.fixture(scope="class")
def debug_device_is_on():
    """Debug flag fixture: set the return value to True to allow device
    debugging under VSCode.

    The previous body also computed a ``timeout_millis`` local that was
    never used or returned; that dead code has been removed.
    """
    return False
# Class-scoped convenience fixtures, one per frequently-used Tango device.
@pytest.fixture(scope="class")
def create_vcc_proxy():
    dp = DeviceProxy("mid_csp_cbf/vcc/001")
    dp.loggingLevel = LoggingLevel.DEBUG
    return dp
@pytest.fixture(scope="class")
def create_band_12_proxy():
    #return DeviceTestContext(VccBand1And2)
    return DeviceProxy("mid_csp_cbf/vcc_band12/001")
@pytest.fixture(scope="class")
def create_band_3_proxy():
    #return DeviceTestContext(VccBand3)
    return DeviceProxy("mid_csp_cbf/vcc_band3/001")
@pytest.fixture(scope="class")
def create_band_4_proxy():
    #return DeviceTestContext(VccBand4)
    return DeviceProxy("mid_csp_cbf/vcc_band4/001")
@pytest.fixture(scope="class")
def create_band_5_proxy():
    #return DeviceTestContext(VccBand5)
    return DeviceProxy("mid_csp_cbf/vcc_band5/001")
@pytest.fixture(scope="class")
def create_sw_1_proxy():
    #return DeviceTestContext(VccSearchWindow)
    return DeviceProxy("mid_csp_cbf/vcc_sw1/001")
@pytest.fixture(scope="class")
def create_fsp_corr_subarray_1_1_proxy():
    return DeviceProxy("mid_csp_cbf/fspCorrSubarray/01_01")
@pytest.fixture(scope="class")
def create_fsp_pss_subarray_2_1_proxy():
    return DeviceProxy("mid_csp_cbf/fspPssSubarray/02_01")
@pytest.fixture(scope="class")
def create_corr_proxy():
    return DeviceProxy("mid_csp_cbf/fsp_corr/01")
@pytest.fixture(scope="class")
def create_pss_proxy():
    return DeviceProxy("mid_csp_cbf/fsp_pss/01")
def load_data(name):
    """
    Load the dataset called *name* from ``tests/data/<name>.json``.

    :param name: name of the dataset to be loaded
    :type name: string
    :return: the deserialized JSON content
    """
    path = f"tests/data/{name}.json"
    with open(path, "r") as json_file:
        data = json.load(json_file)
    return data
|
983,698 | b4a227d81e673e4e44f914cebfc70b5fc95b117c | a='onion'
# Keep the first letter, replace every later 'o' with '$'.
a = a[:1] + a[1:].replace("o", "$")
print(a)
|
983,699 | 32824320a736eba69416c3ab8e8bc79f636f6233 | import os
import sys
import random
import numpy as np
import torch
import torch.nn as nn
from openmm import OpenMMException
from replaybuffer import ReplayBuffer
from cdqlnetwork import Model
class CDQL:
    def __init__(self, system, all_actions, num_explore_episodes, gamma,
                 device=torch.device("cuda:0"), folder_name="./",
                 centralize_states=False, centralize_rewards=False,
                 update_num=20):
        """Clipped Double Q-Learning agent.

        Args:
            system: environment object; must expose ``num_bins``
            all_actions: list of the discrete actions available
            num_explore_episodes: episodes of purely random exploration
            gamma: discount factor
            device: torch device for tensors and networks
            folder_name: directory prefix for saved buffers/losses/networks
            centralize_states: concatenate surrounding-region state (doubles
                the network input size)
            centralize_rewards: average in surrounding-region rewards
            update_num: stored update-count parameter
        """
        self.gamma = gamma
        self.batch_size = 32
        self.buffer = ReplayBuffer(1e6)  # transition replay memory
        self.folder_name = folder_name
        self.device = device
        self.centralize_states = centralize_states
        self.centralize_rewards = centralize_rewards
        self.system = system
        self.num_explore_episodes = num_explore_episodes
        self.all_actions = all_actions
        self.num_actions = len(self.all_actions)
        # Centralized states double the per-region input size.
        if self.centralize_states:
            num_bins = self.system.num_bins * 2
        else:
            num_bins = self.system.num_bins
        self.model = Model(self.device, num_bins=num_bins,
                           num_actions=self.num_actions)
        self.loss = []     # [L1, L2] per training step
        self.store_Q = []  # [Q1, Q2, Q_expected] per training step
        self.training_iter = 0
        self.update_freq = 2  # target networks updated every 2 train steps
        self.update_num = update_num
    def _update(self):
        """Updates q1, q2, q1_target and q2_target networks based on
        clipped Double Q Learning Algorithm
        """
        # Not enough experience to form a batch yet.
        if (len(self.buffer) < self.batch_size):
            return
        self.training_iter += 1
        # Make sure actor_target and critic_target are in eval mode
        assert not self.model.q_target_1.training
        assert not self.model.q_target_2.training
        assert self.model.q_1.training
        assert self.model.q_2.training
        # Sample a batch and unzip it into per-field tensors.
        transitions = self.buffer.sample(self.batch_size)
        batch = self.buffer.transition(*zip(*transitions))
        state_batch = torch.tensor(batch.state, device=self.device).float()
        action_batch = torch.tensor(batch.action,
                                    device=self.device).unsqueeze(-1).long()
        reward_batch = torch.tensor(batch.reward,
                                    device=self.device).unsqueeze(-1).float()
        next_state_batch = torch.tensor(batch.next_state,
                                        device=self.device).float()
        is_done_batch = torch.tensor(batch.done,
                                     device=self.device).unsqueeze(-1).bool()
        # Build the TD target without tracking gradients; terminal states
        # contribute no bootstrapped value (masked by ~is_done).
        with torch.no_grad():
            Q_next_1 = ((~is_done_batch)
                        * (self.model.q_target_1(next_state_batch).min(dim=-1)[0].unsqueeze(-1)))
            Q_next_2 = ((~is_done_batch)
                        * (self.model.q_target_2(next_state_batch).min(dim=-1)[0].unsqueeze(-1)))
            # Use max want to avoid underestimation bias
            Q_next = torch.max(Q_next_1, Q_next_2)
            Q_expected = reward_batch + self.gamma * Q_next
        # Q-values of the actions actually taken, from both critics.
        Q_1 = self.model.q_1(state_batch).gather(-1, action_batch)
        Q_2 = self.model.q_2(state_batch).gather(-1, action_batch)
        L_1 = nn.MSELoss()(Q_1, Q_expected)
        L_2 = nn.MSELoss()(Q_2, Q_expected)
        self.loss.append([L_1.item(), L_2.item()])
        # Independent gradient steps for the two critics.
        self.model.q_optimizer_1.zero_grad()
        self.model.q_optimizer_2.zero_grad()
        L_1.backward()
        L_2.backward()
        self.model.q_optimizer_1.step()
        self.model.q_optimizer_2.step()
        self.store_Q.append([Q_1.tolist(), Q_2.tolist(), Q_expected.tolist()])
        # Periodically refresh the target networks.
        if (self.training_iter % self.update_freq) == 0:
            self.model.update_target_nn()
def _get_state(self, grid_dist, surrounding_dist):
"""Gets concatenated state if including state information about surrounding regions
(i.e. if self.centralize_states). Inputs a list of states for multiple regions/surrounding regions
Args:
grid_dist: A 2D List representing the state of the current region
surrounding_dist: A 2D List representing the state of the surrounding region
Returns:
A 2D List representing the final state information to use
"""
if (self.centralize_states):
grid_dist = torch.tensor(grid_dist, device=self.device).float()
surrounding_dist = torch.tensor(surrounding_dist,
device=self.device).float()
cat_dist = torch.cat((grid_dist, surrounding_dist), dim=1)
return cat_dist.tolist()
else:
return grid_dist
def _get_reward(self, grid_reward, surrounding_reward):
"""Gets concatenated rewards if including cost information about surrounding regions
(i.e. if self.centralize_rewards). Inputs a list of rewards for multiple regions/surrounding regions
Args:
grid_reward: A list representing the reward of the current region
surrounding_reward: A list representing the reward of the surrounding regions
Returns:
A list representing the final reward information to use
"""
if (self.centralize_rewards):
grid_reward = torch.tensor(grid_reward, device=self.device).float()
surrounding_reward = torch.tensor(surrounding_reward,
device=self.device).float()
final_reward = (grid_reward + surrounding_reward) / 2
return final_reward.tolist()
else:
return grid_reward
def _get_action(self, state, episode):
"""Gets action given some state
if episode is less than 5 returns a random action for each region
Args:
state: List of states (corresponding to each region)
episode: episode number
"""
if (episode < self.num_explore_episodes):
action = [random.choice(list(range(self.num_actions)))
for _ in range(len(state))]
return action
action = []
self.model.q_1.eval()
with torch.no_grad():
state = torch.tensor(state, device=self.device).float()
action = torch.argmin(self.model.q_1(state), dim=-1).tolist()
self.model.q_1.train()
return action
def _save_data(self):
filename = self.folder_name + "replaybuffer"
np.save(filename, np.array(self.buffer.buffer, dtype=object))
filename = self.folder_name + "loss"
np.save(filename, np.array(self.loss))
filename = self.folder_name + "Q_pair.npy"
np.save(filename, np.array(self.store_Q, dtype=object))
self.model.save_networks(self.folder_name)
def _save_episode_data(self, episode_folder_name):
filename = episode_folder_name + "replaybuffer"
np.save(filename, np.array(self.buffer.buffer, dtype=object))
self.model.save_networks(episode_folder_name)
def load_data(self):
self.loss = torch.load(self.folder_name + "loss.pt").tolist()
self.buffer.load_buffer(self.folder_name + "replaybuffer.npy")
self.model.load_networks(self.folder_name)
def train(self, num_decisions=350):
"""Train q networks based on Clipped Double Q Learning
Args:
num_decisions: Number of decisions to train algorithm for
"""
os.system("mkdir " + self.folder_name + "Train")
for i in range(5000):
episode_folder_name = self.folder_name + "Train/" + str(i) + "/"
all_system_states = []
all_system_rewards = []
all_system_states_cluster = []
all_grid_states_cluster = []
all_surrounding_states_cluster = []
os.system("mkdir " + episode_folder_name)
filename = episode_folder_name + str(i) + ".h5"
self.system.reset_context(filename)
self.system.run_decorrelation(20)
grid_dist, surrounding_dist, _, _, _, _ = self.system.get_state_reward()
state = self._get_state(grid_dist, surrounding_dist)
for j in range(num_decisions):
action_index = self._get_action(state, i)
transition_to_add = [state, action_index]
tag = "_train_" + str(j)
actions = [self.all_actions[i] for i in action_index]
try:
self.system.update_action(actions)
system_states, system_rewards, system_states_cluster = self.system.run_step(
is_detailed=True, tag=tag)
all_system_states.append(system_states)
all_system_rewards.append(system_rewards)
all_system_states_cluster.append(system_states_cluster)
except OpenMMException:
print("Broken Simulation at Episode:",
str(i), ", Decision:", str(j))
break
grid_dist, surrounding_dist, grid_reward, surrounding_reward, grid_states_cluster, surrounding_states_cluster = self.system.get_state_reward()
state = self._get_state(grid_dist, surrounding_dist)
reward = self._get_reward(grid_reward, surrounding_reward)
all_grid_states_cluster.append(grid_states_cluster)
all_surrounding_states_cluster.append(surrounding_states_cluster)
# Use len_reward for number of grids
done = [False] * len(reward) # Never Done
transition_to_add.extend([reward, state, done])
rb_decision_samples = 0
for rb_tuple in zip(*transition_to_add):
self.buffer.push(*list(rb_tuple))
for _ in range(self.update_num):
self._update()
self._save_episode_data(episode_folder_name)
np.save(episode_folder_name + "system_states",
np.array(all_system_states))
np.save(episode_folder_name + "system_rewards",
np.array(all_system_rewards))
np.save(episode_folder_name + "system_states_cluster",
np.array(all_system_states_cluster))
np.save(episode_folder_name + "grid_states_cluster",
np.array(all_grid_states_cluster, dtype=object))
np.save(episode_folder_name + "all_states_cluster",
np.array(all_surrounding_states_cluster))
self._save_data()
def test(self, num_decisions=1000):
"""Given trained q networks, generate trajectories
Saves:
grid_rewards: Numpy array of all the rewards of each region along traj
grid_states: Numpy array of all the states (i.e. normalized distibution of cluster sizes)
of each region along traj
grid_states_cluster: Numpy array of all the cluster sizes of each region along traj
actions: Numpy array of actions taken along trajectory
dissipation: Total dissipation (not average dissipation rate) along trajectory
system_states: Numpy array of states of the system along traj:
system_states_cluster: Numpy array of cluster sizes along traj
system_rewards: Numpy array of reward of entire system along traj
"""
all_grid_states = []
all_grid_rewards = []
all_grid_states_cluster = []
all_surrounding_states_cluster = []
all_system_rewards = []
all_system_states = []
all_system_states_cluster = []
all_actions = []
all_dissipation = []
os.system("mkdir " + self.folder_name + "Test/")
filename = self.folder_name + "Test/" + "TEST.h5"
self.system.reset_context(filename)
tag = "_test_init"
self.system.run_decorrelation(20)
grid_dist, surrounding_dist, _, _, _, _ = self.system.get_state_reward()
state = self._get_state(grid_dist, surrounding_dist)
all_dissipation.append(self.system.get_dissipation())
for i in range(num_decisions):
action_index = self._get_action(state, episode=10000)
tag = "_test_" + str(i)
actions = [self.all_actions[i] for i in action_index]
all_actions.append(actions)
self.system.update_action(actions)
system_states, system_rewards, system_states_cluster = self.system.run_step(
is_detailed=True, tag=tag)
grid_dist, surrounding_dist, grid_reward, surrounding_reward, grid_states_cluster, surrounding_states_cluster = self.system.get_state_reward()
# The "grid states" and dissipation are recorded at the end of a decision
# Dissipation here is total entropy production (not epr)
# Actions are recorded at the beginning of the decision
state = self._get_state(grid_dist, surrounding_dist)
reward = self._get_reward(grid_reward, surrounding_reward)
all_grid_states.append(state)
all_grid_rewards.append(reward)
all_grid_states_cluster.append(grid_states_cluster)
all_surrounding_states_cluster.append(surrounding_states_cluster)
all_dissipation.append(self.system.get_dissipation())
# The "System States" are recorded every 0.25 seconds. Excludes 0th second
all_system_states.append(system_states)
# Just to have a 1D array use extend
all_system_rewards.append(system_rewards)
all_system_states_cluster.append(system_states_cluster)
if (i % 100 == 99):
np.save(self.folder_name + "grid_rewards",
np.array(all_grid_rewards))
np.save(self.folder_name + "grid_states",
np.array(all_grid_states, dtype=object))
np.save(self.folder_name + "grid_states_cluster",
np.array(all_grid_states_cluster, dtype=object))
np.save(self.folder_name + "surrounding_states_cluster",
np.array(all_surrounding_states_cluster))
np.save(self.folder_name + "actions", np.array(all_actions))
np.save(self.folder_name + "dissipation",
np.array(all_dissipation))
np.save(self.folder_name + "system_states",
np.array(all_system_states))
np.save(self.folder_name + "system_states_cluster",
np.array(all_system_states_cluster))
np.save(self.folder_name + "system_rewards",
np.array(all_system_rewards))
if __name__ == "__main__":
    if len(sys.argv) > 1:
        # CLI: <script> ... <centralize_code> <centralize_approach>
        #   centralize_code: 0 = neither, 1 = states only,
        #                    2 = rewards only, 3 = both
        #   centralize_approach: none | all | plaquette | grid_1 | grid_2
        centralize_code = int(sys.argv[-2])
        centralize_approach = sys.argv[-1]
        # (centralize_states, centralize_rewards) per code.
        code_map = {
            0: (False, False),
            1: (True, False),
            2: (False, True),
            3: (True, True),
        }
        if centralize_code not in code_map:
            raise ValueError("Wrong Centralize Code")
        centralize_states, centralize_rewards = code_map[centralize_code]
        # CDQL argument per approach string (grid_* map to integers).
        approach_map = {
            "all": "all",
            "plaquette": "plaquette",
            "grid_1": 1,
            "grid_2": 2,
        }
        if centralize_approach == "none":
            # "none" is only meaningful when nothing is centralized.
            if centralize_code > 0:
                print("Not running none with centralized states/rewards")
                sys.exit()
            centralize_approach = None
        elif centralize_code == 0:
            # A non-"none" approach requires some centralization.
            print("Not running not none without centralized states/rewards")
            sys.exit()
        elif centralize_approach in approach_map:
            centralize_approach = approach_map[centralize_approach]
        else:
            raise ValueError("Wrong Centralize Approach")
        c = CDQL(centralize_states=centralize_states,
                 centralize_rewards=centralize_rewards,
                 centralize_approach=centralize_approach)
    else:
        # No CLI arguments: default to fully-centralized plaquette training.
        c = CDQL(centralize_states=True, centralize_rewards=True,
                 centralize_approach="plaquette")
    c.train()
    # c.model.load_networks()
    # c.test()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.