id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4907621 | <filename>moonleap/utils/join.py
def join(prefix, infix, postfix=""):
    """Wrap *infix* between *prefix* and *postfix*.

    A falsy infix (e.g. the empty string) is returned unchanged, so no
    stray prefix/postfix is emitted around an empty middle part.
    """
    if not infix:
        return infix
    return prefix + infix + postfix
| StarcoderdataPython |
1811862 | <filename>index2.py
def add_task(any_list):
    """Prompt the user for a task and append it to *any_list* in place."""
    user_add_task = input("Enter a task you would like to add to the list: ")
    any_list.append(user_add_task)
def delete_task(any_list):
    """Prompt the user for a task and remove its first occurrence from
    *any_list* in place.  Tasks not present in the list are ignored."""
    target = input("Enter a task you would like to delete from the list: ")
    if target in any_list:
        any_list.remove(target)
| StarcoderdataPython |
101294 | import torch
import shutil
import numpy as np
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
# import cv2
from skimage.transform import resize
import torchvision.transforms as transforms
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score
class AverageMeter(object):
    """Tracks the most recent value and a running average of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def save_checkpoint(state, filename='checkpoint.pth.tar'):
    """Serialize a training-state dict (model/optimizer state, epoch, ...)
    to *filename* with torch.save."""
    torch.save(state, filename)
def adjust_learning_rate(optimizer, epoch, args, interval):
    """Step the learning rate down 10x at each epoch boundary in *interval*.

    Before interval[0]: args.lr; between the boundaries: args.lr * 0.1;
    afterwards: args.lr * 0.01.  The chosen rate is written into every
    parameter group of *optimizer* and also returned.
    """
    if epoch < interval[0]:
        lr = args.lr
    elif epoch < interval[1]:
        lr = args.lr * 0.1
    else:
        lr = args.lr * 0.01
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy (in percent) for each k in *topk*.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of 1-element tensors, one per k, each holding accuracy * 100.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    # Indices of the top-maxk predictions, transposed to (maxk, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape() instead of view(): correct[:k] is non-contiguous for
        # k > 1 and view() raises a RuntimeError on non-contiguous tensors.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def multi_class_auc(all_target, all_output, num_c = None):
    """One-vs-rest macro AUC over *num_c* classes.

    Both arguments are expected to be hard class labels; each is binarized
    into a one-hot indicator matrix, so each per-class AUC is computed from
    0/1 "scores" rather than probabilities.
    NOTE(review): AUC over binarized hard predictions degenerates towards
    per-class balanced accuracy -- confirm this is intended.
    """
    from sklearn.preprocessing import label_binarize
    # all_output = np.stack(all_output)
    all_target = label_binarize(all_target, classes=list(range(0, num_c)))
    all_output = label_binarize(all_output, classes=list(range(0, num_c)))
    auc_sum = []
    for num_class in range(0, num_c):
        try:
            auc = roc_auc_score(all_target[:, num_class], all_output[:, num_class])
            auc_sum.append(auc)
        except ValueError:
            # roc_auc_score raises when a column contains only one class;
            # such classes are skipped from the average.
            pass
    # Average over classes that produced a score; the epsilon guards the
    # all-skipped case (returns ~0 instead of dividing by zero).
    auc = sum(auc_sum) / (float(len(auc_sum))+1e-8)
    return auc
def evaluation_metrics(label, pred, C):
    """Return (auc, accuracy, macro-precision, macro-recall, macro-f1),
    each rounded to 4 decimals, for C-class hard predictions *pred*
    against ground-truth *label*."""
    if C==2:
        auc = roc_auc_score(label, pred)
    else:
        auc = multi_class_auc(label, pred, num_c=C)
    corrects = np.equal(np.array(label), np.array(pred))
    acc = float(sum(corrects)) / len(corrects)
    # mean class
    precision = precision_score(label, pred, average='macro')
    recall = recall_score(label, pred, average='macro')
    f1score = f1_score(label, pred, average='macro')
    return round(auc, 4), round(acc, 4), round(precision, 4), round(recall, 4), round(f1score, 4)
def showfeature(x, savename):
    """Save a heat-map visualisation of a feature tensor to *savename*.

    Channels are max-pooled into one map, resized to 224x224, min-max
    normalised and rendered with a jet colormap.
    NOTE(review): x is indexed as x[item, :, :] -- assumes a (C, H, W)
    tensor; confirm against the caller.
    """
    # trun to numpy
    x = x.data.cpu().numpy()
    print (x.shape)
    box = []
    for item in range(0, x.shape[0]):
        x_patch = x[item, :, :]
        box.append(x_patch)
    x_patch = np.stack(box)
    # Collapse the channel axis: keep the strongest activation per pixel.
    x_patch = np.max(x_patch, axis=0)
    x_patch = resize(x_patch, (224, 224), order=3, mode='constant',
                     cval=0, clip=True, preserve_range=True)
    # Min-max normalise to [0, 1]; epsilon avoids dividing by zero for a
    # constant map.
    x_patch = (x_patch - np.min(x_patch)) / (np.max(x_patch) - np.min(x_patch) + 1e-11)
    x_patch = x_patch * 255
    x_patch = np.array(x_patch, dtype="uint8")
    plt.plot(1), plt.imshow(x_patch, cmap='jet')
    plt.axis('off')
    plt.savefig(savename, bbox_inches='tight', pad_inches=0)
def showimage(x, savename):
    """Undo ImageNet normalisation on a 3xHxW CUDA tensor and save it as
    an image file.

    NOTE(review): the channel swap [2, 1, 0] suggests the input is BGR
    (OpenCV-style) -- confirm against the data loader.
    """
    import torchvision.transforms as transforms
    # Standard ImageNet channel statistics.
    mean=[0.485, 0.456, 0.406]
    std=[0.229, 0.224, 0.225]
    # Reverse the (x - mean) / std normalisation channel-wise.
    z = x * torch.tensor(std).view(3, 1, 1).cuda()
    z = z + torch.tensor(mean).view(3, 1, 1).cuda()
    z = z.cpu()
    z = z[[2,1,0], : ,:]
    img2 = transforms.ToPILImage()(z)
    img2.save(savename)
def get_color_distortion(s=1.0):
    """Build a color-distortion augmentation pipeline.

    Args:
        s: jitter strength multiplier.

    Returns:
        A torchvision transform: random color jitter (p=0.8) followed by
        random grayscale conversion (p=0.2).
    """
    color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
    rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)
    rnd_gray = transforms.RandomGrayscale(p=0.2)
    color_distort = transforms.Compose([rnd_color_jitter, rnd_gray])
    return color_distort
def gaussian_blur(x):
    """With probability 0.5, blur the PIL image *x* with a Gaussian kernel
    of random radius ~ U[0.1, 2.0]; otherwise return it unchanged."""
    from PIL.ImageFilter import GaussianBlur
    if np.random.randint(0, 2) == 1:
        x = x.filter(GaussianBlur(radius=np.random.uniform(0.1, 2.0)))
    return x
| StarcoderdataPython |
11332466 | """Adds user teams
Revision ID: <KEY>
Revises: 95d86da7b3c7
Create Date: 2020-08-19 14:20:57.539270
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "95d86da7b<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
    """Create the user_teams association table linking users, servers
    (guilds) and teams; rows cascade-delete with their referents."""
    op.create_table(
        "user_teams",
        sa.Column("user_xid", sa.BigInteger(), nullable=False),
        sa.Column("guild_xid", sa.BigInteger(), nullable=False),
        sa.Column("team_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(["guild_xid"], ["servers.guild_xid"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["team_id"], ["teams.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["user_xid"], ["users.xid"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("user_xid", "guild_xid"),
    )
def downgrade():
    """Drop the user_teams table, reversing upgrade()."""
    op.drop_table("user_teams")
| StarcoderdataPython |
9611175 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
app_name = 'oauth_clients'
# URL routes for OAuth Client and AccessToken CRUD views.
# Fix: regexes are raw strings -- "\d" inside a normal string literal is an
# invalid escape sequence (DeprecationWarning since Python 3.6, lint W605).
# The matched patterns are byte-identical, so routing behavior is unchanged.
urlpatterns = [
    url(
        regex=r"^Client/~create/$",
        view=views.ClientCreateView.as_view(),
        name='Client_create',
    ),
    url(
        regex=r"^Client/(?P<pk>\d+)/~delete/$",
        view=views.ClientDeleteView.as_view(),
        name='Client_delete',
    ),
    url(
        regex=r"^Client/(?P<pk>\d+)/$",
        view=views.ClientDetailView.as_view(),
        name='Client_detail',
    ),
    url(
        regex=r"^Client/(?P<pk>\d+)/~update/$",
        view=views.ClientUpdateView.as_view(),
        name='Client_update',
    ),
    url(
        regex=r"^Client/$",
        view=views.ClientListView.as_view(),
        name='Client_list',
    ),
    url(
        regex=r"^AccessToken/~create/$",
        view=views.AccessTokenCreateView.as_view(),
        name='AccessToken_create',
    ),
    url(
        regex=r"^AccessToken/(?P<pk>\d+)/~delete/$",
        view=views.AccessTokenDeleteView.as_view(),
        name='AccessToken_delete',
    ),
    url(
        regex=r"^AccessToken/(?P<pk>\d+)/$",
        view=views.AccessTokenDetailView.as_view(),
        name='AccessToken_detail',
    ),
    url(
        regex=r"^AccessToken/(?P<pk>\d+)/~update/$",
        view=views.AccessTokenUpdateView.as_view(),
        name='AccessToken_update',
    ),
    url(
        regex=r"^AccessToken/$",
        view=views.AccessTokenListView.as_view(),
        name='AccessToken_list',
    ),
]
| StarcoderdataPython |
3460483 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 22 13:05:13 2016
@author: chyam
"""
#import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
import analyticutils as au
import argparse
def main():
    """Run a simple label-distribution analysis on a TSV classification set.

    Reads the tab-separated input file given by -i/--inFile, prints the
    per-label sample counts, draws a seaborn count plot, and writes a
    plotly pie chart to -o/--outFile via analyticutils.draw_pie.
    """
    argparser = argparse.ArgumentParser(description='This script will perform simple data analysis on a text classification dataset.')
    argparser.add_argument('-i', '--inFile', help='Input filename', required=True)
    argparser.add_argument('-o', '--outFile', help='Output filename', required=True)
    args = argparser.parse_args()
    # Load data
    #filename = "./data/clean-data-21092016.tsv"
    filename = args.inFile
    df = pd.read_csv(filename, delimiter='\t')
    # Unique samples per label; label_count is a series
    label_count = df['Label'].value_counts(); print(label_count)
    # Visualisation (Seaborn): sample count per label
    sb.set(style="whitegrid", color_codes=True)
    sb.countplot(x="Label", data=df)
    # Visualisation (plotly): pie
    au.draw_pie(label_count, args.outFile)
    # Just a message
    print("Data analysed .........")
if __name__ == '__main__':
main()
#==============================================================================
# # Visualisation(matplotlib): sample count per label
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.set_xlabel('Labels')
# ax.set_ylabel('Counts for each label')
# ax.set_title("Sample count per label")
# label_count.plot(kind='bar')
#==============================================================================
| StarcoderdataPython |
364127 | # Date: 2020/11/05
# Author: <NAME>
# Description:
# ##
#
#
def square():
    """Approximate the square root of a user-supplied integer.

    Uses exhaustive incremental search: grow a candidate from 0 by a fixed
    step until candidate**2 is within epsilon of the target, or until the
    candidate exceeds the target itself (search failure for this step size).
    """
    # Accepted margin of error between answer**2 and the target.
    epsilon = 0.01
    # Increment added to the candidate on each iteration.
    step = epsilon**2
    answer = 0.0
    objective = int(input(f"\nType an integer number.\nNumber: "))
    print(f"\n")
    while ((abs(answer**2 - objective) >= epsilon) and (answer <= objective)):
        # print(f"abs(answer**2 - objective) = {abs(answer**2 - objective)}\nanswer = {answer}\n")
        answer += step
    if (abs(answer**2 - objective) >= epsilon):
        # Fixed message: the search approximates the square ROOT, not the square.
        print(f"We didn't find the square root of the number {objective}")
    else:
        print(f"The square root of {objective} is {answer}")
# run(): entry point wrapper around the square-root approximation.
def run():
    """Execute the interactive square-root approximation."""
    square()
#MAIN(): MAIN FUNCTION
if __name__ == "__main__":
run() | StarcoderdataPython |
362716 | #!/usr/bin/env python
# encoding: utf-8
"""
fields.py
Created by <NAME> on 2013-09-23.
Copyright (c) 2012 Multmeio [design+tecnologia]. All rights reserved.
"""
from django.db import models
from django_orm.postgresql.hstore import HStoreManager
from hstore_flattenfields.db.queryset import FlattenFieldsFilterQuerySet
class FlattenFieldsFilterManager(HStoreManager):
    """Manager whose querysets are flatten-fields aware and whose inserts
    skip dynamic (hstore-backed) fields."""

    def get_query_set(self):
        return FlattenFieldsFilterQuerySet(model=self.model, using=self._db)

    def _insert(self, *args, **kwargs):
        def is_not_dynamic(field):
            # Dynamic fields live inside the hstore column and must not be
            # inserted as real table columns.
            return not field.db_type == 'dynamic_field'
        # list(...) keeps this working on Python 3, where filter() returns a
        # lazy iterator while the ORM expects a concrete sequence of fields.
        kwargs.update({
            'fields': list(filter(is_not_dynamic, kwargs.get('fields')))
        })
        return super(FlattenFieldsFilterManager, self)._insert(*args, **kwargs)
6444923 | #1. 首先创建100个Dog, 50个SheepDog, 50个PoliceDog
#2. 每一轮游戏,随机选出2个Dog
#3. dog1先攻击dog2,然后dog2攻击dog1
#3. 任何一方血量变为0就表明死亡!死亡的Dog退出游戏。
#4. 最后只有一个Dog了,游戏结束,胜利者可以吃鸡。
from dog import *
import random #产生随机数字
import time #时间模块
# 1. Create 100 dogs: even indices become sheepdogs, odd indices police dogs.
for i in range(100):
    if(i%2==0):
        SheepDog(i+1)   # create one sheepdog
    else:
        PoliceDog(i+1)  # create one police dog
# 2. Main game loop.
while(True):
    # When only one dog remains alive, announce the winner and stop.
    if(len(Dog.dogs) == 1):
        winner = Dog.dogs[0]
        print('🐔🐔🐔🐔🐔🐔🐔🐔')
        print('大吉大利,今晚吃鸡!')
        print(f'赢家是:{winner.dog_type()} {winner.name}')
        print('🐔🐔🐔🐔🐔🐔🐔🐔')
        break
    # Pick two distinct dogs at random; each attacks the other in turn.
    # (Dogs whose health reaches 0 are assumed to leave Dog.dogs.)
    dog1, dog2 = random.sample(Dog.dogs, 2)
    dog1.attack(dog2)
    dog2.attack(dog1)
    time.sleep(0.02)
| StarcoderdataPython |
11355651 | <reponame>haibeey/doclite
import pydoclite #install pydoclite
import os,glob
def removeFiles():
    """Delete any leftover doclitetest.db* files, ignoring missing ones."""
    for path in glob.glob("doclitetest.db*"):
        try:
            os.remove(path)
        except OSError:
            pass
# Demo of the pydoclite document store: start from a clean slate.
removeFiles()
# Open (or create) the database file and grab the default/base collection.
d = pydoclite.Doclite.Connect(b"doclitetest.db")
baseCollection = d.Base()
# Insert ten identical documents.
for i in range(10):
    baseCollection.Insert({"a":1})
baseCollection.FindOne(1)
baseCollection.DeleteOne(2)
print(baseCollection.Find({"a":1}))
# Delete every document matching the query, then verify none remain.
baseCollection.Delete({"a":1})
print(baseCollection.Find({"a":1}))
d.Close()
removeFiles()
| StarcoderdataPython |
3483467 | <gh_stars>1-10
import json, _jsonnet
# Evaluate the Jsonnet template and write the resulting JSON next to it.
with open('Tools/qns_gen/test.jsonnet') as f:
    data = f.read()
data = _jsonnet.evaluate_snippet("snippet",data)
# print(data)
with open('Tools/qns_gen/_qns.json', 'w') as f : f.write(data)
373700 | #!/usr/bin/python3
from datetime import datetime

# Compute a "stardate": the current year plus the elapsed fraction of the
# year, printed with 8 decimal places.
now = datetime.now()
year = now.year
day_of_year = now.timetuple().tm_yday
hour = now.hour
minute = now.minute
second = now.second
# Fractions of a (non-leap) year represented by one hour/minute/second.
one_hour = 1 / 24 / 365
one_minute = one_hour / 60
one_second = one_minute / 60
# NOTE(review): tm_yday is 1-based, so exact_day over-counts by one day,
# and leap years are ignored -- confirm whether that precision matters.
exact_day = (day_of_year / 365)
exact_hour = hour * one_hour
exact_minute = now.minute * one_minute
exact_second = now.second * one_second
exact_decimal = exact_day + exact_hour + exact_minute + exact_second
decimal = float('%.8f'%(exact_decimal))
#error = decimal * 365 - day_of_year - (hour / 24)
#error_hours = error * 24
stardate = year + decimal
#print("stardate for", year, day_of_year, hour)
#print("error:", error, "in hours:", error_hours)
print(stardate)
| StarcoderdataPython |
4908369 | import numpy as np
from gym import spaces
from ..miniworld import MiniWorldEnv, Room
from ..entity import Box, Agent
from ..params import DEFAULT_PARAMS
from ..math import *
import random
class HAWKMaze(MiniWorldEnv):
    """
    HAWK maze for training a DDQ agent.

    A num_rows x num_cols grid of rooms carved into a maze via recursive
    backtracking; the agent is rewarded for picking up red boxes.
    """
    def __init__(self, **kwargs):
        "------Parameter-Einstellungen-------"
        max_steps = 800
        domain_rand = False
        # Maze
        self.num_rows = 2 # rows of the HAWK maze
        self.num_cols = 2 # columns of the HAWK maze
        self.room_size = 5 # room size
        self.gap_size = 0.25 # wall thickness / gap between two rooms
        # Agent
        self.agent_groesse = 0.6 # agent radius (for the collision model / pickup action)
        self.schritt_agent = 0.50 # step size
        self.schritt_toleranz = 0.02 # step-size tolerance when domain randomization is enabled
        self.phi_agent = 90 # turn angle
        self.phi_tol = 0.0 # turn-angle tolerance when domain randomization is enabled
        self.start_winkel = [0, 0.5*math.pi, math.pi, -0.5*math.pi] # possible agent start angles relative to the env
        # Objects
        self.anzahl_objs = 20 # if None: count drawn at random from (min, max)
        self.anzahl_objs_min = 1 # lower bound for the random object count
        self.anzahl_objs_max = 4 # upper bound for the random object count
        "------Setzen der Parameter------"
        params = DEFAULT_PARAMS.copy()
        params.set('forward_step', self.schritt_agent, self.schritt_agent - self.schritt_toleranz, self.schritt_agent + self.schritt_toleranz)
        params.set('turn_step', self.phi_agent, self.phi_agent - self.phi_tol, self.phi_agent + self.phi_tol)
        super().__init__(
            max_episode_steps=max_steps,
            domain_rand=domain_rand,
            params=params,
            **kwargs
        )
        "------Reduzierung des Aktionsraumes------"
        # Possible actions:
        # turn_left = 0 | turn_right = 1 | move_forward = 2 | move_back = 3 | pickup = 4
        self.action_space = spaces.Discrete(self.actions.pickup+1)
    def _reward(self):
        # Goal: collect every available object with as few actions as
        # possible. An earlier variant rewarded pickups linearly (difficulty
        # rises as fewer objects remain) and deducted a little per executed
        # action to favor the shortest path:
        #return 1.0 * self.num_picked_up - 0.2 * (self.step_count / self.max_episode_steps)
        # Constant reward per collected object; deduction per action taken.
        if self.agent.carrying:
            return 1.0
        else:
            return - 0.9 * (1 / self.max_episode_steps) # every step without a box slightly reduces the reward
    def step(self, action):
        obs, reward, done, info = super().step(action)
        reward = self._reward() # compute the reward
        "Box einsammeln mit pick_up Aktion"
        if self.agent.carrying:
            self.entities.remove(self.agent.carrying)
            self.agent.carrying = None
            self.num_picked_up += 1
            self.step_count = 0 # reset the timer after a successful pickup
            if self.num_picked_up == self.anzahl_objs:
                done = True # end the episode after the last object
        return obs, reward, done, info
    def reset(self):
        """
        Reset the simulation at the start of a new episode
        This also randomizes many environment parameters (domain randomization)
        """
        # Step count since episode start
        self.step_count = 0
        # Create the agent
        self.agent = Agent()
        self.agent.radius = self.agent_groesse # adjust the agent size
        # List of entities contained
        self.entities = []
        # List of rooms in the world
        self.rooms = []
        # Wall segments for collision detection
        # Shape is (N, 2, 3)
        self.wall_segs = []
        # Generate the world
        self._gen_world()
        # Check if domain randomization is enabled or not
        rand = self.rand if self.domain_rand else None
        # Randomize elements of the world (domain randomization)
        self.params.sample_many(rand, self, [
            'sky_color',
            'light_pos',
            'light_color',
            'light_ambient'
        ])
        # Get the max forward step distance
        self.max_forward_step = self.params.get_max('forward_step')
        # Randomize parameters of the entities
        for ent in self.entities:
            ent.randomize(self.params, rand)
        # Compute the min and max x, z extents of the whole floorplan
        self.min_x = min([r.min_x for r in self.rooms])
        self.max_x = max([r.max_x for r in self.rooms])
        self.min_z = min([r.min_z for r in self.rooms])
        self.max_z = max([r.max_z for r in self.rooms])
        # Generate static data
        if len(self.wall_segs) == 0:
            self._gen_static_data()
        # Pre-compile static parts of the environment into a display list
        self._render_static()
        # Generate the first camera image
        obs = self.render_obs()
        # Return first observation
        return obs
    "------Erstellung des Raumes in MiniWorld------"
    def _gen_world(self):
        rows = []
        # For each row
        for j in range(self.num_rows):
            row = []
            # For each column
            for i in range(self.num_cols):
                min_x = i * (self.room_size + self.gap_size)
                max_x = min_x + self.room_size
                min_z = j * (self.room_size + self.gap_size)
                max_z = min_z + self.room_size
                room = self.add_rect_room(
                    min_x=min_x,
                    max_x=max_x,
                    min_z=min_z,
                    max_z=max_z,
                    wall_tex='brick_wall',
                    floor_tex='asphalt'
                )
                row.append(room)
            rows.append(row)
        visited = set()
        # Carve the maze and place the objects + agent.
        def visit(i, j):
            """
            Recursive backtracking maze construction algorithm
            Quelle: https://stackoverflow.com/questions/38502
            """
            # Look up the current room and mark it as visited.
            room = rows[j][i]
            visited.add(room)
            # Choose the neighbor order at random.
            neighbors = self.rand.subset([(0, 1), (0, -1), (-1, 0), (1, 0)], 4)
            # Process each possible neighbor.
            for dj, di in neighbors:
                ni = i + di
                nj = j + dj
                # Only continue if the neighbor lies inside the grid.
                if nj < 0 or nj >= self.num_rows:
                    continue
                if ni < 0 or ni >= self.num_cols:
                    continue
                # Identify the neighbor by row and column.
                neighbor = rows[nj][ni]
                # Skip neighbors that were already visited.
                if neighbor in visited:
                    continue
                # Connect the two rooms to form the maze corridor.
                if di == 0:
                    self.connect_rooms(room, neighbor, min_x=room.min_x, max_x=room.max_x)
                elif dj == 0:
                    self.connect_rooms(room, neighbor, min_z=room.min_z, max_z=room.max_z)
                # Recurse into the neighbor.
                visit(ni, nj)
        # Run the backtracking algorithm from the top-left room.
        visit(0, 0)
        # Create and place the objects (boxes), horizontally aligned.
        if self.anzahl_objs == None:
            self.anzahl_objs = random.randint(self.anzahl_objs_min, self.anzahl_objs_max)
        for obj in range(self.anzahl_objs):
            self.box = self.place_entity(Box(color='red', size=0.9), dir=0)
        # Reset the pickup counter.
        self.num_picked_up = 0
        # Place the agent.
        self.place_agent(dir=random.choice(self.start_winkel)) # right-angled orientation of the agent in the maze
| StarcoderdataPython |
4872355 | <reponame>Captmoonshot/pydango
#!/usr/bin/env python3
import argparse
from sqlalchemy import create_engine
from pydango import (
cinephile,
theater_owner,
cinephile_sqlite,
theater_owner_sqlite,
)
from pydango.primary_func import (
create_session,
create_sqlite_session,
insert_account_data,
insert_actor_data,
insert_category_data,
insert_director_data,
insert_movie_data,
insert_theater_data,
)
from pydango.secondary_func import (
print_header,
find_user_intent
)
from pydango.tables import (
Base
)
def get_args(argv=None):
    """Parse command-line arguments for the Pydango CLI.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            when None (the new parameter is backward-compatible and makes
            the parser testable).

    Returns:
        argparse.Namespace with a `database` attribute
        ('postgresql' by default).
    """
    parser = argparse.ArgumentParser(description='Pydango Database')
    parser.add_argument('-d', '--database', metavar='database',
        default='postgresql', help='Provide a database type: (SQLite or PostgreSQL)')
    return parser.parse_args(argv)
def main():
    """Run the Pydango CLI against SQLite or PostgreSQL.

    Creates the schema, preloads fixture data, then loops between the
    cinephile ("find") and theater-owner flows until Ctrl-C.
    """
    args = get_args()
    if args.database == 'sqlite':
        # Option to use SQLite instead of PostgreSQL
        engine = create_engine('sqlite:///sqlite3.db')
        # sqlite session
        engine, session = create_sqlite_session(engine=engine)
        Base.metadata.create_all(engine)
        # Autoload some data without user/CLI interface
        insert_category_data(session=session)
        insert_director_data(session=session)
        insert_actor_data(session=session)
        insert_account_data(session=session)
        insert_movie_data(session=session)
        insert_theater_data(session=session)
        print_header()
        try:
            while True:
                if find_user_intent() == 'find':
                    cinephile_sqlite.run()
                else:
                    theater_owner_sqlite.run()
        except KeyboardInterrupt:
            # NOTE(review): returning here skips session.close() below --
            # the close call is unreachable; confirm whether that matters.
            return
        session.close()
    else:
        # postgresql session
        engine, session = create_session()
        Base.metadata.create_all(engine)
        # Autoload some data without user/CLI interface
        insert_category_data(session=session)
        insert_director_data(session=session)
        insert_actor_data(session=session)
        insert_account_data(session=session)
        insert_movie_data(session=session)
        insert_theater_data(session=session)
        print_header()
        try:
            while True:
                if find_user_intent() == 'find':
                    cinephile.run()
                else:
                    theater_owner.run()
        except KeyboardInterrupt:
            return
        session.close()
if __name__ == '__main__':
main()
| StarcoderdataPython |
4960869 | <reponame>macodroid/strawberry_disease_classification
import os
import cv2
import numpy as np
import shutil
from bouding_box import get_all_points
from data_utils import separate_files
def extract_infected_area(image_name, json_file, path_to_file):
    """Black out everything except the annotated (infected) polygons.

    Reads the image and its polygon annotations from *path_to_file*,
    rasterises each polygon into a shared mask, and returns an image that
    keeps original pixels only inside the annotated regions.
    """
    image = cv2.imread(f'{path_to_file}/{image_name}')
    points = get_all_points(f'{path_to_file}/{json_file}')
    mask = np.zeros((image.shape[0], image.shape[1]))
    for p in points:
        # fillConvexPoly requires int32 vertex coordinates.
        cv2.fillConvexPoly(mask, np.asarray(p, dtype=np.int32), 1)
    mask = mask.astype(bool)
    out = np.zeros_like(image)
    out[mask] = image[mask]
    return out
def main():
    """Rebuild extract_dataset/{train,test,val} with disease-only images.

    The output root is recreated from scratch; each source image gets a
    masked counterpart named extract_<original name>.
    """
    raw_data = 'data'
    root_dir = 'extract_dataset'
    data_type = ['train', 'test', 'val']
    try:
        os.mkdir(root_dir)
    except:
        # Directory already exists: wipe it and start clean.
        print('directory exists')
        shutil.rmtree(root_dir)
        os.mkdir(root_dir)
    for d_type in data_type:
        path_to_save = f'{root_dir}/{d_type}'
        path_to_file = f'{raw_data}/{d_type}'
        os.mkdir(path_to_save)
        image_name, json_name = separate_files(d_type)
        for i in range(len(image_name)):
            extracted_disease_img = extract_infected_area(image_name[i], json_name[i], path_to_file)
            cv2.imwrite(f'{path_to_save}/extract_{image_name[i]}', extracted_disease_img)
main()
| StarcoderdataPython |
4943450 | <filename>codes/structure3D.py
#!/usr/local/bin/python
import re
# Convert an SDF conformer file into a JavaScript string-literal concatenation
# embedded in an HTML snippet that renders the molecule with PubChem3D WebGL.
# NOTE(review): f1/f2/f3/file handles are never closed -- consider `with`.
f = open("structure3D.html", "w")
f1 = open('Conformer3D_CID_89594.sdf')
f2 = open('Conformer3D_CID_89594.sdf', encoding="utf-8")
f3 = open('Conformer3D_CID_89594.sdf', encoding="utf-8")
length = len(f3.readlines())
# Locate the SDF record terminator ($$$$); j ends as the last line index.
for j in range(length):
    feedback = re.search("\$\$\$\$", f2.readline())
    if feedback:
        print(j)
# Emit each SDF line as a quoted JS string fragment followed by '+'.
for i in range(j):
    S = f1.readline()
    if S != '\n':
        print(r"'"+S.rstrip('\n')+r'\n'+r"'"+' '+'+'+'\n',file=f)
    else:
        print(r"'"+r'\n'+r"'"+' '+'+'+'\n',file=f)
# Close the JS string with the record terminator and emit the viewer markup.
print("'$$$$'\n", file=f)
print("\n", file=f)
print("</script>\n", file=f)
print("<style>\n", file=f)
print("    .hlong *{display:inline-block;vertical-align:middle}\n", file=f)
print("</style>\n", file=f)
print("""<div align="center" class="hlong">\n""", file=f)
print("<div>\n", file=f)
print("""<div style="width: 200px; height: 200px;">\n""", file=f)
print("""<canvas id="render-canvas4" style="width: 200px; height: 200px; border: solid 1px #ccc;"></canvas>\n""", file=f)
print("""</div>\n""", file=f)
print("""</div>\n""", file=f)
print("""</div>\n""", file=f)
print("<script>\n", file=f)
print("    if (PubChem3D_WebGL) {\n", file=f)
print("        var M4 = new PubChem3D_WebGL.Molecule({\n", file=f)
print("            canvas: document.getElementById('render-canvas4'),\n", file=f)
print("            settings: {\n", file=f)
print("                quality: 'High',\n", file=f)
print("                backgroundColor: 'Transparent',\n", file=f)
print("                model: 'Ball and Stick'\n", file=f)
print("            }\n", file=f)
print("        })\n", file=f)
print("        setTimeout(function() {\n", file=f)
print("            M4.addMain(sdf)\n", file=f)
print("            M4.animate(true)\n", file=f)
print("        },10)\n", file=f)
print("    }\n", file=f)
print("</script>\n", file=f)
f.close()
# Second pass: prepend the library <script> tag and the `var sdf =` header,
# then copy the generated body into the final Struc3D.html.
with open("structure3D.html", "r", encoding="utf-8") as f:
    length = len(f.readlines())
file = open("structure3D.html", "r", encoding="utf-8")
with open("Struc3D.html", "w", encoding="utf-8") as f:
    f.write("""<script src="https://pubchem.ncbi.nlm.nih.gov/pcfe/structure-3d-webgl/structure-3d-webgl.min.js"></script>\n""")
    f.write("<script>\n")
    f.write("// example SDF\n")
    f.write("var sdf = ")
    for i in range(length):
        f.write(file.readline())
| StarcoderdataPython |
6594901 | # coding:utf-8
import time
import json
import datetime
import tornado
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler
from tornado.websocket import WebSocketHandler
from tornado.concurrent import run_on_executor
from tornado import gen
from tornado.options import define, options, parse_command_line
from collections import deque
# 这个并发库在python3自带;在python2需要安装sudo pip install futures
from concurrent.futures import ThreadPoolExecutor
define("port", default=8888, type=int)
settings = {"static_path": "template"}
class just_now_router(RequestHandler):
    """Responds immediately -- baseline for comparing with blocking handlers."""
    def get(self):
        self.write("i hope just now see you")
class sleep_long_time_router(RequestHandler):
    """Blocks the whole IOLoop for 60 s -- demonstrates synchronous blocking."""
    def get(self):
        time.sleep(60)
        self.write("sleep 60s")
class index_router(RequestHandler):
    """Renders the tail-f demo page; the text panel is shown only when the
    ?text1= query argument is present."""
    def get(self):
        title1 = ''
        try:
            title1 = self.get_argument("text1")
        except:
            # Missing argument: render the page without the text panel.
            self.render("./templates/tailf_index.html", textDiv="")
        if title1:
            self.render("./templates/tailf_index.html", textDiv="textDiv")
class sleep_async_router(RequestHandler):
    """Responds immediately and runs the sleep as a deferred background task."""
    executor = ThreadPoolExecutor(2)
    def get(self):
        # add_callback returns immediately; the callback itself runs on the
        # next IOLoop iteration, which implements a deferred task. Useful for
        # work whose result is not returned to the caller, e.g. sending a
        # notification e-mail.
        IOLoop.instance().add_callback(self.sleep)
        self.write("when i sleep")
        return
    @run_on_executor
    def sleep(self):
        print("sleep1 start", datetime.datetime.now())
        time.sleep(5)
        print("sleep1 end", datetime.datetime.now())
class sleep_coroutine_router(RequestHandler):
    """Sleeps 5 s without blocking the IOLoop, using a generator coroutine."""
    @tornado.gen.coroutine
    def get(self):
        print("sleep2 start", datetime.datetime.now())
        # Fix: gen.sleep returns a Future and must be yielded; without the
        # yield the handler responded immediately and never actually slept.
        yield tornado.gen.sleep(5)
        self.write("after sleep, now I'm back %s" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        print("sleep2 end", datetime.datetime.now())
class sleep_router(RequestHandler):
    """Runs the blocking sleep on a thread pool so the IOLoop stays responsive."""
    executor = ThreadPoolExecutor(10)
    @run_on_executor
    def get(self):
        print("sleep3 start", datetime.datetime.now())
        time.sleep(5)
        self.write("after sleep, now I'm back %s" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        print("sleep3 end", datetime.datetime.now())
class wait_router(RequestHandler):
    """Awaits a thread-pool task and writes its result back as JSON."""
    executor = ThreadPoolExecutor(2)
    @tornado.gen.coroutine
    def get(self):
        result, m = yield tornado.gen.maybe_future(self.wait())
        yield self.write({"result": result, "sum": m})
    @run_on_executor
    def wait(self):
        time.sleep(5)
        return "success", (4, 5, 6) # do not `yield 4, 5, 6` from an executor method
class chat_index_router(RequestHandler):
    """Serves the chat-room page."""
    def get(self):
        self.render("./templates/chat_index.html")
class chat_room_router(WebSocketHandler):
    """Broadcast chat room over WebSockets."""
    users = set()  # container holding the currently connected users
    def open(self):
        self.users.add(self)  # register the user once the connection is up
        for u in self.users:  # notify everyone already online
            u.write_message(
                u"[%s]-[%s]-进入聊天室" % (self.request.remote_ip, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    def on_message(self, message):
        for u in self.users:  # broadcast the message to all online users
            u.write_message(u"[%s]-[%s]-说:%s" % (
                self.request.remote_ip, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), message))
    def on_close(self):
        self.users.remove(self)  # drop the user once the connection closes
        for u in self.users:
            u.write_message(
                u"[%s]-[%s]-离开聊天室" % (self.request.remote_ip, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    def check_origin(self, origin):
        return True  # allow cross-origin WebSocket requests
class tailf_router(WebSocketHandler):
    """Streams the last lines of ip.txt to the client, tail -f style."""
    def open(self, *args):
        print("WebSocket opened")
    def on_message(self, message):
        def tail(filename, n=10):
            'Return the last n lines of a file'
            # NOTE(review): this loop blocks the IOLoop between sends and
            # only exits when "Control-C" appears in the tailed text.
            while True:
                lines = '<br>'.join(list(deque(open(filename), n)))
                print("lines:", lines)
                self.write_message(lines)
                if lines:
                    time.sleep(0.5)
                if "Control-C" in lines:
                    self.close()
                    break
        tail('ip.txt')
    def on_close(self):
        print("WebSocket closed")
class send_router(WebSocketHandler):
    """Echoes a JSON message back to the client 11 times, one per second,
    then closes the connection from the server side."""
    clients = set()
    def open(self):
        send_router.clients.add(self)
        self.write_message(json.dumps({'input': 'connected...'}))
        self.stream.set_nodelay(True)
    def on_message(self, message):
        message = json.loads(message)
        self.write_message(json.dumps({'input': 'response...'}))
        i = 0
        while i <= 10:
            i += 1
            self.write_message(json.dumps(message))
            time.sleep(1)
        # server-initiated close
        self.close()
        send_router.clients.remove(self)
    def on_close(self):
        # client-initiated close
        send_router.clients.remove(self)
# Route table: blocking vs asynchronous demo endpoints plus WebSocket demos.
app = tornado.web.Application([
    (r'/', index_router),
    (r'/text1', index_router),
    (r"/justnow", just_now_router),
    (r"/sleep60", sleep_long_time_router),
    (r"/wait", wait_router),
    (r'/ws', tailf_router),
    (r'/sleep1', sleep_async_router),
    (r'/sleep2', sleep_coroutine_router),
    (r'/sleep3', sleep_router),
    (r'/send', send_router),
    (r'/into_chat', chat_index_router),
    (r'/chat', chat_room_router),
], **settings)
if __name__ == '__main__':
app.listen(options.port)
IOLoop.instance().start()
# As always: once you choose async, the entire pipeline must be async.
| StarcoderdataPython |
1935815 | <reponame>lel352/Curso-Python<filename>aulapythonpoo/exercicio02/app.py
from aulapythonpoo.exercicio02.classes.calcipv4 import CalcIPv4
def main():
    """Usage examples for the CalcIPv4 helper."""
    # Construct from a CIDR prefix
    calc_ipv4_1 = CalcIPv4(ip='192.168.0.128', prefixo=30)
    print(f'IP: {calc_ipv4_1.ip}')
    print(f'Máscara: {calc_ipv4_1.mascara}')
    print(f'Rede: {calc_ipv4_1.rede}')
    print(f'Broadcast: {calc_ipv4_1.broadcast}')
    print(f'Prefixo: {calc_ipv4_1.prefixo}')
    print(f'Número de IPs da rede: {calc_ipv4_1.numero_ips}')
    print('#' * 80)
    # Construct from a dotted netmask
    calc_ipv4_2 = CalcIPv4(ip='192.168.0.128', mascara='255.255.255.192')
    print(f'IP: {calc_ipv4_2.ip}')
    print(f'Máscara: {calc_ipv4_2.mascara}')
    print(f'Rede: {calc_ipv4_2.rede}')
    print(f'Broadcast: {calc_ipv4_2.broadcast}')
    print(f'Prefixo: {calc_ipv4_2.prefixo}')
    print(f'Número de IPs da rede: {calc_ipv4_2.numero_ips}')
# if __name__ == '__main__':
main()
| StarcoderdataPython |
1933404 | <reponame>StrayRobots/stray
import numpy as np
from stray.scene import Scene
import os
import pickle
import pycocotools.mask as mask_util
import cv2
from stray import linalg
MISSING_SEGMENTATIONS = 1
INCORRECT_NUMBER_OF_SEGMENTATIONS = 2
def validate_segmentations(scene):
    """Exit the process if any bounding box lacks a complete set of
    per-frame segmentation pickles under <scene>/segmentation/instance_<i>.

    Exit codes: MISSING_SEGMENTATIONS when the directory is absent,
    INCORRECT_NUMBER_OF_SEGMENTATIONS when the pickle count does not match
    the number of images in the scene.
    """
    length = len(scene.get_image_filepaths())
    for bbox_id in range(len(scene.bounding_boxes)):
        segmentation_path = os.path.join(scene.scene_path, "segmentation", f"instance_{bbox_id}")
        if not os.path.exists(segmentation_path):
            print(f"Missing segmentations at {segmentation_path}")
            exit(MISSING_SEGMENTATIONS)
        elif len([f for f in os.listdir(segmentation_path) if ".pickle" in f]) != length:
            print(f"Wrong number of segmentations at {segmentation_path}")
            exit(INCORRECT_NUMBER_OF_SEGMENTATIONS)
def bbox_2d_from_mesh(camera, T_WC, object_mesh):
    """Project all mesh vertices into the image and return their
    axis-aligned 2D bounding box as [min_x, min_y, max_x, max_y]
    (not clipped to the image bounds)."""
    T_CW = np.linalg.inv(T_WC)
    vertices = object_mesh.vertices
    image_points = camera.project(vertices, T_CW)
    upper_left = image_points.min(axis=0)
    lower_right = image_points.max(axis=0)
    return upper_left.tolist() + lower_right.tolist()
def bbox_2d_from_pointcloud(camera, T_WC, pointcloud):
    """Project a world-frame point cloud and return its 2D bounding box,
    clamped to the image rectangle; [0, 0, 0, 0] when no point lies in
    front of the camera."""
    # filter out points behind camera.
    T_CW = np.linalg.inv(T_WC)
    pc_local = linalg.transform_points(T_CW, pointcloud)
    pc = pointcloud[pc_local[:, 2] > 0.0]
    if pc.shape[0] == 0:
        return [0, 0, 0, 0]
    points2d = camera.project(pc, np.linalg.inv(T_WC))
    upper_left = points2d.min(axis=0)
    image_size = np.array(camera.size)
    # Clamp both corners to [0, image_size].
    upper_left = np.maximum(points2d.min(axis=0), 0)
    upper_left = np.minimum(upper_left, image_size)
    lower_right = np.maximum(points2d.max(axis=0), 0)
    lower_right = np.minimum(lower_right, image_size)
    return upper_left.tolist() + lower_right.tolist()
def get_bbox_3d_corners(camera, T_WC, bbox_3d):
    """Project the 8 corners of an oriented 3D box into image coordinates.

    Corners are generated at +/- half-dimensions in the box frame, rotated
    by the box orientation and offset by its position before projection.
    """
    T_CW = np.linalg.inv(T_WC)
    size = bbox_3d.dimensions
    corners_world = []
    for x_bbox in [-size[0]/2, size[0]/2]:
        for y_bbox in [-size[1]/2, size[1]/2]:
            for z_bbox in [-size[2]/2, size[2]/2]:
                corners_world.append(bbox_3d.position + bbox_3d.orientation.as_matrix()@np.array([x_bbox, y_bbox, z_bbox]))
    image_points = camera.project(np.array(corners_world), T_CW)
    return image_points
def bbox_2d_from_bbox_3d(camera, T_WC, bbox_3d):
    """Return the axis-aligned 2D bounds [min_x, min_y, max_x, max_y] of
    the projected corners of an oriented 3D box (not clipped to the image)."""
    image_points = get_bbox_3d_corners(camera, T_WC, bbox_3d)
    upper_left = image_points.min(axis=0)
    lower_right = image_points.max(axis=0)
    return upper_left.tolist() + lower_right.tolist()
def bbox_2d_from_mask(scene, instance_id, i):
    """Tight [x_min, y_min, x_max, y_max] box from a stored instance mask.

    Reads the COCO-RLE pickle for frame `i` of the given instance and takes
    the bounding rectangle of the decoded binary mask.
    """
    mask_file = os.path.join(
        scene.scene_path, "segmentation", f"instance_{instance_id}", f"{i:06}.pickle"
    )
    with open(mask_file, "rb") as handle:
        segmentation = pickle.load(handle)
    mask = mask_util.decode(segmentation)
    x, y, w, h = cv2.boundingRect(mask)
    return [x, y, x + w, y + h]
def compute_instance_keypoints(camera, T_WC, instance, instance_keypoints, max_num_keypoints=0):
    """Project an instance's box-relative keypoints into the image.

    Each keypoint is given in normalized [-1, 1]^3 coordinates of the
    instance's oriented bounding box (scaled by half the box dimensions).
    Returns a flat detectron2-style list [x1, y1, v1, x2, y2, v2, ...] with
    visibility v=2 (labeled and visible), zero-padded to
    3 * max_num_keypoints entries.
    """
    T_CW = np.linalg.inv(T_WC)
    world_keypoints = [
        instance.position
        + instance.orientation.as_matrix() @ np.multiply(np.array(kp), instance.dimensions / 2)
        for kp in instance_keypoints
    ]
    image_keypoints = camera.project(np.array(world_keypoints), T_CW)
    flat_keypoints = []
    for image_point in image_keypoints:
        flat_keypoints.append(image_point[0])
        flat_keypoints.append(image_point[1])
        flat_keypoints.append(2)
    # Pad with (0, 0, 0) triplets up to 3 * max_num_keypoints values. The
    # previous bound (`< max_num_keypoints`) compared the flat length against
    # the keypoint count, under-padding and yielding keypoint lists whose
    # length disagreed with the `[0] * (max_num_keypoints * 3)` fallback used
    # in get_detectron2_dataset_function.
    while len(flat_keypoints) < max_num_keypoints * 3:
        flat_keypoints += [0, 0, 0]
    return flat_keypoints
def get_scene_dataset_metadata(scene_paths):
    """Scan the given scenes and summarize dataset-wide metadata.

    Returns a dict with the maximum keypoint count across all instances,
    a mapping from "instance_<id>" category names to contiguous indices
    (in first-seen order), and the corresponding `thing_classes` list.
    """
    instance_categories = []
    max_num_keypoints = 0
    for scene_path in scene_paths:
        scene = Scene(scene_path)
        metadata = scene.metadata
        for bbox_id in scene.bbox_categories:
            if bbox_id not in instance_categories:
                instance_categories.append(bbox_id)
            if metadata:
                matches = [
                    entry for entry in metadata["instances"]
                    if entry["instance_id"] == bbox_id
                ]
                if matches:
                    max_num_keypoints = max(
                        max_num_keypoints, len(matches[0].get("keypoints", []))
                    )
    mapping = {
        f"instance_{instance_id}": index
        for index, instance_id in enumerate(instance_categories)
    }
    return {
        'max_num_keypoints': max_num_keypoints,
        'instance_category_mapping': mapping,
        'thing_classes': list(mapping),
    }
def get_detectron2_dataset_function(scene_paths, dataset_metadata, use_bbox_from_mask, use_segmentation):
    """Return a zero-argument function producing detectron2 dataset dicts.

    dataset_metadata is the dict from get_scene_dataset_metadata. Boxes come
    either from the stored segmentation masks (use_bbox_from_mask) or from the
    projected 3D bounding boxes; segmentations and keypoints are attached when
    available.
    """
    def inner():
        examples = []
        for scene_path in scene_paths:
            scene = Scene(scene_path)
            if use_segmentation:
                validate_segmentations(scene)
            width, height = scene.image_size()
            images = scene.get_image_filepaths()
            metadata = scene.metadata
            image_id = 0
            max_num_keypoints = dataset_metadata["max_num_keypoints"]
            for image_path in images:
                filename = os.path.basename(image_path)
                # Frame index is encoded in the file name, e.g. "000042.jpg".
                image_idx = int(filename.split(".jpg")[0])
                annotations = []
                for bbox_id, bbox in enumerate(scene.bounding_boxes):
                    if use_bbox_from_mask:
                        bbox_flat = bbox_2d_from_mask(scene, bbox_id, image_idx)
                    else:
                        bbox_flat = bbox_2d_from_bbox_3d(scene.camera(), scene.poses[image_idx], bbox)
                    annotation = {
                        'category_id': dataset_metadata['instance_category_mapping'][f"instance_{bbox.instance_id}"],
                        'bbox': bbox_flat,
                        'bbox_mode': 0
                    }
                    if use_segmentation:
                        segmentation_file = os.path.join(
                            scene_path, "segmentation", f"instance_{bbox_id}", f"{image_idx:06}.pickle")
                        with open(segmentation_file, 'rb') as handle:
                            annotation["segmentation"] = pickle.load(handle)
                    if metadata:
                        matching = [instance for instance in metadata["instances"]
                                    if instance["instance_id"] == bbox.instance_id]
                        # Guard before indexing: previously matching[0] was
                        # dereferenced before the emptiness check, raising
                        # IndexError for instances missing from the metadata.
                        bbox_keypoints = matching[0].get("keypoints", []) if matching else []
                        if bbox_keypoints:
                            annotation["keypoints"] = compute_instance_keypoints(
                                scene.camera(), scene.poses[image_idx], bbox,
                                bbox_keypoints, max_num_keypoints)
                        else:
                            annotation["keypoints"] = [0 for _ in range(max_num_keypoints * 3)]
                    annotations.append(annotation)
                examples.append({
                    'file_name': image_path,
                    'image_id': image_id,
                    'height': height,
                    'width': width,
                    'annotations': annotations
                })
                image_id += 1
        return examples
    return inner
| StarcoderdataPython |
1811474 | from flask import Flask, jsonify, render_template, request
from datasource.covid19 import Covid19
# Flask application exposing the JSON datasource endpoints used by the
# dashboard panels (see the route comments below).
app = Flask(__name__)
# Returns 200 ok. Used for "Test connection" on the datasource config page.
@app.route('/', methods=['GET'])
def index():
    # Landing page; the datasource "Test connection" check only needs the 200.
    return render_template('index.html')
# Used by the find metric options on the query tab in panels
@app.route('/search', methods=['GET', 'POST'])
def search():
    # Return the list of available metric names for the panel query editor.
    covid19 = Covid19()
    return jsonify(covid19.metrics())
# Return metrics based on input
@app.route('/query', methods=['POST'])
def query():
    # Build one time series per requested target from the COVID-19 datasource.
    covid19 = Covid19()
    series = [
        {
            "target": target["target"],
            "datapoints": covid19.timeseries(target["target"]),
        }
        for target in request.json["targets"]
    ]
    return jsonify(series)
# Returns annotations
@app.route('/annotations')
def annotations():
    # No annotation support; always answer with an empty list.
    return jsonify([])
| StarcoderdataPython |
class Solution:
    def numDistinct(self, s: str, t: str) -> int:
        """Count the distinct subsequences of `s` that equal `t`.

        Rolling 1-D dynamic programming: counts[j] is the number of ways to
        form t[:j] from the prefix of `s` consumed so far. Iterating j
        backwards ensures each character of `s` is used at most once per
        subsequence. O(len(t)) memory instead of the full 2-D table.
        """
        target_len = len(t)
        counts = [1] + [0] * target_len  # one way to form the empty prefix
        for ch in s:
            for j in range(target_len, 0, -1):
                if t[j - 1] == ch:
                    counts[j] += counts[j - 1]
        return counts[target_len]
| StarcoderdataPython |
6608363 | <reponame>zefanyang/organseg_dags<filename>models/vnet/vnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
def passthrough(x, **kwargs):
    # Identity stand-in assigned in place of dropout when dropout is disabled
    # (see DownTransition.do1 / UpTransition.do1).
    return x
def non_linearity(elu, nchan):
    """Return an in-place ELU, or a per-channel PReLU when `elu` is falsy."""
    return nn.ELU(inplace=True) if elu else nn.PReLU(nchan)
class LUConv(nn.Module):
    """3x3x3 conv -> batch norm -> non-linearity, preserving channel count."""
    def __init__(self, nchan, elu):
        super(LUConv, self).__init__()
        self.relu1 = non_linearity(elu, nchan)
        self.conv1 = nn.Conv3d(nchan, nchan, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm3d(nchan)
    def forward(self, x):
        out = self.relu1(self.bn1(self.conv1(x)))
        return out
def _make_nConv(nchan, depth, elu):
    """Stack `depth` channel-preserving LUConv blocks into a Sequential."""
    return nn.Sequential(*(LUConv(nchan, elu) for _ in range(depth)))
class InputTransition(nn.Module):
    """Lift the 1-channel input volume to 16 channels with a residual add."""
    def __init__(self, outChans, elu):
        # NOTE(review): outChans is accepted but unused; the width is
        # hard-coded to 16 throughout this block.
        super(InputTransition, self).__init__()
        self.conv1 = nn.Conv3d(1, 16, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm3d(16)
        self.relu1 = non_linearity(elu, 16)
    def forward(self, x):
        # do we want a PRELU here as well?
        out = self.bn1(self.conv1(x))
        # Replicate the single input channel 16 times so it can be added
        # residually to the 16-channel conv output.
        x16 = torch.cat((x, x, x, x, x, x, x, x,
                         x, x, x, x, x, x, x, x), 1)
        out = self.relu1(torch.add(out, x16))
        return out
class DownTransition(nn.Module):
    """Halve resolution (strided conv), double channels, then a residual
    stack of nConvs LUConv blocks."""
    def __init__(self, inChans, nConvs, elu, dropout=False):
        super(DownTransition, self).__init__()
        outChans = 2*inChans
        self.down_conv = nn.Conv3d(inChans, outChans, kernel_size=2, stride=2)
        self.bn1 = nn.BatchNorm3d(outChans)
        self.do1 = passthrough  # replaced with Dropout3d below when enabled
        self.relu1 = non_linearity(elu, outChans)
        self.relu2 = non_linearity(elu, outChans)
        if dropout:
            self.do1 = nn.Dropout3d()
        self.ops = _make_nConv(outChans, nConvs, elu)
    def forward(self, x):
        down = self.relu1(self.bn1(self.down_conv(x)))
        out = self.do1(down)
        out = self.ops(out)
        # Residual connection around the conv stack.
        out = self.relu2(torch.add(out, down))
        return out
class UpTransition(nn.Module):
    """Double resolution (transposed conv), concatenate the encoder skip
    branch, then a residual stack of nConvs LUConv blocks."""
    def __init__(self, inChans, outChans, nConvs, elu, dropout=False):
        super(UpTransition, self).__init__()
        self.up_conv = nn.ConvTranspose3d(inChans, outChans // 2, kernel_size=2, stride=2)
        self.bn1 = nn.BatchNorm3d(outChans // 2)
        self.do1 = passthrough  # replaced with Dropout3d below when enabled
        self.do2 = nn.Dropout3d()  # always applied to the skip branch
        self.relu1 = non_linearity(elu, outChans // 2)
        self.relu2 = non_linearity(elu, outChans)
        if dropout:
            self.do1 = nn.Dropout3d()
        self.ops = _make_nConv(outChans, nConvs, elu)
    def forward(self, x, skipx):
        out = self.do1(x)
        skipxdo = self.do2(skipx)
        out = self.relu1(self.bn1(self.up_conv(out)))  # double resolution, half channels, e.g., 256 -> 128
        xcat = torch.cat((out, skipxdo), 1)  # 128 cat 128 -> 256
        out = self.ops(xcat)  # 256 -> 256
        out = self.relu2(torch.add(out, xcat))  # shortcut connection
        return out
class OutputTransition(nn.Module):
    """Map decoder features to 9 output channels (one per class); returns raw
    logits — softmax/log-softmax is left to the loss (see commented code)."""
    def __init__(self, inChans, elu):
        super(OutputTransition, self).__init__()
        self.conv1 = nn.Conv3d(inChans, 9, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm3d(9)
        self.conv2 = nn.Conv3d(9, 9, kernel_size=1)
        self.relu1 = non_linearity(elu, 9)
        # if nll:
        #     self.softmax = F.log_softmax
        # else:
        #     self.softmax = F.softmax
    def forward(self, x):
        # Reduce the input channels down to the 9 class channels.
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.conv2(out)
        # make channels the last axis
        # out = out.permute(0, 2, 3, 4, 1).contiguous()
        # flatten
        # out = out.view(out.numel() // 2, 2)
        # out = self.softmax(out)
        # treat channel 0 as the predicted output
        return out
class VNetKnl3(nn.Module):
    """V-Net style 3D encoder/decoder with 3x3x3 kernels producing 9-channel
    (9-class) segmentation logits."""
    # the number of convolutions in each layer corresponds
    # to what is in the actual prototxt, not the intent
    def __init__(self, elu=True):
        super(VNetKnl3, self).__init__()
        self.in_tr = InputTransition(16, elu)
        self.down_tr32 = DownTransition(16, 1, elu)
        self.down_tr64 = DownTransition(32, 2, elu)
        # The performance decreases when dropout is closed
        self.down_tr128 = DownTransition(64, 3, elu, dropout=True)
        self.down_tr256 = DownTransition(128, 2, elu, dropout=True)
        self.up_tr256 = UpTransition(256, 256, 2, elu, dropout=True)
        self.up_tr128 = UpTransition(256, 128, 2, elu, dropout=True)
        self.up_tr64 = UpTransition(128, 64, 1, elu)
        self.up_tr32 = UpTransition(64, 32, 1, elu)
        self.out_tr = OutputTransition(32, elu)
    def forward(self, x):
        # Encoder path (the number in each name is the output channel count).
        out16 = self.in_tr(x)
        out32 = self.down_tr32(out16)
        out64 = self.down_tr64(out32)
        out128 = self.down_tr128(out64)
        out256 = self.down_tr256(out128)
        # Decoder path with skip connections from the encoder.
        out = self.up_tr256(out256, out128)
        out = self.up_tr128(out, out64)
        out = self.up_tr64(out, out32)
        out = self.up_tr32(out, out16)
        out = self.out_tr(out)
        return out
| StarcoderdataPython |
9709855 | <reponame>PyDataMadrid2016/Conference-Info
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression()
# NOTE(review): df, base_plot and show come from earlier notebook cells.
# Fit attendance as a function of age and distance.
logit.fit(df[['age', 'distance']], df['attended'])
# Decision boundary: solve intercept + coef_age*age + coef_dist*x = 0 for age,
# i.e. the age at which the predicted probability is 0.5 at distance x.
def get_y(x): return -(logit.intercept_[0] + logit.coef_[0,1] * x) / logit.coef_[0,0]
plot = base_plot()
min_x, max_x = df['distance'].min(), df['distance'].max()
# Draw the boundary as a line across the observed distance range.
plot.line(x=[min_x, max_x],
          y=[get_y(min_x), get_y(max_x)],
          line_color='black',
          line_width=2)
_ = show(plot) | StarcoderdataPython |
4931911 | <reponame>iecedge/xos
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch
import mock
import pdb
import networkx as nx
import os
import sys
# Locate the xos project root relative to this test file so the synchronizer
# sources can be added to sys.path in setUp().
test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
xos_dir = os.path.join(test_path, "..", "..", "..")
class TestServices(unittest.TestCase):
    """Integration test for the synchronizer's dependency-cohort computation
    over Service / ServiceInstance models."""
    def setUp(self):
        # Save interpreter-global state mutated below so tearDown can restore it.
        self.sys_path_save = sys.path
        self.cwd_save = os.getcwd()
        sys.path.append(xos_dir)
        sys.path.append(os.path.join(xos_dir, "synchronizers", "new_base"))
        sys.path.append(
            os.path.join(xos_dir, "synchronizers", "new_base", "tests", "steps")
        )
        config = os.path.join(test_path, "test_config.yaml")
        from xosconfig import Config
        Config.clear()
        Config.init(config, "synchronizer-config-schema.yaml")
        from synchronizers.new_base.mock_modelaccessor_build import (
            build_mock_modelaccessor,
        )
        build_mock_modelaccessor(xos_dir, services_dir=None, service_xprotos=[])
        os.chdir(os.path.join(test_path, "..")) # config references tests/model-deps
        import event_loop
        # NOTE(review): bare `reload` is a Python 2 builtin; under Python 3
        # this would need importlib.reload.
        reload(event_loop)
        import backend
        reload(backend)
        from modelaccessor import model_accessor
        # import all class names to globals
        for (k, v) in model_accessor.all_model_classes.items():
            globals()[k] = v
        b = backend.Backend()
        steps_dir = Config.get("steps_dir")
        self.steps = b.load_sync_step_modules(steps_dir)
        self.synchronizer = event_loop.XOSObserver(self.steps)
    def tearDown(self):
        sys.path = self.sys_path_save
        os.chdir(self.cwd_save)
    def test_service_models(self):
        # Service/ServiceInstance are injected into globals() in setUp above.
        s = Service()
        a = ServiceInstance(owner=s)
        # A ServiceInstance depends on its owning Service, so the cohort order
        # is [s, a] for forward sync and [a, s] when deletion=True.
        cohorts = self.synchronizer.compute_dependent_cohorts([a, s], False)
        self.assertIn([s, a], cohorts)
        cohorts = self.synchronizer.compute_dependent_cohorts([s, a], False)
        self.assertIn([s, a], cohorts)
        cohorts = self.synchronizer.compute_dependent_cohorts([a, s], True)
        self.assertIn([a, s], cohorts)
        cohorts = self.synchronizer.compute_dependent_cohorts([s, a], True)
        self.assertIn([a, s], cohorts)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
5068425 | <reponame>joesingo/tom_education<filename>tom_education/utils.py
def assert_valid_suffix(filename, allowed_suffixes):
    """Raise AssertionError unless `filename` ends with one of the strings in
    `allowed_suffixes`."""
    # str.endswith accepts a tuple of candidate suffixes directly.
    if not filename.endswith(tuple(allowed_suffixes)):
        err_msg = (
            "File '{}' does not end an allowed filename suffix ({})"
            .format(filename, ', '.join(allowed_suffixes))
        )
        raise AssertionError(err_msg)
| StarcoderdataPython |
9619341 | import numpy as np
from hydroDL.utils import grid
from hydroDL.post import axplot, figplot
import os
import rnnSMAP
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import imp
# Re-import the package so interactive edits are picked up (imp is deprecated;
# kept for compatibility with this script's Python version).
imp.reload(rnnSMAP)
rnnSMAP.reload()
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 6})
# Spatial generalization test: the model trained on CONUSv2f1 is evaluated on
# the held-out CONUSv2f2 subset for year 2015. (The original comment said
# "temporal test", but the plot titles below label this "Spatial Test".)
trainName = 'CONUSv2f1'
testName = 'CONUSv2f2'
out = trainName+'_y15_Forcing_dr60'
rootDB = rnnSMAP.kPath['DB_L3_NA']
rootOut = rnnSMAP.kPath['Out_L3_NA']
# NOTE(review): predField/targetField are defined but unused below; the field
# names are passed as literals instead.
predField = 'LSTM'
targetField = 'SMAP'
# Load SMAP observations and the LSTM predictions for the test subset.
ds = rnnSMAP.classDB.DatasetPost(
    rootDB=rootDB, subsetName=testName, yrLst=[2015])
ds.readData(var='SMAP_AM', field='SMAP')
ds.readPred(rootOut=rootOut, out=out, field='LSTM')
statErr = ds.statCalError(predField='LSTM', targetField='SMAP')
lat = ds.crd[:, 0]
lon = ds.crd[:, 1]
# Rasterize per-site error metrics onto a regular lat/lon grid for mapping.
gridRMSE, uy, ux = grid.array2grid(statErr.RMSE, lat=lat, lon=lon)
gridCorr, uy, ux = grid.array2grid(statErr.rho, lat=lat, lon=lon)
fig, axes = plt.subplots(2, 1, figsize=(9, 10))
axplot.mapGrid(axes[0], uy, ux, gridRMSE, vRange=[0, 0.1], cmap=plt.cm.jet)
axes[0].set_title('Spatial Test RMSE')
axplot.mapGrid(axes[1], uy, ux, gridCorr, vRange=[0.5, 1], cmap=plt.cm.jet)
axes[1].set_title('Spatial Test Correlation')
fig.show()
# Mean error metrics (values are displayed when run interactively).
np.nanmean(statErr.RMSE)
np.nanmean(statErr.rho)
| StarcoderdataPython |
73090 | from pathlib import Path
import json
from django.core.management import call_command
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
import pytest
from geocontrib.models.project import Project
from geocontrib.models.user import UserLevelPermission
from geocontrib.models.user import User
def verify_or_create_json(filename, json_result):
    """Snapshot check: compare `json_result` to the stored file, or create it.

    If `filename` exists, assert its JSON content equals `json_result`;
    otherwise write `json_result` there as the new reference snapshot.
    """
    snapshot = Path(filename)
    if snapshot.exists():
        assert json.loads(snapshot.read_text()) == json_result
    else:
        snapshot.write_text(json.dumps(json_result))
@pytest.mark.skip("""
Test en echec : il faidrait lancer celery avec la conf de test
ou regarder la doc celery pour le tester
On peut lancer le test à la main comme ça
curl --user admin:passpass 'http://localhost:8000/api/import-tasks/' -F 'feature_type_slug=1-dfsdfs' -F 'json_file=@api/tests/data/test_features_featuretype_admin.json'
et ensuite controller qu'on a bien le même feature type via DRF : http://127.0.0.1:8000/api/features/?feature_type__slug=1-dfsdfs
""")
@pytest.mark.django_db
@pytest.mark.freeze_time('2021-08-05')
def test_import_post(api_client):
# Given
call_command("loaddata", "geocontrib/data/perm.json", verbosity=0)
call_command("loaddata", "api/tests/data/test_import_tasks.json", verbosity=0)
user = User.objects.get(username="admin")
api_client.force_authenticate(user=user)
# When importing some features
# create task
filename = 'export_projet.json'
with open("api/tests/data/test_features_featuretype_admin.json", 'rb') as fp:
simple_file = SimpleUploadedFile(filename,
fp.read(),
content_type='multipart/form-data')
result = api_client.post('/api/import-tasks/',
{
"feature_type_slug": "1-dfsdfs",
"json_file": simple_file,
})
assert result.status_code == 200
# run task
# Then Ensure admin feature type => get all published features of the feature type
result = api_client.get('/api/features/?feature_type__slug=1-dfsdfs')
assert result.status_code == 200
verify_or_create_json("api/tests/data/test_features_featuretype_admin.json", result.json())
| StarcoderdataPython |
4934854 | <filename>cyder/core/system/migrations/0001_initial.py
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the `system` and `system_key_value`
    tables together with their composite unique constraints."""
    def forwards(self, orm):
        # Adding model 'System'
        db.create_table('system', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('department', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('location', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ))
        db.send_create_signal('system', ['System'])
        # Adding unique constraint on 'System', fields ['name', 'location', 'department']
        db.create_unique('system', ['name', 'location', 'department'])
        # Adding model 'SystemKeyValue'
        db.create_table('system_key_value', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('is_quoted', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('system', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['system.System'])),
        ))
        db.send_create_signal('system', ['SystemKeyValue'])
        # Adding unique constraint on 'SystemKeyValue', fields ['key', 'value', 'system']
        db.create_unique('system_key_value', ['key', 'value', 'system_id'])
    def backwards(self, orm):
        # Constraints are dropped before their tables, mirroring forwards().
        # Removing unique constraint on 'SystemKeyValue', fields ['key', 'value', 'system']
        db.delete_unique('system_key_value', ['key', 'value', 'system_id'])
        # Removing unique constraint on 'System', fields ['name', 'location', 'department']
        db.delete_unique('system', ['name', 'location', 'department'])
        # Deleting model 'System'
        db.delete_table('system')
        # Deleting model 'SystemKeyValue'
        db.delete_table('system_key_value')
    # Frozen ORM state used by South to materialize `orm` above.
    models = {
        'system.system': {
            'Meta': {'unique_together': "(('name', 'location', 'department'),)", 'object_name': 'System', 'db_table': "'system'"},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'department': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'system.systemkeyvalue': {
            'Meta': {'unique_together': "(('key', 'value', 'system'),)", 'object_name': 'SystemKeyValue', 'db_table': "'system_key_value'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_quoted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['system.System']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }
complete_apps = ['system'] | StarcoderdataPython |
8100588 | import pytest
from starlette.applications import Starlette
from starlette.exceptions import HTTPException
from starlette.testclient import TestClient
import layaberr.starlette
@pytest.fixture
def client():
    """Starlette test client wired with layaberr's exception handlers and
    three routes exercising the 400/400-with-detail/500 paths."""
    app = Starlette(exception_handlers=layaberr.starlette.exception_handlers)

    @app.route("/bad_request")
    def bad_request(request):
        raise HTTPException(400)

    @app.route("/bad_request_detail")
    def bad_request_detail(request):
        # Renamed: this handler previously redefined `bad_request` (F811),
        # shadowing the function above even though both routes still worked.
        raise HTTPException(400, detail="Error message")

    @app.route("/default_error")
    def default_error(request):
        raise Exception

    # raise_server_exceptions=False lets the 500 handler produce a response
    # instead of re-raising into the test.
    return TestClient(app, raise_server_exceptions=False)
def test_bad_request(client):
    # Without a detail, the body falls back to the standard reason phrase.
    response = client.get("/bad_request")
    assert response.status_code == 400
    assert response.text == "Bad Request"
def test_bad_request_detail(client):
    # A detail string replaces the default reason phrase in the body.
    response = client.get("/bad_request_detail")
    assert response.status_code == 400
    assert response.text == "Error message"
def test_default(client):
    # Unhandled exceptions map to 500 with a JSON-encoded empty string body.
    response = client.get("/default_error")
    assert response.status_code == 500
    assert response.json() == ""
| StarcoderdataPython |
3217253 | <filename>parkstay/cron.py
from datetime import date, timedelta
from django_cron import CronJobBase, Schedule
from parkstay.models import Booking
from parkstay.reports import outstanding_bookings
from parkstay.emails import send_booking_confirmation
from parkstay.utils import oracle_integration
class UnpaidBookingsReportCronJob(CronJobBase):
    """Daily 01:00 job producing the outstanding (unpaid) bookings report."""
    RUN_AT_TIMES = ['01:00']
    schedule = Schedule(run_at_times=RUN_AT_TIMES)
    code = 'parkstay.unpaid_bookings_report'
    def do(self):
        outstanding_bookings()
class OracleIntegrationCronJob(CronJobBase):
    """Daily 01:00 job running Oracle financial integration for yesterday."""
    RUN_AT_TIMES = ['01:00']
    schedule = Schedule(run_at_times=RUN_AT_TIMES)
    code = 'parkstay.oracle_integration'
    def do(self):
        # Process the previous day's transactions; False = not an override run.
        oracle_integration(str(date.today() - timedelta(days=1)), False)
class SendBookingsConfirmationCronJob(CronJobBase):
    """Every 5 minutes, send confirmations for paid, un-notified bookings."""
    RUN_EVERY_MINS = 5
    # Fix: an interval must be passed as run_every_mins. The original passed
    # the integer as run_at_times, which expects a list of 'HH:MM' strings
    # (cf. the jobs above), so the schedule was misconfigured.
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
    code = 'parkstay.send_booking_confirmations'

    def do(self):
        # Clear the sent flag for bookings that are no longer paid, so they
        # receive a fresh confirmation once payment completes.
        for b in Booking.objects.all():
            if not b.paid and b.confirmation_sent:
                b.confirmation_sent = False
                b.save()
        # Send confirmations for paid bookings that have not been notified.
        # (The former `try: ... except: raise` wrapper was a no-op and was
        # removed; exceptions still propagate unchanged.)
        for b in Booking.objects.filter(confirmation_sent=False):
            if b.paid:
                send_booking_confirmation(b)
| StarcoderdataPython |
1661644 | <filename>website/forms.py<gh_stars>1-10
from flask_wtf import Form
from wtforms import StringField, BooleanField, PasswordField, HiddenField, validators
from wtforms.validators import DataRequired
# Whitelist: letters, digits, dots and spaces only; anything else is rejected.
search_validator = validators.Regexp('^[a-zA-Z0-9\. ]*$', message="Search contains invalid characters.")
class SearchForm(Form):
    # Free-text search box, validated against the whitelist and capped at 100 chars.
    keywords = StringField("keywords", validators=[search_validator, validators.Length(max=100, message="Search too long.")])
| StarcoderdataPython |
9720439 | <reponame>mrmonkington/gaphor
from __future__ import annotations
import ast
from dataclasses import replace
from math import atan2
import gaphas
from gaphas.aspect.connector import ConnectionSink
from gaphas.aspect.connector import Connector as ConnectorAspect
from gaphas.connector import Handle
from gaphas.geometry import Rectangle, distance_rectangle_point
from gaphas.solver.constraint import BaseConstraint
from gaphor.core.modeling.diagram import Diagram
from gaphor.core.modeling.event import RevertibeEvent
from gaphor.core.modeling.presentation import Presentation, S
from gaphor.core.modeling.properties import attribute
from gaphor.core.styling import Style, merge_styles
from gaphor.diagram.shapes import stroke
from gaphor.diagram.text import TextAlign, text_point_at_line
# Marker (mixin) classes used to tag presentation types; they carry no behavior.
class Named:
    """Marker for any NamedElement presentations."""


class Classified(Named):
    """Marker for Classifier presentations."""
def from_package_str(item):
    """Display namespace info when it differs from the diagram's (or the
    parent's) namespace; otherwise return "" — or False when there is no
    subject/diagram or the parent lives in a different namespace."""
    subject, diagram = item.subject, item.diagram
    if not (subject and diagram):
        return False
    namespace = subject.namespace
    parent = item.parent
    # A parent (e.g. an interaction) in another namespace suppresses the label.
    if parent and parent.subject and parent.subject.namespace is not namespace:
        return False
    if namespace is item.diagram.owner:
        return ""
    return f"(from {namespace.name})"
def postload_connect(item: gaphas.Item, handle: gaphas.Handle, target: gaphas.Item):
    """Helper function: when loading a model, handles should be connected as
    part of the `postload` step.
    This function finds a suitable spot on the `target` item to connect
    the handle to.
    """
    connector = ConnectorAspect(item, handle, item.diagram.connections)
    # distance=1e4: a generous search radius — presumably so any stored handle
    # position still snaps to the target; confirm against gaphas ConnectionSink.
    sink = ConnectionSink(target, distance=1e4)
    connector.connect(sink)
class HandlePositionEvent(RevertibeEvent):
    """Undo/redo event recording a handle's previous position."""
    requires_transaction = False
    def __init__(self, element, index, old_value):
        super().__init__(element)
        self.index = index  # index of the handle on the element
        self.old_value = old_value  # handle position before the move
    def revert(self, target):
        target.handles()[self.index].pos = self.old_value
        target.request_update()
class HandlePositionUpdate:
    """Mixin emitting a HandlePositionEvent whenever a watched handle moves."""
    def watch_handle(self, handle):
        handle.pos.add_handler(self._on_handle_position_update)
    def remove_watch_handle(self, handle):
        handle.pos.remove_handler(self._on_handle_position_update)
    def _on_handle_position_update(self, position, old):
        # Find which handle owns this position object; the for/else falls
        # through (returns) for positions we no longer track.
        for index, handle in enumerate(self.handles()):  # type: ignore[attr-defined]
            if handle.pos is position:
                break
        else:
            return
        self.handle(HandlePositionEvent(self, index, old))  # type: ignore[attr-defined]
# Note: the official documentation uses the terms "Shape" and "Edge" for what this module calls element and line.
class ElementPresentation(gaphas.Element, HandlePositionUpdate, Presentation[S]):
    """Presentation for Gaphas Element (box-like) items.
    To create a shape (boxes, text), assign a shape to `self.shape`. If
    the shape can change, for example, because styling needs to change,
    implement the method `update_shapes()` and set self.shape there.
    """
    # Side names indexed by port position — assumes gaphas.Element exposes its
    # four edge ports in this order; confirm against gaphas.
    _port_sides = ("top", "right", "bottom", "left")
    def __init__(self, diagram: Diagram, id=None, shape=None, width=100, height=50):
        super().__init__(connections=diagram.connections, diagram=diagram, id=id, width=width, height=height)  # type: ignore[call-arg]
        self._shape = shape
        for handle in self.handles():
            self.watch_handle(handle)
        # Keep width/height at or above the initial size (see
        # MinimalValueConstraint below).
        diagram.connections.add_constraint(
            self, MinimalValueConstraint(self.min_width, width)
        )
        diagram.connections.add_constraint(
            self, MinimalValueConstraint(self.min_height, height)
        )
    def port_side(self, port):
        # Map a port object back to the side name it sits on.
        return self._port_sides[self._ports.index(port)]
    def _set_shape(self, shape):
        self._shape = shape
        self.request_update()
    # Assigning to `shape` goes through _set_shape, which schedules a redraw.
    shape = property(lambda s: s._shape, _set_shape)
    def update_shapes(self, event=None):
        """Updating the shape configuration, e.g. when extra elements have to
        be drawn or when styling changes."""
    def update(self, context):
        # Build the shape lazily, then derive the minimum size from it.
        if not self.shape:
            self.update_shapes()
        if self.shape:
            self.min_width, self.min_height = self.shape.size(context)
    def draw(self, context):
        # Draw relative to the top-left handle.
        x, y = self.handles()[0].pos
        cairo = context.cairo
        cairo.translate(x, y)
        self._shape.draw(
            context,
            Rectangle(0, 0, self.width, self.height),
        )
    def save(self, save_func):
        # Persist geometry in addition to the base presentation state.
        save_func("matrix", tuple(self.matrix))
        save_func("width", self.width)
        save_func("height", self.height)
        super().save(save_func)
    def load(self, name, value):
        # Width/height are stored as literals; everything else is delegated.
        if name == "width":
            self.width = ast.literal_eval(value)
        elif name == "height":
            self.height = ast.literal_eval(value)
        else:
            super().load(name, value)
    def postload(self):
        super().postload()
        self.update_shapes()
class MinimalValueConstraint(BaseConstraint):
    """Solver constraint keeping a variable at or above a minimum."""

    def __init__(self, var, min):
        super().__init__(var)
        self._min = min

    def solve_for(self, var):
        # Renamed local to avoid shadowing the builtin `min`.
        minimum = self._min
        # Nothing to do when asked to solve for the minimum itself.
        if var is minimum:
            return
        var.value = max(var.value, minimum)
class LinePresentation(gaphas.Line, HandlePositionUpdate, Presentation[S]):
    """Presentation for line-like (edge) items, with optional text shapes
    rendered at the head, middle and tail of the line."""

    def __init__(
        self,
        diagram: Diagram,
        id=None,
        style: Style | None = None,
        shape_head=None,
        shape_middle=None,
        shape_tail=None,
    ):
        super().__init__(connections=diagram.connections, diagram=diagram, id=id)  # type: ignore[call-arg]
        # Fresh dict per instance: the previous `style: Style = {}` signature
        # was a shared mutable default argument.
        self.style = {} if style is None else style
        self.shape_head = shape_head
        self.shape_middle = shape_middle
        self.shape_tail = shape_tail
        self.fuzziness = 2
        # Bounding rectangles of the text shapes, computed in
        # update_shape_bounds(); None until the first update.
        self._shape_head_rect = None
        self._shape_middle_rect = None
        self._shape_tail_rect = None
        self.watch("orthogonal", self._on_orthogonal).watch(
            "horizontal", self._on_horizontal
        )
        self.watch_handle(self.head)
        self.watch_handle(self.tail)

    head = property(lambda self: self._handles[0])
    tail = property(lambda self: self._handles[-1])
    orthogonal: attribute[int] = attribute("orthogonal", int, 0)
    horizontal: attribute[int] = attribute("horizontal", int, 0)

    @property
    def middle_shape_size(self) -> Rectangle:
        return self._shape_middle_rect

    def insert_handle(self, index: int, handle: Handle) -> None:
        super().insert_handle(index, handle)
        self.watch_handle(handle)

    def remove_handle(self, handle: Handle) -> None:
        self.remove_watch_handle(handle)
        super().remove_handle(handle)

    def update_shape_bounds(self, context):
        # Recompute the head/middle/tail text rectangles along the line.
        def shape_bounds(shape, align):
            if shape:
                size = shape.size(context)
                x, y = text_point_at_line(points, size, align)
                return Rectangle(x, y, *size)
        points = [h.pos for h in self.handles()]
        self._shape_head_rect = shape_bounds(self.shape_head, TextAlign.LEFT)
        self._shape_middle_rect = shape_bounds(self.shape_middle, TextAlign.CENTER)
        self._shape_tail_rect = shape_bounds(self.shape_tail, TextAlign.RIGHT)

    def point(self, x, y):
        """Given a point (x, y) return the distance to the diagram item."""
        # Distance to the line itself, or to any of the text shapes — whichever
        # is closest.
        d0 = super().point(x, y)
        ds = [
            distance_rectangle_point(shape, (x, y))
            for shape in (
                self._shape_head_rect,
                self._shape_middle_rect,
                self._shape_tail_rect,
            )
            if shape
        ]
        return min(d0, *ds) if ds else d0

    def draw(self, context):
        # Draw a line end rotated towards its neighboring handle.
        def draw_line_end(end_handle, second_handle, draw):
            pos, p1 = end_handle.pos, second_handle.pos
            angle = atan2(p1.y - pos.y, p1.x - pos.x)
            cr.save()
            try:
                cr.translate(*pos)
                cr.rotate(angle)
                draw(context)
            finally:
                cr.restore()
        style = merge_styles(context.style, self.style)
        context = replace(context, style=style)
        self.update_shape_bounds(context)
        cr = context.cairo
        handles = self._handles
        draw_line_end(handles[0], handles[1], self.draw_head)
        for h in self._handles[1:-1]:
            cr.line_to(*h.pos)
        draw_line_end(handles[-1], handles[-2], self.draw_tail)
        stroke(context)
        for shape, rect in (
            (self.shape_head, self._shape_head_rect),
            (self.shape_middle, self._shape_middle_rect),
            (self.shape_tail, self._shape_tail_rect),
        ):
            if shape:
                shape.draw(context, rect)

    def save(self, save_func):
        def save_connection(name, handle):
            c = self._connections.get_connection(handle)
            if c:
                save_func(name, c.connected)
        super().save(save_func)
        save_func("matrix", tuple(self.matrix))
        points = [tuple(map(float, h.pos)) for h in self.handles()]
        save_func("points", points)
        save_connection("head-connection", self.head)
        save_connection("tail-connection", self.tail)

    def load(self, name, value):
        if name == "points":
            points = ast.literal_eval(value)
            # A Line starts with two handles; insert extras for middle points.
            for _ in range(len(points) - 2):
                h = Handle((0, 0))
                self._handles.insert(1, h)
                self.watch_handle(h)
            for i, p in enumerate(points):
                self.handles()[i].pos = p
            self._update_ports()
        elif name in ("head_connection", "head-connection"):
            # Connections are stored and resolved later, in postload().
            self._load_head_connection = value
        elif name in ("tail_connection", "tail-connection"):
            self._load_tail_connection = value
        else:
            super().load(name, value)

    def postload(self):
        super().postload()
        if self.orthogonal:
            self._set_orthogonal(self.orthogonal)
        if hasattr(self, "_load_head_connection"):
            postload_connect(self, self.head, self._load_head_connection)
            assert self._connections.get_connection(self.head)
            del self._load_head_connection
        if hasattr(self, "_load_tail_connection"):
            postload_connect(self, self.tail, self._load_tail_connection)
            assert self._connections.get_connection(self.tail)
            del self._load_tail_connection

    def _on_orthogonal(self, event):
        self._set_orthogonal(event.new_value)

    def _on_horizontal(self, event):
        self._set_horizontal(event.new_value)
| StarcoderdataPython |
6606272 | <reponame>Gianfra45/Proyecto---Programacion
def estCondicional01():
    """Conditional-structure demo: compute the amount to pay for a pencil order."""
    print("Ejemplo estructura Condicional en Python")
    montoP=0
    # Input: number of pencils ordered.
    cantidadX=int(input("Ingrese la cantidad de lapices:"))
    # Bulk discount: pay 0.80 per pencil for 1000+ units, 0.90 otherwise.
    if cantidadX>=1000:
        montoP=cantidadX*0.80
    else:
        montoP=cantidadX*0.90
    # Output the amount to pay.
    print("El monto a pagar es:", montoP)
def muestramenoredad():
    """Read three people's names and ages and print the youngest one(s).

    Ties are reported together ("A y B", or "A, B y C"). This fixes the
    original elif chain, which mislabeled the p1 == p3 < p2 tie (the else
    branch reported "p2 y p3" instead of "p1 y p3").
    """
    # Input (prompts kept byte-identical to the original).
    p1nombre=input("Ingrese nombre de 1era persona:")
    p1edad=int(input("Ingrese edad de 1era persona:"))
    p2nombre=input("Ingrese nombre de 2era persona:")
    p2edad=int(input("Ingrese edad de 2era persona:"))
    p3nombre=input("Ingrese nombre de 3era persona:")
    p3edad=int(input("Ingrese edad de 3era persona:"))
    # Process: collect every person sharing the minimum age.
    personas = [(p1nombre, p1edad), (p2nombre, p2edad), (p3nombre, p3edad)]
    pedad = min(edad for _, edad in personas)
    nombres = [nombre for nombre, edad in personas if edad == pedad]
    if len(nombres) == 1:
        pnombre = nombres[0]
    else:
        # "A y B" for two, "A, B y C" for three — same format as the original.
        pnombre = ", ".join(nombres[:-1]) + " y " + nombres[-1]
    # Output.
    print("La(s):persona(s)", pnombre, "tiene(n):", pedad)
# Run both demos. The original called estCondicional(), which is not
# defined anywhere in this module and raised NameError; the function
# defined above is estCondicional01().
estCondicional01()
muestramenoredad()
9786727 | # Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import random
from tqdm import tqdm
def main():
    """Generate train/valid TSV pairs for an amount-normalisation task.

    Each line is "<input> TAB <target>" where the target is always the
    number with a two-digit comma-decimal fraction (e.g. "1234,56").
    Most inputs mirror that comma format; a small random slice instead
    uses a dot decimal or no fraction at all, so a model learns to
    normalise both spellings.
    """
    # Training pool: heavy oversampling of 4-digit numbers, plus 5- and
    # 6-digit ranges and 15k small (< 1000) numbers. Duplicates across
    # the += blocks are intentional.
    train_numbers = random.sample(range(1000, 10000), 5000)
    train_numbers += random.sample(range(1000, 10000), 5000)
    train_numbers += random.sample(range(1000, 10000), 5000)
    train_numbers += random.sample(range(10000, 100000), 5000)
    train_numbers += random.sample(range(100000, 1000000), 5000)
    for idx in range(1, 16):
        train_numbers += random.sample(range(0, 1000), 1000)
    # Validation pool: mostly small numbers, a few large ones.
    test_numbers = random.sample(range(0, 1000), 1000)
    test_numbers += random.sample(range(0, 1000), 1000)
    test_numbers += random.sample(range(0, 10000), 1000)
    test_numbers += random.sample(range(0, 100000), 500)
    test_numbers += random.sample(range(0, 1000000), 500)
    random.shuffle(train_numbers)
    random.shuffle(test_numbers)
    os.makedirs(os.path.join("data", "amount"), exist_ok=True)
    # Write one TSV per phase under data/amount/.
    for phase, numbers in [("train", train_numbers), ("valid", test_numbers)]:
        with open(os.path.join("data", "amount", "{}.tsv".format(phase)), "w") as fp:
            for number in tqdm(numbers, total=len(numbers)):
                # ~5% of samples: dot-decimal (or fraction-less) input.
                if random.sample(range(1, 10000), 1)[0] % 20 == 0:
                    if random.sample(range(1, 10000), 1)[0] % 10 == 0:
                        fp.write(str(number) + "\t" + str(number) + ",00" + "\n")
                    else:
                        exp = "{}".format(random.sample(range(0, 100), 1)[0])
                        if len(exp) == 1:
                            exp = "0" + exp
                        fp.write(str(number) + "." + exp + "\t" + str(number) + "," + exp + "\n")
                else:
                    # Remaining ~95%: comma-decimal input matching the
                    # target style (thousands-grouping experiments were
                    # disabled below).
                    fnum = str(number)
                    # if len(fnum) > 3:
                    #     fnum = fnum[:-3] + "," + fnum[-3:]
                    #     if len(fnum) == 7:
                    #         if random.sample(range(1, 10000), 1)[0] % 2 == 0:
                    #             fnum = fnum[0] + ',' + fnum[1:]
                    if random.sample(range(1, 10000), 1)[0] % 20 == 0:
                        fp.write(fnum + "\t" + str(number) + ",00" + "\n")
                    else:
                        exp = "{}".format(random.sample(range(0, 100), 1)[0])
                        if len(exp) == 1:
                            exp = "0" + exp
                        fp.write(fnum + "," + exp + "\t" + str(number) + "," + exp + "\n")
if __name__ == '__main__':
main()
| StarcoderdataPython |
6411973 | #!/usr/bin/env python3
import os
import sys
# Add the boundfield_renderer app to the PYTHONPATH
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
if __name__ == "__main__":
    # Standard Django entry point: point at this project's settings
    # module and delegate to the management CLI (runserver, migrate...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| StarcoderdataPython |
3459951 | import requests
from bs4 import BeautifulSoup
import csv
# Fetch the page and collect the unique href target of every <a> tag.
page = requests.get('https://www.census.gov/programs-surveys/popest.html')
print(page.status_code)
soup = BeautifulSoup(page.text, 'html.parser')
link_set = set()
for link in soup.find_all('a'):
    web_links = link.get("href")  # may be None for anchors without href
    print(web_links)
    link_set.add(web_links)
# Write one link per row. The context manager guarantees the file is
# closed even if a write raises (the original leaked the handle in
# that case).
with open('code_python.csv', 'w+', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['Links'])
    for link in link_set:
        writer.writerow([link])
| StarcoderdataPython |
11365144 | """Diagnostics support for RainMachine."""
from __future__ import annotations
from typing import Any
from regenmaschine.controller import Controller
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_PASSWORD
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DATA_CONTROLLER, DATA_COORDINATOR, DOMAIN
TO_REDACT = {
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_PASSWORD,
}
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
    """Return redacted diagnostics for a RainMachine config entry."""
    domain_data = hass.data[DOMAIN][entry.entry_id]
    coordinators: dict[str, DataUpdateCoordinator] = domain_data[DATA_COORDINATOR]
    controller: Controller = domain_data[DATA_CONTROLLER]

    # Entry metadata, with sensitive fields (coordinates, password)
    # redacted before export.
    entry_info = {
        "title": entry.title,
        "data": async_redact_data(entry.data, TO_REDACT),
        "options": dict(entry.options),
    }
    # One snapshot per API category; the comprehension variable is
    # deliberately not named "controller" to avoid shadowing the
    # Controller above.
    coordinator_data = {
        api_category: coordinator.data
        for api_category, coordinator in coordinators.items()
    }
    controller_info = {
        "api_version": controller.api_version,
        "hardware_version": controller.hardware_version,
        "name": controller.name,
        "software_version": controller.software_version,
    }
    return {
        "entry": entry_info,
        "data": {
            "coordinator": async_redact_data(coordinator_data, TO_REDACT),
            "controller": controller_info,
        },
    }
| StarcoderdataPython |
9620636 | """ Functions to access node data from the Osmo database
"""
import dateutil
import pandas as pd
import pytz
import sqlalchemy
import textwrap
from getpass import getpass
from osmo_jupyter import timezone
def configure_database():
""" Configure a database object for read-only technician access to Osmo data.
Requires user input to collect database password.
Returns:
sqlalchemy Engine object which can be used with other functions in this module.
Raises:
ValueError: if connection can't be made, usually because password is incorrect
"""
print("Enter database password: (if you don't know it, ask someone who does)")
db_engine = sqlalchemy.create_engine(
"mysql+pymysql://{user}:{password}@{host}/{dbname}".format(
user="technician",
password=getpass(), # Ask user for the password to avoid checking it in.
dbname="osmobot",
host="osmobot-db2.cxvkrr48hefm.us-west-2.rds.amazonaws.com",
)
)
try:
connection = db_engine.connect()
except sqlalchemy.exc.OperationalError as e:
raise ValueError(
textwrap.dedent(
f"""Couldn't connect to the database - most likely you typed the password incorrectly.
Please try again.\n Original error: {e}
"""
)
)
else:
connection.close()
return db_engine
SQL_TIME_FORMAT = "%Y-%m-%d %H:%M:%S" # Time format favored by mysql
def _to_aware(local_time):
""" get a timezone-aware object from local time string(s).
Internal function, used only in DB access.
Args:
local_time: string of local time in any valid non-timezone-aware ISO format
Time should be in Osmo HQ local time.
eg. ('2012-01-01 12:00:00', '2012-01-01 12:00', '2012-01-01')
Returns:
timezone-aware datetime object
"""
time = dateutil.parser.isoparse(local_time)
return timezone.OSMO_HQ_TIMEZONE.localize(time)
def _to_utc_string(local_time):
""" Convert a local time string to a string in UTC that can be passed to the database
Internal function, used only in DB access.
Args:
local_time: string of local time in any valid non-timezone-aware ISO format
Time should be in Osmo HQ local time.
eg. ('2012-01-01 12:00:00', '2012-01-01 12:00', '2012-01-01')
Returns:
UTC time string that can be used, for instance, for database queries
"""
aware_datetime = _to_aware(local_time)
return aware_datetime.astimezone(pytz.utc).strftime(SQL_TIME_FORMAT)
def _get_calculation_details_query(
    node_ids,
    start_time_local,
    end_time_local,
    include_hub_id=False,
    downsample_factor=None,
):
    """ Provide a SQL query to download calculation details. Internal function
    Args:
        node_ids: iterable of node IDs to get data for
        start_time_local: string of ISO-formatted start datetime in local time, inclusive
        end_time_local: string of ISO-formatted end datetime in local time, inclusive
        include_hub_id: if True, the output will include a 'hub_id' column.
            Default False because the request including hub_id takes extra time.
        downsample_factor: if this is a number, it will be used to select fewer rows.
            You should get *roughly* n / downsample_factor samples. If None, no downsampling will occur.
    Returns:
        SQL query that can be used to get the desired data
    """
    # NOTE(review): all values are interpolated straight into the SQL
    # text (no bind parameters). Inputs must come from trusted code:
    # node_ids should be integers and the time strings are produced by
    # _to_utc_string() just below.
    start_utc_string = _to_utc_string(start_time_local)
    end_utc_string = _to_utc_string(end_time_local)
    # Downsampling keys off reading_id modulo the factor, which gives a
    # roughly uniform subsample without a second query.
    downsample_clause = (
        f"AND MOD(calculation_detail.reading_id, {downsample_factor}) = 0"
        if downsample_factor is not None
        else ""
    )
    # hub_id lives on the reading table, so requesting it forces a join.
    select_clause = (
        "calculation_detail.*, reading.hub_id"
        if include_hub_id
        else "calculation_detail.*"
    )
    source_table = (
        "calculation_detail join reading on reading.reading_id = calculation_detail.reading_id"
        if include_hub_id
        else "calculation_detail"
    )
    nodes_selector = "({})".format(", ".join(str(n) for n in node_ids))
    return f"""
        SELECT {select_clause}
        FROM ({source_table})
        WHERE calculation_detail.node_id IN {nodes_selector}
        AND calculation_detail.create_date BETWEEN "{start_utc_string}" AND "{end_utc_string}"
        {downsample_clause}
        ORDER BY calculation_detail.create_date
    """
def load_calculation_details(
    db_engine,
    node_ids,
    start_time_local,
    end_time_local,
    include_hub_id=False,
    downsample_factor=None,
):
    """ Load node data from the calculation_details table, optionally with hub ID included from the readings table
    Args:
        db_engine: database engine created using `connect_to_db`
        node_ids: iterable of node IDs to get data for
        start_time_local: string of ISO-formatted start datetime in local time, inclusive
        end_time_local: string of ISO-formatted end datetime in local time, inclusive
        include_hub_id: if True, the output will include a 'hub_id' column.
            Default False because the request including hub_id takes extra time.
        downsample_factor: if this is a number, it will be used to select fewer rows.
            You should get *roughly* n / downsample_factor samples.
    Returns:
        a pandas.DataFrame of data from the node IDs provided.
    Raises:
        sqlalchemy.OperationalError: database connection is not working
        This is often due to a network disconnect.
        In this case, a good debugging step is to reconnect to the database.
    """
    connection = db_engine.connect()
    try:
        # Query through the connection we just opened. The original
        # opened a connection but then queried via the engine anyway,
        # and leaked the connection if read_sql raised.
        calculation_details = pd.read_sql(
            _get_calculation_details_query(
                node_ids,
                start_time_local,
                end_time_local,
                include_hub_id,
                downsample_factor,
            ),
            connection,
        )
    finally:
        # Always return the connection to the pool, even on error.
        connection.close()
    return calculation_details
def get_node_temperature_data(
    start_time_local, end_time_local, node_id, downsample_factor=1
):
    """ Load node temperature data only from the calculation_details table
    Args:
        start_time_local: string of ISO-formatted start datetime in local time, inclusive
        end_time_local: string of ISO-formatted end datetime in local time, inclusive
        node_id: node ID to get data from
        downsample_factor: if this is a number, it will be used to select fewer rows.
            You should get *roughly* n / downsample_factor samples.
    Returns:
        a pandas.DataFrame of temperature data (°C) in local time from the node IDs provided.
    """
    db_engine = configure_database()
    # Pass downsample_factor by keyword. The original passed it as the
    # fifth positional argument, where load_calculation_details expects
    # include_hub_id — so downsampling silently never happened (and a
    # truthy factor forced the hub_id join instead).
    raw_node_data = load_calculation_details(
        db_engine,
        [node_id],
        start_time_local,
        end_time_local,
        downsample_factor=downsample_factor,
    )
    print(f"{len(raw_node_data)} rows retrieved.")
    # Keep only temperature rows; convert timestamps to local time.
    temperature_data = raw_node_data[
        raw_node_data["calculation_dimension"] == "temperature"
    ]
    temperature_data_only = pd.DataFrame(
        {
            "timestamp": timezone.utc_series_to_local(temperature_data["create_date"]),
            "temperature": temperature_data["calculated_value"],
        }
    ).reset_index(drop=True)
    return temperature_data_only
| StarcoderdataPython |
12813869 | from __future__ import unicode_literals, absolute_import
import re
from lxml import etree
import six
class Attributes(object):
    """Wrapper marking a value to be serialized as an element with XML
    attributes.

    ``data`` becomes the element content (a dict is recursed into, any
    other truthy value is assigned as text); ``attributes`` is a
    mapping of attribute name -> string value.
    """
    def __init__(self, data, attributes):
        self.data = data
        self.attributes = attributes

    def __pytoxml__(self, structure, element, name, pytoxml):
        """Serialization hook invoked by PyToXml.traverse.

        Note: traverse calls the bound method, so ``structure`` is this
        same instance again.
        """
        for key, value in self.attributes.items():
            element.set(key, value)
        if self.data:
            if isinstance(self.data, dict):
                pytoxml.type_builder_dict(self.data, element, name, pytoxml)
            else:
                element.text = self.data
class CData(object):
    """Wrapper that emits its string as a CDATA section."""
    def __init__(self, string):
        self.string = string

    def __pytoxml__(self, structure, element, name, pytoxml):
        """Serialization hook: assign the text wrapped in CDATA."""
        element.text = etree.CDATA(self.string)
class PyToXml(object):
    """Class which allows you convert a deeply nested python structure
    into an XML representation."""

    def __init__(self, root_name, structure,
                 encoding="UTF-8", xml_declaration=False, root_attributes=None,
                 escape_illegal_chars=False):
        """Build a serializer rooted at an element named *root_name*.

        Args:
            root_name: tag of the document root element.
            structure: the python value to serialize (dict/list/str/...).
            encoding / xml_declaration: passed through to lxml on output.
            root_attributes: optional dict of attributes for the root
                element. Defaults to None instead of a mutable ``{}``
                literal (the original default dict was shared between
                every instance — the classic mutable-default pitfall).
            escape_illegal_chars: if True, strip XML-illegal characters
                from string values instead of letting lxml raise.
        """
        if root_attributes is None:
            root_attributes = {}
        self.root = etree.Element(root_name, root_attributes)
        self.root_name = root_name
        self.structure = structure
        self.encoding = encoding
        self.xml_declaration = xml_declaration
        self.escape_illegal_chars = escape_illegal_chars
        self._flat_type_map = self.build_flat_type_map(self.type_map())

    def build_flat_type_map(self, type_func_map):
        """Flatten the types so we can access them as quickly as
        possible."""
        type_list = {}
        for typ, outputter in type_func_map.items():
            # there might be tuples thanks to things like StringTypes
            if isinstance(typ, tuple):
                for subtype in typ:
                    type_list[subtype] = outputter
            else:
                type_list[typ] = outputter
        return type_list

    def pluralisation(self, plural):
        """Returns a string that is suitable for elements of a
        list. Intended to be overridden for more complex pluralisation
        logic."""
        return "item"

    def type_builder_list(self, structure, element, name, pytoxml):
        """Serialize each list/tuple item as a child element named by
        pluralisation()."""
        for value in structure:
            sub = etree.SubElement(element, self.pluralisation(name))
            self.traverse(value, sub, name)

    def type_builder_string(self, structure, element, name, pytoxml):
        """Assign string content, optionally scrubbing characters lxml
        rejects (ValueError) when escape_illegal_chars is enabled."""
        if self.escape_illegal_chars:
            try:
                element.text = structure
            except ValueError:
                element.text = escape_xml_illegal_chars(structure)
        else:
            element.text = structure

    def type_builder_dict(self, structure, element, name, pytoxml):
        """Serialize a mapping: one child element per key."""
        for key, value in six.iteritems(structure):
            sub = etree.SubElement(element, key)
            self.traverse(value, sub, key)

    def type_builder_number(self, structure, element, name, pytoxml):
        """Serialize ints/floats via str()."""
        element.text = str(structure)

    def type_builder_bool(self, structure, element, name, pytoxml):
        # XML convention: lower-case "true"/"false".
        element.text = str(structure).lower()

    def add_type_handler(self, typ, handler=None):
        """Register (or override) the serializer used for *typ*.

        NOTE(review): passing handler=None stores a falsy processor,
        which traverse() treats as "no handler registered" — confirm
        whether that is intended as a way to unregister a type.
        """
        new_map = { }
        new_map[typ] = handler
        self._flat_type_map = dict(list(self._flat_type_map.items())
                                   + list(self.build_flat_type_map(new_map).items()))

    def type_map(self):
        """Default mapping of python types to builder methods."""
        type_map = {
            # lists
            list: self.type_builder_list,
            tuple: self.type_builder_list,
            # numerical
            float: self.type_builder_number,
            # other
            str: self.type_builder_string,
            type(u""): self.type_builder_string,
            dict: self.type_builder_dict,
            bool: self.type_builder_bool,
        }
        # Support v2.x and 3.x integer types
        for six_type in six.integer_types:
            type_map[six_type] = self.type_builder_number
        return type_map

    def traverse(self, structure, element, name):
        """Loop over the structure, convert to an etree style element
        and apply to element. The argument `name` is the element name
        of the parent."""
        typ = type(structure)
        processor = self._flat_type_map.get(typ)
        if not processor:
            # if we find a __pytoxml__ then use that
            if hasattr(structure, "__pytoxml__"):
                processor = structure.__pytoxml__
            else:
                raise TypeError("Don't know how to serialise %s." % typ)
        return processor(structure, element, name, self)

    def encode(self):
        """Encode the structure passed into the constructor as
        XML. This method must be called before this object is output
        as a string."""
        self.traverse(self.structure, self.root, self.root_name)
        return self

    def __str__(self):
        """Output the XML."""
        st = etree.tostring(self.root,
                            encoding=self.encoding,
                            xml_declaration=self.xml_declaration)
        return st.decode(self.encoding)
# Characters this library strips from text values: control characters
# that XML 1.0 forbids, lone surrogates, the two non-characters, plus a
# couple of bytes historically rejected here. Compiled once at import.
_ILLEGAL_XML_CHARS = u'[\xad\xc2\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]'
_illegal_xml_chars_RE = re.compile(_ILLEGAL_XML_CHARS)


def escape_xml_illegal_chars(val, replacement=''):
    """Return *val* with every XML-illegal character replaced by
    *replacement* (removed by default)."""
    cleaned = _illegal_xml_chars_RE.sub(replacement, val)
    return cleaned
| StarcoderdataPython |
3438140 | <filename>archive_code/kah.py<gh_stars>0
#!/usr/bin/env python
# -*- mode: python; python-indent-tabs-mode: nil; python-indent-level: 4; tab-width: 4 -*-
###
### python wrapper for calling kpsapihelper, which is part of tbxsosd-config right now
###
### sucks...
import sys
import os
import tempfile
import subprocess
import uuid
# kpython
from kout import *
### create new identity, return id
def kah_get_new_id_act():
try:
tf = TmpWorkFile()
kah_proc(["kpsapihelper", "get_new_id_act", tf.name])
id = tf.read()
tf.close()
return id
except Exception, e:
err(str(e))
return False
### get kar
def kah_get_kar_act(id, admin_name, admin_email, country, state, location, org, org_unit, domain, email):
s = ""
s = s + "[main]\n"
s = s + "admin_name=%s\n" % ( str(admin_name) )
s = s + "admin_email=%s\n" % ( str(admin_email) )
s = s + "[csr]\n"
s = s + "country=%s\n" % ( str(country) )
s = s + "state=%s\n" % ( str(state) )
s = s + "location=%s\n" % ( str(location) )
s = s + "org=%s\n" % ( str(org) )
s = s + "org_unit=%s\n" % ( str(org_unit) )
s = s + "domain=%s\n" % ( str(domain) )
s = s + "email=%s\n" % ( str(email) )
try:
tf_config = TmpWorkFile()
tf_kar = TmpWorkFile()
tf_config.write(s)
tf_config.flush()
kah_proc(["kpsapihelper", "get_kar_act", id, tf_config.name, tf_kar.name])
kar = tf_kar.read()
tf_config.close()
tf_kar.close()
return kar
except Exception, e:
err(str(e))
return False
### activate
def kah_set_kap_act(id, kap):
try:
tf = TmpWorkFile()
tf.write(kap)
tf.flush()
kah_proc(["kpsapihelper", "set_kap_act", id, tf.name])
return True
except Exception, e:
err(str(e))
return False
### add group
def kah_add_group(org_id, group_name):
try:
tf = TmpWorkFile()
kah_proc(["kpsapihelper", "add_group", str(org_id), group_name, tf.name])
group_id = int(tf.read())
tf.close()
return group_id
except Exception, e:
err(str(e))
return False
### get a backup of database and kps config
def kah_backup():
try:
tf = TmpWorkFile()
kah_proc(["kpsapihelper", "backup", tf.name])
backup = tf.read()
tf.close()
return backup
except Exception, e:
err(str(e))
return None
# yeah..
# had problems using tempfile.NamedTemporaryFile() when trying to read a file after it has been written by another proc
# IS NOT A COMPATIBLE FILE OBJECT
# you must call close() if you want the file to be removed!
class TmpWorkFile:
    """Minimal temp-file helper for exchanging data with kpsapihelper.

    Hand-rolled because tempfile.NamedTemporaryFile could not be
    reliably re-read after a child process wrote to it (see comment
    above the class). You must call close() to remove the file.
    NOTE(review): the file lives under a guessable /tmp path and is
    created non-atomically in a world-writable directory — confirm
    whether that matters for this deployment.
    """
    def __init__(self):
        # uuid4 makes collisions negligible; the file itself is only
        # created on first write() (or by the child process).
        self.name = "/tmp/kah_" + str(uuid.uuid4())
    def read(self):
        # Return the file contents, or None on any error (best-effort).
        try:
            f = open(self.name, "r")
            data = f.read()
            f.close()
            return data
        except:
            return None
    def write(self, data):
        # Overwrite the file; True on success, False on any error.
        try:
            f = open(self.name, "w")
            f.write(data)
            f.close()
            return True
        except:
            return False
    def flush(self):
        # write() already closes the handle, so there is nothing to do.
        pass # not used
    def close(self):
        # Remove the backing file if present; errors are ignored.
        try:
            if os.access(self.name, os.W_OK):
                os.remove(self.name)
        except:
            pass
### restore from a backup
def kah_restore(data):
try:
tf = TmpWorkFile()
tf.write(data)
tf.flush()
kah_proc(["kpsapihelper", "restore", tf.name])
tf.close()
return True
except Exception, e:
err(str(e))
return False
### run program, log data if it fails
def kah_proc(cmd):
    """Run a kpsapihelper command, logging its output on failure.

    Args:
        cmd: argv list, e.g. ["kpsapihelper", "backup", path]
    Raises:
        Exception: if the process exits non-zero (after logging its
        stdout/stderr line by line) or cannot be spawned at all.
    """
    try:
        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # No stdin data is needed; communicate("") just closes the pipe.
        strout, strerr = proc.communicate("")
        ret = proc.returncode
        if ret != 0:
            err("kpsapihelper command: " + " ".join(cmd))
            for s in strout.split("\n"):
                err("kpsapihelper stdout: " + s)
            for s in strerr.split("\n"):
                err("kpsapihelper stderr: " + s)
            raise Exception("kpsapihelper return code: %d" % ( ret ) )
    except Exception, e:
        err("kah_proc: " + str(e))
        raise Exception("kah_proc: " + str(e))
### BEGIN_TESTS ###
def kah_tests():
print "BEGIN"
if 0:
id = kah_get_new_id_act()
print "NEW ID: " + id + "\n"
print "\n\n"
kar = kah_get_kar_act(id, "admin_name", "admin_email", "country", "state", "location", "org", "org_unit", "domain", "email")
print "KAR IS:\n"
print kar
print "\n\n"
### can't be tested easily right now
#status = kah_set_kap_act("0001", "/tmp/kap.bin")
#print "KAP installed: " + str(status) + "\n"
#print "\n\n"
if 1:
backup = kah_backup()
print "BACKUP: LEN=" + str(len(backup)) + "\n"
print "END"
if __name__ == "__main__":
kah_tests()
### END_TESTS ###
| StarcoderdataPython |
5181682 | <reponame>EncryptEx/myhackupc
from __future__ import print_function
import csv
import sys
from django.core.management.base import BaseCommand
from app import slack
from app.slack import SlackInvitationException
def slack_invite(email):
    """Send a Slack workspace invite to *email* and report the outcome.

    SlackInvitationException is caught and printed rather than raised,
    so one bad address does not abort a batch run.
    """
    try:
        slack.send_slack_invite(email)
        print('Slack invite sent to ' + email + '.')
    except SlackInvitationException as e:
        print('Slack error: ' + str(e))
class Command(BaseCommand):
"""
Format CSV: email
"""
help = 'Invites volunteers to Slack'
def add_arguments(self, parser):
parser.add_argument('csv_filename',
default=False,
help='csv filename')
def handle(self, *args, **options):
with open(options['csv_filename']) as csv_f:
for row in csv.reader(csv_f):
email = row[0]
try:
print('Inviting user {0}.'.format(email))
slack_invite(email)
except Exception:
print('There was a problem inviting the user: {0}. Error: {1}.'
.format(email, sys.exc_info()[1]))
| StarcoderdataPython |
3212282 | from __future__ import print_function
import os
import sys
import multiprocessing
import time
import json
import uuid
import functools
import gzip
import vcr
import vcr.cassette
import vcr.errors
import vcr.serialize
import vcr.request
from flowy import restart
from flowy import wait
from flowy import TaskError
from flowy import SWFActivityConfig
from flowy import SWFActivityWorker
from flowy import SWFClient
from flowy import SWFWorkflowConfig
from flowy import SWFWorkflowStarter
from flowy import SWFWorkflowWorker
VERSION = 2
HERE = os.path.dirname(os.path.realpath(__file__))
A_CASSETTE = os.path.join(HERE, 'cassettes/a.yml.gz')
W_CASSETTE = os.path.join(HERE, 'cassettes/w.yml.gz')
DOMAIN = 'FlowyIntegrationTest' # a domain where you have access
TASKLIST = 'tl'
IDENTITY = 'test'
RECORDING = False
exit_event = multiprocessing.Event()
wf_finished_event = multiprocessing.Event()
# Patch vcr to use gzip files
def load_cassette(cassette_path, serializer):
    """Read a gzip-compressed cassette and deserialize it.

    Replaces vcr's plain-file loader so cassettes can be stored as
    *.yml.gz. The with-block guarantees the handle is closed even if
    reading fails (the original only closed after deserialization, and
    leaked the handle on error).
    """
    with gzip.open(cassette_path, 'rb') as f:
        cassette_content = f.read()
    cassette = vcr.serialize.deserialize(cassette_content, serializer)
    return cassette
def save_cassette(cassette_path, cassette_dict, serializer):
    """Serialize a cassette and write it gzip-compressed.

    Counterpart to load_cassette; both are patched into vcr.cassette
    below. The with-block closes the file even if the write raises
    (the original leaked the handle in that case).
    """
    data = vcr.serialize.serialize(cassette_dict, serializer)
    dirname, _ = os.path.split(cassette_path)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    with gzip.open(cassette_path, 'wb') as f:
        f.write(data)
vcr.cassette.load_cassette = load_cassette
vcr.cassette.save_cassette = save_cassette
# Patch requests_match in cassette for speed-up
def requests_match(r1, r2, matchers):
    """Fast replacement for vcr.cassette.requests_match: no logging,
    just run every matcher and bail out at the first mismatch."""
    for matcher in matchers:
        if not matcher(r1, r2):
            return False
    return True
vcr.cassette.requests_match = requests_match
# Patch urpalse to speed-up for python3
try:
from functools import lru_cache
from urllib.parse import urlparse
vcr.request.urlparse = lru_cache(maxsize=None)(urlparse)
except ImportError:
pass
# patch uuid4 for consistent keys
def fake_uuid4():
    """Infinite generator of deterministic uuid4 stand-ins
    ('fakeuuid-0-', 'fakeuuid-1-', ...) so recorded cassettes replay
    with stable keys."""
    counter = 0
    while True:
        yield 'fakeuuid-{0}-'.format(counter)
        counter += 1
uuid.uuid4 = functools.partial(next, fake_uuid4())
def break_loop(self):
return exit_event.is_set()
class TestSWFWorkflowWorker(SWFWorkflowWorker):
break_loop = break_loop
class TestSWFActivityWorker(SWFActivityWorker):
break_loop = break_loop
class BaseWorkflow(object):
def __call__(self, *args, **kwargs):
r = self.call(*args, **kwargs)
wait(r)
wf_finished_event.set()
return r
def call(self, *args, **kwargs):
raise NotImplementedError
a_conf = SWFActivityConfig(default_task_list=TASKLIST,
default_schedule_to_start=30,
default_schedule_to_close=60,
default_start_to_close=15,
default_heartbeat=10)
@a_conf(version=VERSION)
def tactivity(hb, a=None, b=None, sleep=None, heartbeat=False, err=None):
result = None
if a is not None and b is not None:
result = a + b
elif a is not None:
result = a * a
if sleep is not None and RECORDING:
time.sleep(sleep)
if heartbeat:
hb()
if err is not None:
raise RuntimeError(err)
return result
empty_conf = SWFWorkflowConfig(default_task_list=TASKLIST,
default_decision_duration=10,
default_workflow_duration=20,
default_child_policy='TERMINATE', )
empty_conf.conf_activity('activity', VERSION, 'tactivity')
@empty_conf(version=VERSION)
class TWorkflow(object):
def __init__(self, activity):
pass
def __call__(self, a=None, b=None, sleep=None, heartbeat=False, err=None):
dummy_heartbeat = lambda: True
return tactivity(dummy_heartbeat, a, b, sleep, heartbeat, err)
conf_use_activities = SWFWorkflowConfig(default_task_list=TASKLIST,
default_decision_duration=10,
default_workflow_duration=60,
default_child_policy='TERMINATE')
conf_use_activities.conf_activity('task', VERSION, 'tactivity')
conf_use_activities.conf_activity('short_task', VERSION, 'tactivity',
schedule_to_close=1,
retry=(0, ))
conf_use_activities.conf_activity('delayed_task', VERSION, 'tactivity',
retry=(3, ))
conf_use_activities.conf_activity('non_existing_task', 1, 'xxx')
conf_use_workflow = SWFWorkflowConfig(default_task_list=TASKLIST,
default_decision_duration=10,
default_workflow_duration=60,
default_child_policy='TERMINATE')
conf_use_workflow.conf_workflow('task', VERSION, 'TWorkflow')
conf_use_workflow.conf_workflow('short_task', VERSION, 'TWorkflow',
workflow_duration=1,
retry=(0, ))
conf_use_workflow.conf_workflow('delayed_task', VERSION, 'TWorkflow',
retry=(3, ))
conf_use_workflow.conf_workflow('non_existing_task', 1, 'xxx')
@conf_use_activities(version=VERSION)
@conf_use_workflow(version=VERSION, name='TestWorkflowW')
class TestWorkflow(BaseWorkflow):
def __init__(self, task, short_task, delayed_task, non_existing_task):
self.task = task
self.short_task = short_task
self.delayed_task = delayed_task
self.non_existing_task = non_existing_task
def call(self):
tasks = [self.task(10),
self.task(err=u'Error!'),
self.task(heartbeat=True),
self.short_task(sleep=3),
self.delayed_task(20),
self.non_existing_task(), ]
last = self.task(1, 1) # Make the history longer, to have pages
for _ in range(20):
last = self.task(last, 1)
tasks.append(last)
for t in tasks:
try:
wait(t)
except TaskError:
pass
@empty_conf(version=VERSION)
class RestartWorkflow(BaseWorkflow):
def __init__(self, activity):
pass
def call(self, should_restart=True):
if should_restart:
return restart(should_restart=False)
return 1
@empty_conf(version=VERSION)
class ExitWorkflow(object):
def __init__(self, activity):
exit_event.set()
wait(activity()) # wake the activity thread
def __call__(self):
pass
wworker = TestSWFWorkflowWorker()
wworker.scan(package=sys.modules[__name__])
aworker = TestSWFActivityWorker()
aworker.scan(package=sys.modules[__name__])
body_cache = {}
def body_as_dict(r1, r2):
    """vcr matcher: compare request bodies as parsed JSON, so key order
    and whitespace differences do not break cassette replay.

    Parsed bodies are memoized in the module-level body_cache, keyed by
    the request object.
    NOTE(review): the cache is never evicted — fine for one test run,
    would grow unboundedly in a long-lived process.
    """
    if r1 not in body_cache:
        r1b = r1.body if isinstance(r1.body, str) else r1.body.decode('utf-8')
        body_cache[r1] = json.loads(r1b)
    if r2 not in body_cache:
        r2b = r2.body if isinstance(r2.body, str) else r2.body.decode('utf-8')
        body_cache[r2] = json.loads(r2b)
    return body_cache[r1] == body_cache[r2]
def escaped_headers(r1, r2):
    """vcr matcher: compare two requests' headers after URL-unquoting
    the header values.

    Fixes the original, which built ``r2h`` from ``r1`` (copy/paste)
    and then returned ``r1 == r2`` — ignoring both computed dicts, so
    the unquoting never had any effect.
    """
    import urllib
    r1h = dict((h, urllib.unquote(v)) for h, v in r1.headers.items())
    r2h = dict((h, urllib.unquote(v)) for h, v in r2.headers.items())
    return r1h == r2h
# Make the custom matchers available under the names used in
# cassette_args.
# NOTE(review): 'esc_headers' is registered with body_as_dict, not the
# escaped_headers function defined above (which is otherwise dead
# code). This looks like a copy/paste slip — confirm which matcher was
# intended before changing it, since swapping it would alter how the
# recorded cassettes replay.
vcr.default_vcr.register_matcher('dict_body', body_as_dict)
vcr.default_vcr.register_matcher('esc_headers', body_as_dict)
cassette_args = {
'match_on': ['dict_body', 'esc_headers', 'query', 'method', 'uri', 'host',
'port', 'path'],
'filter_headers': ['authorization', 'x-amz-date', 'content-length',
'user-agent']
}
def test_activity_integration():
with vcr.use_cassette(A_CASSETTE,
record_mode='none', **cassette_args) as cass:
try:
cl = SWFClient(kwargs={'aws_access_key_id': 'x',
'aws_secret_access_key': 'x',
'region_name': 'us-east-1'})
aworker.run_forever(DOMAIN, TASKLIST,
identity=IDENTITY,
swf_client=cl,
setup_log=False)
except vcr.errors.CannotOverwriteExistingCassetteException:
pass
assert cass.all_played
def test_workflow_integration():
with vcr.use_cassette(W_CASSETTE,
record_mode='none', **cassette_args) as cass:
try:
cl = SWFClient(kwargs={'aws_access_key_id': 'x',
'aws_secret_access_key': 'x',
'region_name': 'us-east-1'})
wworker.run_forever(DOMAIN, TASKLIST,
identity=IDENTITY,
swf_client=cl,
setup_log=False)
except vcr.errors.CannotOverwriteExistingCassetteException:
pass
assert cass.all_played
def start_activity_worker():
with vcr.use_cassette(A_CASSETTE,
record_mode='all', **cassette_args) as cass:
try:
aworker.run_forever(DOMAIN, TASKLIST, identity=IDENTITY)
except vcr.errors.CannotOverwriteExistingCassetteException:
pass
def start_workflow_worker():
with vcr.use_cassette(W_CASSETTE,
record_mode='all', **cassette_args) as cass:
try:
wworker.run_forever(DOMAIN, TASKLIST, identity=IDENTITY)
except vcr.errors.CannotOverwriteExistingCassetteException:
pass
if __name__ == '__main__':
RECORDING = True
try:
os.remove(A_CASSETTE)
except:
pass
try:
os.remove(W_CASSETTE)
except:
pass
a_worker = multiprocessing.Process(target=start_activity_worker)
w_worker = multiprocessing.Process(target=start_workflow_worker)
a_worker.start()
w_worker.start()
time.sleep(5) # Wait for registration
wfs = ['TestWorkflow', 'TestWorkflowW', 'RestartWorkflow']
for wf in wfs:
print('Starting', wf)
SWFWorkflowStarter(DOMAIN, wf, VERSION)()
wf_finished_event.wait()
wf_finished_event.clear()
# Must be the last one
print('Prepare to exit')
SWFWorkflowStarter(DOMAIN, 'ExitWorkflow', VERSION)()
a_worker.join()
w_worker.join()
| StarcoderdataPython |
3223811 | #!/usr/bin/env python
from functools import reduce
from google.cloud.monitoring_v3 import MetricServiceClient
from google.cloud.monitoring_v3.types import LabelDescriptor, MetricDescriptor, TimeSeries
from os import environ
import psutil as ps
import requests
from signal import signal, SIGTERM
from sys import stderr
from time import sleep, time
def get_metadata(key):
    """Fetch *key* from the GCE instance metadata server as plain text.

    The Metadata-Flavor header is mandatory for the metadata API.
    """
    return requests.get(
        'http://metadata.google.internal/computeMetadata/v1/instance/' + key,
        headers={'Metadata-Flavor': 'Google'}
    ).text
def reset():
    """Reset the per-report measurement accumulators (module globals)."""
    global memory_used, disk_used, disk_reads, disk_writes, report_time
    # Explicitly reset the CPU counter, because the first call of this method always reports 0
    ps.cpu_percent()
    memory_used = 0
    disk_used = 0
    # Snapshot I/O counters so report() can compute per-second deltas.
    disk_reads = disk_io('read_count')
    disk_writes = disk_io('write_count')
    report_time = 0
def measure():
    """Take one sample, tracking peak usage, then sleep one interval."""
    global memory_used, disk_used, report_time
    # High-water-mark semantics: report the peak seen since reset().
    memory_used = max(memory_used, MEMORY_SIZE - mem_usage('available'))
    disk_used = max(disk_used, disk_usage('used'))
    report_time += MEASUREMENT_TIME_SEC
    sleep(MEASUREMENT_TIME_SEC)
def mem_usage(param):
return getattr(ps.virtual_memory(), param)
def disk_usage(param):
    """Sum the given psutil.disk_usage attribute (e.g. 'used', 'total')
    across every mount point the task requested.

    sum() over a generator is the idiomatic replacement for the
    original functools.reduce with an explicit lambda; the result is
    identical (same 0 start value, same addition order).
    """
    return sum(getattr(ps.disk_usage(mount), param) for mount in DISK_MOUNTS)
def disk_io(param):
return getattr(ps.disk_io_counters(), param)
def format_gb(value_bytes):
    """Format a byte count as gibibytes with one decimal place."""
    gib = value_bytes / 2**30
    return '%.1f' % round(gib, 1)
def get_metric(key, value_type, unit, description):
return client.create_metric_descriptor(PROJECT_NAME, MetricDescriptor(
type='/'.join(['custom.googleapis.com', METRIC_ROOT, key]),
description=description,
metric_kind='GAUGE',
value_type=value_type,
unit=unit,
labels=LABEL_DESCRIPTORS,
))
def create_time_series(series):
client.create_time_series(PROJECT_NAME, series)
def get_time_series(metric_descriptor, value):
    """Build a single-point TimeSeries for *metric_descriptor*.

    *value* is a TypedValue mapping, e.g. {'double_value': 1.5}.
    """
    series = TimeSeries()
    series.metric.type = metric_descriptor.type
    # Attach the Cromwell call identity and machine shape as metric labels.
    labels = series.metric.labels
    labels['workflow_id'] = WORKFLOW_ID
    labels['task_call_name'] = TASK_CALL_NAME
    labels['task_call_index'] = TASK_CALL_INDEX
    labels['task_call_attempt'] = TASK_CALL_ATTEMPT
    labels['cpu_count'] = CPU_COUNT_LABEL
    labels['mem_size'] = MEMORY_SIZE_LABEL
    labels['disk_size'] = DISK_SIZE_LABEL
    # Anchor the point to this GCE instance.
    series.resource.type = 'gce_instance'
    series.resource.labels['zone'] = ZONE
    series.resource.labels['instance_id'] = INSTANCE
    point = series.points.add(value=value)
    point.interval.end_time.seconds = int(time())
    return series
def report():
    """Push one point per metric; I/O rates are diffs against the reset() baseline."""
    create_time_series([
        get_time_series(CPU_UTILIZATION_METRIC, { 'double_value': ps.cpu_percent() }),
        get_time_series(MEMORY_UTILIZATION_METRIC, { 'double_value': memory_used / MEMORY_SIZE * 100 }),
        get_time_series(DISK_UTILIZATION_METRIC, { 'double_value': disk_used / DISK_SIZE * 100 }),
        get_time_series(DISK_READS_METRIC, { 'double_value': (disk_io('read_count') - disk_reads) / report_time }),
        get_time_series(DISK_WRITES_METRIC, { 'double_value': (disk_io('write_count') - disk_writes) / report_time }),
    ])
### Define constants
# Cromwell variables passed to the container
# through environmental variables
WORKFLOW_ID = environ['WORKFLOW_ID']
TASK_CALL_NAME = environ['TASK_CALL_NAME']
TASK_CALL_INDEX = environ['TASK_CALL_INDEX']
TASK_CALL_ATTEMPT = environ['TASK_CALL_ATTEMPT']
DISK_MOUNTS = environ['DISK_MOUNTS'].split()
# GCP instance name, zone and project
# from instance introspection API
INSTANCE = get_metadata('name')
_, PROJECT, _, ZONE = get_metadata('zone').split('/')
client = MetricServiceClient()
PROJECT_NAME = client.project_path(PROJECT)
METRIC_ROOT = 'wdl_task'
# Sample every second; publish a point once per minute.
MEASUREMENT_TIME_SEC = 1
REPORT_TIME_SEC = 60
LABEL_DESCRIPTORS = [
    LabelDescriptor(
        key='workflow_id',
        description='Cromwell workflow ID',
    ),
    LabelDescriptor(
        key='task_call_name',
        description='Cromwell task call name',
    ),
    LabelDescriptor(
        key='task_call_index',
        description='Cromwell task call index',
    ),
    LabelDescriptor(
        key='task_call_attempt',
        description='Cromwell task call attempt',
    ),
    LabelDescriptor(
        key='cpu_count',
        description='Number of virtual cores',
    ),
    LabelDescriptor(
        key='mem_size',
        description='Total memory size, GB',
    ),
    LabelDescriptor(
        key='disk_size',
        description='Total disk size, GB',
    ),
]
# Machine shape, attached to every point as labels.
CPU_COUNT = ps.cpu_count()
CPU_COUNT_LABEL = str(CPU_COUNT)
MEMORY_SIZE = mem_usage('total')
MEMORY_SIZE_LABEL = format_gb(MEMORY_SIZE)
DISK_SIZE = disk_usage('total')
DISK_SIZE_LABEL = format_gb(DISK_SIZE)
# Metric descriptors are created (idempotently) at startup.
CPU_UTILIZATION_METRIC = get_metric(
    'cpu_utilization', 'DOUBLE', '%',
    '% of CPU utilized in a Cromwell task call',
)
MEMORY_UTILIZATION_METRIC = get_metric(
    'mem_utilization', 'DOUBLE', '%',
    '% of memory utilized in a Cromwell task call',
)
DISK_UTILIZATION_METRIC = get_metric(
    'disk_utilization', 'DOUBLE', '%',
    '% of disk utilized in a Cromwell task call',
)
DISK_READS_METRIC = get_metric(
    'disk_reads', 'DOUBLE', '{reads}/s',
    'Disk read IOPS in a Cromwell task call',
)
DISK_WRITES_METRIC = get_metric(
    'disk_writes', 'DOUBLE', '{writes}/s',
    'Disk write IOPS in a Cromwell task call',
)
### Detect container termination
def signal_handler(signum, frame):
    """SIGTERM handler: ask the main loop to stop after the current sample."""
    global running
    running = False
running = True
signal(SIGTERM, signal_handler)
### Main loop
#
# It continuously measures runtime metrics every MEASUREMENT_TIME_SEC,
# and reports them to Stackdriver Monitoring API every REPORT_TIME_SEC.
#
# However, if it detects a container termination signal,
# it *should* report the final metric
# right after the current measurement, and then exit normally.
reset()
while running:
    measure()
    # Report early on shutdown so the last partial interval is not lost.
    if not running or report_time >= REPORT_TIME_SEC:
        report()
        reset()
exit(0)
| StarcoderdataPython |
254191 | import os
import getpass
from fabric import Connection
from invoke import task
# NOTE(review): sudo_pass is collected but not used in this file chunk — confirm.
sudo_pass = getpass.getpass()
GIT_REPO = "https://github.com/xuhaer/Django_Blog"
# SSH connection to the deployment host.
con = Connection('har@192.168.127.12:22')
@task
def deploy(con):
    """Deploy the blog: install deps, migrate, and restart gunicorn over SSH."""
    source_folder = '~/sites/blog.xuhaer.com/Django_Blog/'
    # con.run('cd {} && git pull'.format(source_folder), pty=True)
    con.run("""
        cd {} &&
        source /home/har/sites/blog.xuhaer.com/venv/bin/activate &&
        pip install -r requirements.txt &&
        python manage.py migrate &&
        gunicorn blogproject.wsgi:application -c ./blogproject/gunicorn_conf.py
        """.format(source_folder), pty=True)
| StarcoderdataPython |
56575 | import sys
import json
import time
import random
import datetime
from pytz import timezone
import requests
# Settings
url = 'https://hooks.slack.com/services/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'
# exercise name -> repetition range and optional unit ('second' or count).
exercises = {
    'Plank': {'range': (20, 50), 'unit': 'second'},
    'Wall Sit': {'range': (40, 90), 'unit': 'second'},
    'Push Ups': {'range': (5, 15), 'unit': None},
    'Calf Raises': {'range': (25, 50), 'unit': None},
    'Crunches': {'range': (15, 25), 'unit': None},
    'Stretch': {'range': (60, 120), 'unit': 'second'},
    'Lunges': {'range': (10, 25), 'unit': None}
}
message_timezone = 'MST'
# NOTE(review): ``next`` shadows the builtin next(); consider renaming.
# Minutes to wait between exercises (min, max).
next = (50, 75)
# Hour (24h, MST) after which no more exercises are posted.
night = 18
def generate_message(minutes_to_next=None):
    """Build a random exercise announcement string.

    Picks a random exercise and repetition count from ``exercises``.  If
    ``minutes_to_next`` is given, appends a second line announcing when the
    next exercise happens (in ``message_timezone``).
    """
    # BUG FIX: random.choice needs a sequence; dict.items() is a view on
    # Python 3.  sorted() gives an indexable sequence on both 2 and 3.
    exercise, data = random.choice(sorted(exercises.items()))
    repetitions = random.randint(*data['range'])

    # Prepare the message string, e.g. "30 second Plank RIGHT NOW!"
    unit_string = ' ' + data['unit'] if data['unit'] else ''
    # BUG FIX: the original referenced an undefined name ``prefix``.
    text = '{}{} {} RIGHT NOW!'.format(repetitions, unit_string, exercise)

    # Add the next exercise indication
    if minutes_to_next is not None:
        current_time = datetime.datetime.now(timezone('UTC'))
        # BUG FIX: the original used undefined ``now``; use current_time.
        next_time = (current_time.astimezone(timezone(message_timezone)) +
                     datetime.timedelta(minutes=minutes_to_next))
        # BUG FIX: strftime must be called on next_time, not the time module.
        next_text = 'NEXT EXERCISE AT {}'.format(next_time.strftime('%H:%M'))
        text += '\n' + next_text
    return text
def postMessage():
    """Post a random exercise message to the Slack webhook.

    Returns the number of minutes to wait before the next exercise.
    """
    # BUG FIX (py3): dict.keys() is a view, not a sequence — wrap in list().
    exercise = random.choice(list(exercises.keys()))
    properties = exercises[exercise]
    number = random.randint(properties['range'][0], properties['range'][1])
    prefix = '' if not properties['unit'] else ' {}'.format(properties['unit'])
    wait = random.randint(next[0], next[1])
    now = datetime.datetime.now(timezone('UTC'))
    # Renamed from ``time`` to avoid shadowing the time module used elsewhere.
    next_time = (now.astimezone(timezone('MST')) + datetime.timedelta(minutes=wait))
    text = '<!channel> {}{} {} RIGHT NOW!'.format(number, prefix, exercise)
    if next_time.hour < night:
        text += '\nNEXT THING AT {}'.format(next_time.strftime('%H:%M'))
    payload = {'text': text}
    r = requests.post(url, data=json.dumps(payload))
    return wait
def startLoop():
    """Post exercises forever, sleeping a random interval between posts.

    NOTE(review): ``xrange`` implies this file targets Python 2.
    """
    while True:
        # Post a new message
        wait = postMessage()
        assert wait > 5
        #Heartbeat every 60 seconds to prevent program from terminating
        for _ in xrange(wait):
            time.sleep(60)
            sys.stdout.write('/\\_')
        sys.stdout.write('\n')
        #Stop at Night
        now = datetime.datetime.now(timezone('UTC'))
        if now.astimezone(timezone('MST')).hour >= night:
            text = 'I\'m out. PEACE Y\'ALL'
            #print "Posting {}".format(text)
            payload = {'text': text}
            r = requests.post(url, data=json.dumps(payload))
            exit()
if __name__ == '__main__':
    # Only builds one message; the posting loop is currently disabled.
    generate_message()
    #startLoop()
| StarcoderdataPython |
1977022 | <gh_stars>0
# Greet the user; message strings are in Portuguese by design.
nome = str(input('Qual é o seu nome?'))
if nome == 'Daniel':
    print('Que nome lindo você tem!')
print('Bom dia {} '.format(nome)) # No "else" needed (simple conditional)
# Read two grades and compute the average.
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
m = (n1 + n2)/2
print('A sua média foi {:.1f}'.format(m))
# Passing threshold is an average of 6.
if m >= 6:
    print('Sua média foi boa! PARABÉNS!')
else:
    print('Sua média foi ruim!')
print('Parabéns' if m >= 6 else 'ESTUDE MAIS!')
| StarcoderdataPython |
361760 | <reponame>xp4ck/api-client-python<filename>gophish/api/webhooks.py
from gophish.models import Webhook
from gophish.api import APIEndpoint
class API(APIEndpoint):
    """CRUD client for the Gophish ``api/webhooks/`` endpoint."""
    def __init__(self, api, endpoint='api/webhooks/'):
        super(API, self).__init__(api, endpoint=endpoint, cls=Webhook)
    def get(self, webhook_id=None):
        """Gets one or more webhooks
        Keyword Arguments:
            webhook_id {int} -- The ID of the Webhook (optional, default: {None})
        """
        return super(API, self).get(resource_id=webhook_id)
    def post(self, webhook):
        """Creates a new webhook
        Arguments:
            webhook {gophish.models.Webhook} -- The webhook to create
        """
        return super(API, self).post(webhook)
    def put(self, webhook):
        """Edits a webhook
        Arguments:
            webhook {gophish.models.Webhook} -- The updated webhook details
        """
        return super(API, self).put(webhook)
    def delete(self, webhook_id):
        """Deletes a webhook by ID
        Arguments:
            webhook_id {int} -- The ID of the webhook to delete
        """
        return super(API, self).delete(webhook_id)
    def validate(self, webhook_id):
        """Sends a validation payload to the webhook specified by the given ID
        Arguments:
            webhook_id {int} -- The ID of the webhook to validate
        """
        # POSTs to <endpoint>/<id>/validate and parses one Webhook from the reply.
        return self.request("POST",
                            resource_id=webhook_id,
                            resource_action='validate',
                            resource_cls=Webhook,
                            single_resource=True)
| StarcoderdataPython |
8196842 | from django.contrib import admin
# Register your models here.
from .models import Journal
admin.site.register(Journal) | StarcoderdataPython |
4861839 | <gh_stars>0
from django.shortcuts import redirect, get_object_or_404
from django.views.generic import TemplateView, UpdateView
from imager_images.models import Photo, Album
from .models import ImagerProfile
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from .forms import ProfileEditForm
from random import sample
class ProfileView(LoginRequiredMixin, TemplateView):
    """Profile page; always renders the authenticated user's own profile."""
    template_name = 'imager_profile/user.html'
    context_object_name = 'profile'
    login_url = reverse_lazy('auth_login')
    success_url = reverse_lazy('profile')
    def get_context_data(self, *args, **kwargs):
        """Add the user's profile, albums, photos and a random public background."""
        context = super().get_context_data(**kwargs)
        profile = get_object_or_404(ImagerProfile, user__username=self.username)
        albums = Album.objects.filter(user__username=self.username)
        photos = Photo.objects.filter(album__user__username=self.username)
        context['profile'] = profile
        context['albums'] = albums
        # A random public photo, or None (the + [None] makes "no background" possible).
        context['background'] = sample(list(Photo.objects.filter(published="PUBLIC")) + [None], 1)[0]
        return context
    def get(self, request, *args, username=None, **kwargs):
        """Get function."""
        # NOTE(review): the URL-supplied username is discarded — the view
        # always shows the logged-in user's profile; confirm intended.
        username = request.user.get_username()
        if not username:
            return redirect('home')
        self.username = username
        return super().get(request, *args, username=username, **kwargs)
class ProfileEditView(LoginRequiredMixin, UpdateView):
    """Edit form for the authenticated user's ImagerProfile."""
    template_name = 'imager_profile/profile_edit.html'
    model = ImagerProfile
    form_class = ProfileEditForm
    login_url = reverse_lazy('auth_login')
    success_url = reverse_lazy('profile')
    slug_url_kwarg = 'username'
    slug_field = 'user__username'
    def get(self, *args, **kwargs):
        """Force the slug to the logged-in user's username."""
        self.kwargs['username'] = self.request.user.get_username()
        return super().get(*args, **kwargs)
    def post(self, *args, **kwargs):
        """Force the slug to the logged-in user's username."""
        self.kwargs['username'] = self.request.user.get_username()
        return super().post(*args, **kwargs)
    def get_form_kwargs(self):
        """Pass the username into the form's constructor."""
        kwargs = super().get_form_kwargs()
        kwargs.update({'username': self.request.user.get_username()})
        return kwargs
    def form_valid(self, form):
        """Copy user-level fields from the form onto the related auth User."""
        form.instance.user.email = form.data['email']
        form.instance.user.first_name = form.data['first_name']
        form.instance.user.last_name = form.data['last_name']
        form.instance.user.save()
        return super().form_valid(form)
6455771 | <gh_stars>0
from collections import Counter, defaultdict
def text2count(body):
    """Map each word in *body* to its number of occurrences.

    Returns a Counter, which behaves like the defaultdict(int) it replaces:
    lookups of missing words yield 0.
    """
    return Counter(body)
def _word_counts(words):
    """Plain-dict word frequency map for *words*."""
    counts = {}
    for word in words:
        counts[word] = counts.get(word, 0) + 1
    return counts


def _overlap_features(words_in_title, body_segment, idf, maximum, maximum_cnt):
    """Overlap of the title vocabulary within one body segment.

    Returns [idf-weighted overlap, same normalised, raw overlap count,
    same normalised], where the normalisers are the title's self-scores.
    """
    words_in_body = _word_counts(body_segment)
    overlaps, overlap_cnt = 0, 0
    for (word, cnt_title) in words_in_title.items():
        if word in words_in_body:
            # Clip term frequency to the title count so repeats can't dominate.
            tf = min(cnt_title, words_in_body[word])
            overlap_cnt += tf
            overlaps += tf * idf[word]
    return [overlaps, overlaps / maximum, overlap_cnt, overlap_cnt / maximum_cnt]


def lexical_overlaps(title, body, idf):
    """Return 8 lexical-overlap features between *title* and *body*.

    The first 4 are computed against the whole body, the last 4 against only
    the leading ``4 * len(title)`` body tokens (a lead-paragraph proxy).
    The previously duplicated overlap loop is factored into _overlap_features.
    """
    words_in_title = _word_counts(title)
    # Normalisers: the scores the title would get against itself.
    maximum, maximum_cnt = 0.0, 0.0
    for (word, cnt_title) in words_in_title.items():
        maximum += cnt_title * idf[word]
        maximum_cnt += cnt_title
    features = _overlap_features(words_in_title, body, idf, maximum, maximum_cnt)
    features += _overlap_features(words_in_title, body[:len(title) * 4], idf,
                                  maximum, maximum_cnt)
    return features
return features
import numpy as np
def title2vector(title, word2vec, idf):
    """Average the 300-d embeddings of the title words and L2-normalise.

    Words absent from *word2vec* are skipped.  *idf* is accepted for
    interface compatibility but not used here.
    """
    vector = np.zeros(300)
    known = [word for word in title if word in word2vec]
    for word in known:
        vector += word2vec[word]
    if known:
        vector /= len(known)
    vector /= np.linalg.norm(vector)
    return vector
def compute_overlap(title, body_sentence, idf):
    """Return (idf-weighted overlap fraction, raw overlap-count fraction)
    of the title vocabulary inside one body sentence."""
    # Inline word counting (equivalent to the text2count helper).
    title_counts = {}
    for word in title:
        title_counts[word] = title_counts.get(word, 0) + 1
    body_counts = {}
    for word in body_sentence:
        body_counts[word] = body_counts.get(word, 0) + 1
    # Normalisers: the title's score against itself.
    maximum, maximum_cnt = 0.0, 0.0
    for word, cnt in title_counts.items():
        maximum += cnt * idf[word]
        maximum_cnt += cnt
    overlaps, overlap_cnt = 0, 0
    for word, cnt in title_counts.items():
        if word in body_counts:
            tf = min(cnt, body_counts[word])
            overlap_cnt += tf
            overlaps += tf * idf[word]
    return overlaps / maximum, overlap_cnt / maximum_cnt
def semantic_similarities(title, body_sentences, word2vec, idf):
    """Per-sentence similarity features between the title and the body.

    Returns [best overlap, best overlap count, max cosine, min cosine]
    followed by the 300 components of the most similar sentence vector and
    the 300 components of the title vector.
    """
    max_overlap, max_overlap_cnt = 0, 0
    title_vector = title2vector(title, word2vec, idf)
    max_sim = -1
    best_vector = np.array([0.0] * 300)
    supports = []
    for sub_body in body_sentences:
        sub_body_vector = title2vector(sub_body, word2vec, idf)
        cur_overlap, cur_overlap_cnt = compute_overlap(title, sub_body, idf)
        max_overlap = max(max_overlap, cur_overlap)
        max_overlap_cnt = max(max_overlap_cnt, cur_overlap_cnt)
        # Manual dot product between the two unit vectors (cosine similarity).
        similarity = 0
        # for i in xrange(300):
        for i in range(300):
            similarity += title_vector[i] * sub_body_vector[i]
        if similarity > max_sim:
            max_sim = similarity
            best_vector = sub_body_vector
        supports.append(similarity)
    features = [max_overlap, max_overlap_cnt, max(supports), min(supports)]
    #for v in title_vector - best_vector:
    #    features.append(v)
    for v in best_vector:
        features.append(v)
    for v in title_vector:
        features.append(v)
    return features
def extract_features(title, body, body_sentences, idf, word2vec):
    """Concatenate lexical-overlap and semantic-similarity feature vectors."""
    return lexical_overlaps(title, body, idf) + semantic_similarities(title, body_sentences, word2vec, idf)
| StarcoderdataPython |
5026085 | <filename>pyrival/__init__.py
import os as _os
from .version import version as __version__
# Register each subpackage directory on the package search path so its
# modules can be imported directly from the top-level package.
for _s in ('algebra', 'combinatorics', 'data_structures', 'geometry', 'graphs', 'linear_algebra', 'numerical',
           'strings', 'misc', 'tools'):
    __path__.append(_os.path.join(_os.path.dirname(__file__), _s))
| StarcoderdataPython |
4985903 | # -*- coding: utf-8 -*-
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.exceptions import NotFound
from modules.statistics.serializers.user_stats import UserStatsSerializer
from users.models import EndWorker
class UserStatsView(GenericAPIView):
    """Public, unauthenticated endpoint returning statistics for one user."""
    serializer_class = UserStatsSerializer
    authentication_classes = []
    permission_classes = (AllowAny,)
    def get(self, request, user_id, *args, **kwargs):
        """Refresh and return the stats for *user_id*; 404 if absent."""
        user = EndWorker.objects.filter(id=user_id).first()
        if user:
            # Recompute aggregates before serialising so the response is fresh.
            user.stats.update()
            user.stats.update_agreement_ranking()
            serializer = self.serializer_class(user.stats)
            return Response(serializer.data)
        # BUG FIX: DRF exceptions must be raised, not returned, for the
        # framework to translate them into an HTTP 404 response.
        raise NotFound("No User find for given id.")
| StarcoderdataPython |
3296509 | from typing import Any, Dict, Optional, Tuple
from starkware.cairo.lang.vm.builtin_runner import BuiltinVerifier, SimpleBuiltinRunner
from starkware.cairo.lang.vm.relocatable import RelocatableValue
from starkware.python.math_utils import safe_div
class RangeCheckBuiltinRunner(SimpleBuiltinRunner):
    """Runner for the range-check builtin: each memory cell in its segment
    must hold a value in [0, inner_rc_bound ** n_parts)."""
    def __init__(self, included: bool, ratio, inner_rc_bound, n_parts):
        super().__init__('range_check', included, ratio)
        self.inner_rc_bound = inner_rc_bound
        # Each cell packs n_parts sub-values, each below inner_rc_bound.
        self.bound = inner_rc_bound ** n_parts
        self.n_parts = n_parts
    def add_validation_rules(self, runner):
        """Install a VM rule asserting every cell in the segment is in range."""
        def rule(memory, addr):
            value = memory[addr]
            # The range check builtin asserts that 0 <= value < BOUND.
            # For example, if the layout uses 8 16-bit range-checks per instance,
            # bound will be 2**(16 * 8) = 2**128.
            assert 0 <= value < self.bound, \
                f'Value {value}, in range check builtin {addr - self.base}, is out of range ' \
                f'[0, {self.bound}).'
            return {addr}
        runner.vm.add_validation_rule(self.base.segment_index, rule)
    def air_private_input(self, runner) -> Dict[str, Any]:
        """Collect this builtin's segment cells for the AIR private input."""
        assert self.base is not None, 'Uninitialized self.base.'
        res: Dict[int, Any] = {}
        for addr, val in runner.vm_memory.items():
            if not isinstance(addr, RelocatableValue) or \
                    addr.segment_index != self.base.segment_index:
                continue
            idx = addr.offset
            assert isinstance(val, int)
            res[idx] = {'index': idx, 'value': hex(val)}
        return {'range_check': sorted(res.values(), key=lambda item: item['index'])}
    def get_range_check_usage(self, runner) -> Optional[Tuple[int, int]]:
        """Return (min, max) over all n_parts-wide sub-values, or None if unused."""
        assert self.base is not None, 'Uninitialized self.base.'
        rc_min = None
        rc_max = None
        for addr, val in runner.vm_memory.items():
            if not isinstance(addr, RelocatableValue) or \
                    addr.segment_index != self.base.segment_index:
                continue
            # Split val into n_parts parts.
            for _ in range(self.n_parts):
                part_val = val % self.inner_rc_bound
                if rc_min is None:
                    rc_min = rc_max = part_val
                else:
                    rc_min = min(rc_min, part_val)
                    rc_max = max(rc_max, part_val)
                val //= self.inner_rc_bound
        if rc_min is None or rc_max is None:
            return None
        return rc_min, rc_max
    def get_used_perm_range_check_units(self, runner) -> int:
        """Total range-check units consumed: n_parts per used cell."""
        used_cells, _ = self.get_used_cells_and_allocated_size(runner)
        # Each cell in the range check segment requires n_parts range check units.
        return used_cells * self.n_parts
class RangeCheckBuiltinVerifier(BuiltinVerifier):
    """Verifier-side counterpart: checks the builtin's segment addresses."""
    def __init__(self, included: bool, ratio):
        self.included = included
        self.ratio = ratio
    def expected_stack(self, public_input):
        """Return the expected (initial, final) builtin pointers on the stack.

        The segment may use at most n_steps / ratio cells.
        """
        if not self.included:
            return [], []
        addresses = public_input.memory_segments['range_check']
        max_size = safe_div(public_input.n_steps, self.ratio)
        # Pointers must be ordered, within bounds, and fit in 64 bits.
        assert 0 <= addresses.begin_addr <= addresses.stop_ptr <= \
            addresses.begin_addr + max_size < 2**64
        return [addresses.begin_addr], [addresses.stop_ptr]
| StarcoderdataPython |
8006633 | from abc import abstractmethod
class _BaseRedBlueFastq(object):
"""Base class for fastg to redbluegraphs
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def fit(self):
""" Placeholder for fit this
should be implemetned by sub-method"""
def condition_graph(self, cond):
""" get each condition after fitting"""
if cond not in self.cond_graphs.keys():
raise ValueError('Condition is not in input ',
'conditions: must be one of '
+ ' '.join(self.conditions))
return self.cond_graphs[cond]
def leftout_rest_graph(self, cond):
""" return a RedBlueDiGraph
"""
return self.rest_leftout[cond]
def node_map(self):
""" return a node labels in fatsa
type format
"""
return {y: x for x, y in self.nodes.items()}
| StarcoderdataPython |
6519817 | <reponame>JHP4911/e-learning_app
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-15 21:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds DeleteChapter/DeleteCourse models,
    renames several Chapter/Course/TextBlock/YTLink fields, and retargets
    Course ownership to the configured auth user model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('courses', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='DeleteChapter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('chapter_name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='DeleteCourse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('course_name', models.CharField(max_length=50)),
            ],
        ),
        migrations.RenameField(
            model_name='chapter',
            old_name='created_date',
            new_name='chapter_created_date',
        ),
        migrations.RenameField(
            model_name='chapter',
            old_name='course_fk',
            new_name='course',
        ),
        migrations.RenameField(
            model_name='course',
            old_name='created_date',
            new_name='course_created_date',
        ),
        migrations.RenameField(
            model_name='textblock',
            old_name='text_area_fk',
            new_name='text_block_fk',
        ),
        migrations.RenameField(
            model_name='ytlink',
            old_name='youtube_fk',
            new_name='yt_link_fk',
        ),
        migrations.RemoveField(
            model_name='course',
            name='course_link',
        ),
        migrations.RemoveField(
            model_name='course',
            name='user_fk',
        ),
        migrations.AddField(
            model_name='course',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='course',
            name='course_name',
            field=models.CharField(max_length=50, unique=True),
        ),
        migrations.AlterField(
            model_name='textblock',
            name='chapter_description',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='ytlink',
            name='link',
            field=models.URLField(default='', unique=True),
        ),
    ]
| StarcoderdataPython |
3439116 | <gh_stars>0
from django.db import models
from django.contrib.auth.models import User
from .games.games import games
from datetime import time
import random
class BoardManager(models.Manager):
    """Manager that builds a Board with its initial State and Pieces."""
    def create(self, game_id):
        """Create a board for *game_id* with a random 5-hex-digit join code,
        a fresh State at turn 1, and the game's initial piece layout."""
        code = hex(random.randint(0, 1048575))[2:].zfill(5).upper()
        state = State.objects.create(game_id=game_id, turn=1)
        board = super().create(game_id=game_id, code=code, state=state)
        game = games[game_id]
        # Ask the game definition which piece (if any) starts on each square.
        for x in range(0, game.width):
            for y in range(0, game.height):
                type, owner_id = board.game().initial(x, y)
                if type: Piece.objects.create(
                    state=state, type_id=type.id, owner_id=owner_id, x=x, y=y)
        return board
class Board(models.Model):
    """One running game instance, identified by a short join code.

    ``stage``: 0 = lobby, 1 = playing, 2 = finished (set when the State
    reports an outcome).
    """
    game_id = models.IntegerField()
    code = models.CharField(max_length=5)
    state = models.ForeignKey('State',
        on_delete=models.SET_NULL, null=True, blank=True)
    stage = models.IntegerField(default=0)
    time = models.DateTimeField(auto_now_add=True)
    class Meta: ordering = ['-time']
    def __str__(self): return self.code + " " + self.game().name
    boards = BoardManager()
    def place_piece(self, player_id, x, y, type=None):
        """Validate and apply a placement; advances to a new State snapshot."""
        if not type: type = self.game().types[0]
        if self.game().place_valid(self.state, self.state.pieces(),
                type, player_id, x, y):
            self.state = self.state.next()
            self.game().place_piece(self.state, self.state.pieces(),
                type, player_id, x, y)
            # Any non -1 outcome means the game just ended.
            if self.state.outcome != -1: self.stage = 2
            self.state.save()
            self.save()
            return True
        else: return False
    def move_piece(self, x_from, y_from, x_to, y_to):
        """Validate and apply a move; advances to a new State snapshot."""
        if self.game().move_valid(self.state, self.state.pieces(),
                x_from, y_from, x_to, y_to):
            self.state = self.state.next()
            self.game().move_piece(self.state, self.state.pieces(),
                x_from, y_from, x_to, y_to)
            if self.state.outcome != -1: self.stage = 2
            self.state.save()
            self.save()
            return True
        else: return False
    def remove_piece(self, x, y):
        """Validate and apply a removal; advances to a new State snapshot."""
        if self.game().remove_valid(self.state, self.state.pieces(), x, y):
            self.state = self.state.next()
            self.game().remove_piece(self.state, self.state.pieces(), x, y)
            if self.state.outcome != -1: self.stage = 2
            self.state.save()
            self.save()
            return True
        else: return False
    def selectable(self, x, y):
        """Delegate to the game rules: may the square at (x, y) be selected?"""
        return self.game().selectable(self.state, self.state.pieces(), x, y)
    def current(self, player):
        """True when it is *player*'s turn."""
        return player and player.order == self.state.turn
    def players(self):
        return Player.objects.filter(board=self)
    def player(self, user):
        """The Player row for *user* on this board, or None."""
        return self.players().filter(user=user).first()\
            if user.is_authenticated else None
    def game(self):
        """The static game definition for this board."""
        return games[self.game_id]
    def messages(self):
        return Message.objects.filter(board=self)
    def users(self):
        return map(lambda p: p.user, self.players())
    def join(self, user):
        """Add *user* as the next player in turn order."""
        Player.objects.create(user=user, board=self,
            order=self.players().count()+1)
    def start(self):
        self.stage = 1
        self.save()
    def to_dictionary(self):
        """Template-friendly view of the board and its related objects."""
        return {
            'game': self.game(),
            'code': self.code,
            'state': self.state,
            'players': self.players(),
            'stage': self.stage,
            'time': self.time,
            'messages': self.messages()
        }
class Player(models.Model):
    """A user's seat at one board; ``order`` is the 1-based turn position."""
    user = models.ForeignKey(User,
        on_delete=models.SET_NULL, null=True)
    board = models.ForeignKey(Board, on_delete=models.CASCADE)
    order = models.IntegerField()
    score = models.IntegerField(default=0)
    leader = models.BooleanField(default=False)
    time = models.TimeField(default=time(0, 0, 0))
    class Meta: ordering = ['board', 'order']
    def __str__(self): return self.board.code + " " + self.user.username
    def leave(self):
        """Remove this player, compacting turn orders; hand leadership to the
        first remaining player, or delete the board if nobody is left."""
        for player in self.board.players().filter(order__gt=self.order):
            player.order -= 1
            player.save()
        self.delete()
        if self.leader:
            other_player = self.board.players().first()
            if other_player:
                other_player.leader = True
                other_player.save()
            else:
                self.board.delete()
    def promote(self):
        """Swap turn order with the player immediately before this one."""
        other_player = self.board.players().get(order=self.order-1)
        other_player.order += 1
        other_player.save()
        self.order -= 1
        self.save()
    def demote(self):
        """Swap turn order with the player immediately after this one."""
        other_player = self.board.players().get(order=self.order+1)
        other_player.order -= 1
        other_player.save()
        self.order += 1
        self.save()
    def transfer(self):
        """Move the leader flag from the current leader to this player."""
        other_player = self.board.players().get(leader=True)
        other_player.leader = False
        other_player.save()
        self.leader = True
        self.save()
class State(models.Model):
    """Immutable snapshot of a game position; each mutation creates the next
    snapshot via next(), forming a linked history through ``previous``.

    ``outcome`` is -1 while the game is in progress.
    """
    game_id = models.IntegerField()
    turn = models.IntegerField(default=1)
    stage = models.IntegerField(default=0)
    ply = models.IntegerField(default=0)
    previous = models.ForeignKey('State',
        on_delete=models.CASCADE, null=True)
    outcome = models.IntegerField(default=-1)
    def next(self):
        """Clone this state (and all its pieces) into a new snapshot."""
        state = State.objects.create(
            game_id=self.game_id,
            turn=self.turn,
            stage=self.stage,
            ply=self.ply,
            previous=self,
            outcome=self.outcome
        )
        for row in self.pieces():
            for piece in row:
                if piece: piece.next(state)
        return state
    def end_stage(self, skip=1):
        self.stage = self.stage + skip
        self.save()
    def end_turn(self, skip=1):
        """Advance to the next player's turn (wrapping) and reset the stage."""
        self.turn = self.turn % self.game().players + skip
        self.ply = self.ply + 1
        self.stage = 0
        self.save()
    def end_game(self, winner=0):
        self.outcome = winner
        self.save()
    def set_piece(self, type_id, owner_id, x, y):
        """Replace whatever occupies (x, y); type_id -1 leaves it empty.
        Records a Change so the move can be highlighted."""
        Piece.objects.filter(state=self, x=x, y=y).delete()
        if type_id != -1: Piece.objects.create(
            state=self,
            type_id=type_id,
            x=x, y=y,
            owner_id=owner_id
        )
        Change.objects.create(state=self, x=x, y=y)
    def place_piece(self, type, owner_id, x, y):
        self.set_piece(type.id, owner_id, x, y)
    def move_piece(self, x_from, y_from, x_to, y_to):
        piece = Piece.objects.filter(state=self, x=x_from, y=y_from).get()
        self.set_piece(-1, 0, x_from, y_from)
        self.set_piece(piece.type_id, piece.owner_id, x_to, y_to)
    def remove_piece(self, x, y):
        self.set_piece(-1, 0, x, y)
    def game(self):
        return games[self.game_id]
    def pieces(self):
        """Board contents as a width x height grid of Piece or None."""
        piece_set = Piece.objects.filter(state=self)
        pieces = []
        for x in range(0, self.game().width):
            col_set = piece_set.filter(x=x)
            col = []
            for y in range(0, self.game().height):
                col.append(col_set.filter(y=y).first())
            pieces.append(col)
        return pieces
    def changes(self):
        return Change.objects.filter(state=self, state__ply=self.ply)
    def modified(self, x, y):
        """True when the square at (x, y) changed during the current ply."""
        return any(map(lambda c: c.x == x and c.y == y, self.changes()))
    def to_dictionary(self):
        # NOTE(review): State defines no ``number`` field or attribute in this
        # file — this method likely raises AttributeError; confirm.
        return {
            'game': self.game(),
            'turn': self.turn,
            'stage': self.stage,
            'previous': self.previous,
            'number': self.number,
            'outcome': self.outcome,
            'pieces': self.pieces()
        }
class Change(models.Model):
    """Marks one board square as modified within a State snapshot."""
    state = models.ForeignKey(State, on_delete=models.CASCADE)
    x = models.IntegerField()
    y = models.IntegerField()
class Piece(models.Model):
    """One piece on the board of a specific State snapshot."""
    state = models.ForeignKey(State,
        on_delete=models.CASCADE)
    type_id = models.IntegerField()
    owner_id = models.IntegerField()
    x = models.IntegerField()
    y = models.IntegerField()
    class Meta: ordering = ['state']
    # NOTE(review): State defines no ``board`` or ``number`` attribute in this
    # file, so this __str__ likely raises AttributeError — confirm.
    def __str__(self): return self.state.board.code + ":"\
        + str(self.state.number) + ":" + str(self.id)
    def next(self, state):
        """Copy this piece into the next State snapshot."""
        return Piece.objects.create(
            state=state,
            type_id=self.type_id,
            owner_id=self.owner_id,
            x=self.x,
            y=self.y
        )
    def type(self):
        """Static piece-type definition from the game rules."""
        return self.state.game().types[self.type_id]
    def texture(self):
        return self.type().texture(self.owner_id)
    def owner(self, board):
        # owner_id is 1-based; players() is ordered by ``order``.
        return board.players()[self.owner_id-1]
    def to_dictionary(self):
        """Template-friendly view of this piece."""
        return {
            'state': self.state,
            'type': self.type(),
            'owner_id': self.owner_id,
            'x': self.x,
            'y': self.y,
            'texture': self.texture()
        }
class Message(models.Model):
    """One chat message posted to a board, newest first."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    message = models.CharField(max_length=500)
    board = models.ForeignKey(Board, on_delete=models.CASCADE)
    time = models.DateTimeField(auto_now=True)
    class Meta: ordering = ['board', '-time']
    def __str__(self): return self.board.code + ":" + str(self.id)
4860738 | <filename>bigsi/matrix/bitmatrix.py
from bitarray import bitarray
NUM_ROWS_KEY = "number_of_rows"
NUM_COLS_KEY = "number_of_cols"
class BitMatrix(object):
    """
    Manages the gets and sets of the bitmatrix to the various storage backends.
    Does not know the concept of a kmer.
    """

    def __init__(self, storage):
        self.storage = storage
        # Dimensions are persisted in the storage backend under fixed keys.
        self.num_rows = self.storage.get_integer(NUM_ROWS_KEY)
        self.num_cols = self.storage.get_integer(NUM_COLS_KEY)

    @classmethod
    def create(cls, storage, rows, num_rows, num_cols):
        """Persist *rows* (bitarrays) and the matrix dimensions, then load."""
        storage.set_bitarrays(range(num_rows), rows)
        storage.set_integer(NUM_ROWS_KEY, num_rows)
        storage.set_integer(NUM_COLS_KEY, num_cols)
        storage.sync()
        return cls(storage)

    def get_row(self, row_index):
        # Slice off padding bits beyond num_cols.
        return self.storage.get_bitarray(row_index)[: self.num_cols]

    def get_rows(self, row_indexes, remove_trailing_zeros=True):
        """Yield rows; set remove_trailing_zeros=False to skip the slow slicing."""
        ## Only need to slice for merging (it's a lot slower)
        # Takes advantage of batching in storage engine if available
        bitarrays=self.storage.get_bitarrays(row_indexes)
        if remove_trailing_zeros:
            return (ba[: self.num_cols] for ba in bitarrays)
        else:
            return bitarrays

    def set_row(self, row_index, bitarray):
        return self.storage.set_bitarray(row_index, bitarray)

    def set_rows(self, row_indexes, bitarrays):
        # Takes advantage of batching in storage engine if available
        return self.storage.set_bitarrays(row_indexes, bitarrays)

    def set_num_cols(self, num_cols):
        """Update the column count in memory and in the storage backend."""
        self.num_cols = num_cols
        self.storage.set_integer(NUM_COLS_KEY, self.num_cols)

    def get_column(self, column_index):
        """Read one column by probing every row (O(num_rows) lookups)."""
        ## This is very slow, as we index row-wise. Need to know the number of rows, so must be done elsewhere
        return bitarray(
            "".join(
                [
                    str(int(i))
                    for i in self.storage.get_bits(
                        list(range(self.num_rows)), [column_index] * self.num_rows
                    )
                ]
            )
        )

    def get_columns(self, column_indexes):
        for column_index in column_indexes:
            yield self.get_column(column_index)

    def insert_column(self, bitarray, column_index):
        """Write one column bit-by-bit; grows num_cols when appending at the end."""
        ## This is very slow, as we index row-wise
        self.storage.set_bits(
            list(range(len(bitarray))),
            [column_index] * len(bitarray),
            bitarray.tolist(),
        )
        if column_index >= self.num_cols:
            self.set_num_cols(self.num_cols + 1)
| StarcoderdataPython |
4944215 | import pymongo
import bson
from datetime import datetime, timedelta, date
from pytz import timezone
import pytz
mdb_client = pymongo.MongoClient("mongodb://localhost:27017/")
database_name = 'juice'
def setter_juice(setter_dict=None):
    """Insert one document into the ``comments`` collection.

    Returns the inserted ObjectId, or 0 when the input is empty or None.
    """
    # FIX: avoid the mutable {} default-argument pitfall.
    if setter_dict is None:
        setter_dict = {}
    database = mdb_client[database_name]
    cursor_object = database['comments']
    if setter_dict:
        return_obj = cursor_object.insert_one(setter_dict)
        return return_obj.inserted_id
    else:
        return 0
def getter_juice(finder_dict=None, projection_dict=None):
    """Query the ``comments`` collection and return matches as a list.

    The ``_id`` field is always excluded from the projection.
    """
    # BUG FIX: the original used mutable {} defaults and then mutated
    # projection_dict in place, so the '_id' exclusion leaked across calls
    # and into callers' dicts.  Copy before mutating.
    finder_dict = dict(finder_dict) if finder_dict else {}
    projection_dict = dict(projection_dict) if projection_dict else {}
    projection_dict['_id'] = False
    database = mdb_client[database_name]
    cursor_object = database['comments']
    cursor_result = cursor_object.find(finder_dict, projection=projection_dict)
    return list(cursor_result)
def del_comments_all():
    """Drop the entire ``comments`` collection (irreversible)."""
    database = mdb_client[database_name]
    cursor_object = database['comments']
    cursor_object.drop()
9627259 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 20 12:17:22 2018
@author: eemeg
An example of the ICASAR software with synthetic data
"""
#%% Imports
import numpy as np
import matplotlib.pyplot as plt
import pickle # used for opening synthetic data
from pathlib import Path
import icasar
from icasar.icasar_funcs import ICASAR
from icasar.aux import col_to_ma, r2_to_r3
#%% Things to set
ICASAR_settings = {"n_comp" : 5, # number of components to recover with ICA (ie the number of PCA sources to keep)
                   "bootstrapping_param" : (200, 0), # (number of runs with bootstrapping, number of runs without bootstrapping)
                   "tsne_param" : (30, 12), # (perplexity, early_exaggeration)
                   "ica_param" : (1e-2, 150), # (tolerance, max iterations)
                   "hdbscan_param" : (35,10), # (min_cluster_size, min_samples) Discussed in more detail in Mcinnes et al. (2017). min_cluster_size sets the smallest collection of points that can be considered a cluster. min_samples sets how conservative the clustering is. With larger values, more points will be considered noise.
                   "out_folder" : Path('example_spatial_01_outputs'), # outputs will be saved here
                   "figures" : "png+window"} # if png, saved in a folder as .png. If window, open as interactive matplotlib figures,
                                             # if 'png+window', both.
                                             # default is "window" as 03_clustering_and_manifold is interactive.
#%% Import the data
# The pickle holds the components of the synthetic time series: the
# mixing matrix, spatial sources, per-interferogram noise, a mask, and
# per-pixel geographic coordinates.
with open('synthetic_data.pkl', 'rb') as f:
    A_dc = pickle.load(f) # these are the time courses and are column vectors. They control the strength through time of each of the synthetic sources.
    S_synth = pickle.load(f) # these are the synthetic (spatial) sources and are row vectors.
    N_dc = pickle.load(f) # these are the noise for each interferogarm.
    pixel_mask = pickle.load(f) # this is the same shape as an interferogram, True for pixels that are masked
    lons = pickle.load(f) # the longitudes of the lower left corner of each pixel (i.e. a rank 2 array)
    lats = pickle.load(f) # the latitues of the lower left corner of each pixel.
#%% Make synthetic time series and view it
X_dc = A_dc @ S_synth + N_dc # do the mixing: (time courses) x (spatial sources) + noise
phUnw = X_dc # mixtures are the unwrapped phase
fig1, axes = plt.subplots(2,3) # plot the synthetic sources
for i in range(3):
    axes[0,i].imshow(col_to_ma(S_synth[i,:], pixel_mask))
    axes[1,i].plot(range(A_dc.shape[0]), A_dc[:,i])
    axes[1,i].axhline(0)
fig1.suptitle('Synthetic sources and time courses')
# NOTE(review): Figure.canvas.set_window_title is deprecated in newer
# matplotlib (use fig.canvas.manager.set_window_title) -- confirm the
# matplotlib version this example targets.
fig1.canvas.set_window_title("Synthetic sources and time courses")
fig2, axes = plt.subplots(2,5) # plot the synthetic interferograms
for i, ax in enumerate(np.ravel(axes[:])):
    ax.imshow(col_to_ma(phUnw[i,:], pixel_mask))
fig2.suptitle('Mixtures (intererograms)')
fig2.canvas.set_window_title("Mixtures (intererograms)")
fig3, axes = plt.subplots(1,3, figsize = (11,4)) # plot a schematic of how the data are organised
axes[0].imshow(X_dc, aspect = 500)
axes[0].set_title('Data matix')
axes[1].imshow(pixel_mask)
axes[1].set_title('Mask')
axes[2].imshow(col_to_ma(X_dc[0,:], pixel_mask))
axes[2].set_title('Interferogram 1')
fig3.canvas.set_window_title("Interferograms as row vectors and a mask")
#%% do ICA with ICSAR function
spatial_data = {'mixtures_r2' : phUnw,
                'mask' : pixel_mask,
                'lons' : lons, # for the simplest case, these aren't needed
                'lats' : lats} # for the simplest case, these aren't needed
S_best, time_courses, x_train_residual_ts, Iq, n_clusters, S_all_info, phUnw_mean = ICASAR(spatial_data = spatial_data, **ICASAR_settings)
#%% We can reconstruct the data using the sources and timecourses, but don't forget that ICA returns mean centered sources
X_dc_reconstructed = (time_courses @ S_best) + phUnw_mean # here we add the mean back
X_dc_reconstructed_source0 = (time_courses[:,0:1] @ S_best[0:1,:]) + phUnw_mean # and remake the entire time series using only IC0
ifg_n = 0 # choose an ifg to plot
fig4, axes = plt.subplots(1,3, figsize = (11,4)) # compare original vs reconstructed interferograms
im1 = axes[0].imshow(col_to_ma(X_dc[ifg_n,], pixel_mask))
axes[0].set_title('Original Ifg.')
fig4.colorbar(im1, ax = axes[0])
im2 = axes[1].imshow(col_to_ma(X_dc_reconstructed[ifg_n,], pixel_mask))
axes[1].set_title('Reconstructed Ifg.')
fig4.colorbar(im2, ax = axes[1])
im3 = axes[2].imshow(col_to_ma(X_dc_reconstructed_source0[ifg_n,], pixel_mask))
axes[2].set_title('Reconstructed Ifg. \n (IC0 only)')
fig4.colorbar(im2, ax = axes[2])
fig4.canvas.set_window_title("Reconstructed Data")
#%% Note that the amount of bootstrapping done by ICASAR can also be controlled, and seen in the clustering and 2d manifold plot:
ICASAR_settings["bootstrapping_param"] = (100, 100) # (number of runs with bootstrapping, number of runs without bootstrapping)
ICASAR_settings['out_folder'] = 'example_spatial_01_outputs_part2'
spatial_data = {'mixtures_r2' : phUnw,
                'mask' : pixel_mask,
                'lons' : lons,
                'lats' : lats}
S_best, time_courses, x_train_residual_ts, Iq, n_clusters, S_all_info, phUnw_mean = ICASAR(spatial_data = spatial_data, **ICASAR_settings)
| StarcoderdataPython |
1687457 | <reponame>cpgoncalves/gameplayer
# <NAME> (2015), Game Theory with Python
# Game Theory and Applied A.I. Classes
# Instituto Superior de Ciências Sociais e Políticas (ISCSP)
# University of Lisbon
# <EMAIL>
#
# Three Person's Prisonners' Dilemma Game
#
# For more details see the user manual that comes with the package:
# Gonçalves, C.P. (2015) "Game Player User Manual - A Game Theory Analyzer With Python",
# https://sites.google.com/site/autonomouscomputingsystems/game-player
import gamep
# Build the game configuration: each alternative is one strategy profile
# for the three players together with their payoffs.
# Per the gamep API (presumably): addAlternative(strategies, payoffs, config)
# -- TODO confirm against the gamep module.
config = []
gamep.addAlternative(["Cooperate","Cooperate","Cooperate"],[2,2,2],config)
gamep.addAlternative(["Cooperate","Cooperate","Defect"],[2.5,2.5,1],config)
gamep.addAlternative(["Cooperate","Defect","Cooperate"],[2.5,1,2.5],config)
gamep.addAlternative(["Cooperate","Defect","Defect"],[0,2,2],config)
gamep.addAlternative(["Defect","Cooperate","Cooperate"],[1,2.5,2.5],config)
gamep.addAlternative(["Defect","Cooperate","Defect"],[2,0,2],config)
gamep.addAlternative(["Defect","Defect","Cooperate"],[2,2,0],config)
gamep.addAlternative(["Defect","Defect","Defect"],[1,1,1],config)
# Analyze the assembled three-player game (second argument True -- verbose?
# NOTE(review): confirm the flag's meaning in gamep).
gamep.analyzeGame(config,True)
| StarcoderdataPython |
124835 | """empty message
Revision ID: 09d3732eef24
Revises: <PASSWORD>
Create Date: 2020-03-12 15:13:32.832239
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '09d3732eef24'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``task`` table (Alembic auto-generated migration)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('task',
    sa.Column('id', sa.String(length=50), nullable=False),
    sa.Column('amount', sa.Numeric(precision=10, scale=2), nullable=True),
    sa.Column('trigger', sa.String(length=10), nullable=True),
    sa.Column('category', sa.String(length=100), nullable=True),
    sa.Column('type', sa.String(length=20), nullable=True),
    sa.Column('name', sa.String(length=100), nullable=True),
    sa.Column('trigger_kwargs', sa.String(length=200), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``task`` table, reversing :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('task')
    # ### end Alembic commands ###
| StarcoderdataPython |
11375522 | <reponame>theRealNonso/dappertask<filename>dappertask/api/urls.py
from django.conf.urls import url
from django.urls import include
from rest_framework import routers
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
import api.views as dv
# Two routers are constructed; only ``app_router`` is wired into
# ``urlpatterns`` (under /api/). ``router`` is unused in the code shown here.
router = DefaultRouter(trailing_slash=False)
app_router = routers.DefaultRouter()
urlpatterns = [
    # documentation
    url(r'^docs/', include_docs_urls(title='Dappertask Backend API', public=True)),
    # url(r'^auth/', include('rest_framework_social_oauth2.urls')),
    # User management and registration urls
    url('accounts/', include('rest_registration.api.urls')),
    # tontine urls
    url(r'^api/', include(app_router.urls)),
    # url(regex=r'^tontine/addmember/$',view=tv.AddMemberAndTontine.as_view(),name='addmember'),
]
| StarcoderdataPython |
9642764 | <filename>wsgi/static/wsgi/static/wsgi/static/wsgi/static/wsgi/static/wsgi/static/posts/templatetags/getProfile.py
from django import template
from profiles.models import Profile
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
register= template.Library()
@register.filter
def getProfile(user):
    """Template filter: return the Profile id associated with ``user``.

    NOTE(review): returning an HttpResponseRedirect from a template filter
    is unusual -- templates will render its repr rather than redirect;
    confirm the intended fallback behaviour.
    """
    try:
        profile = Profile.objects.get(user=user)
        return profile.id
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        return HttpResponseRedirect('/')
1788887 | """Everything related to O2 power. Hoooo baby."""
import math
import numpy as np
import pandas as pd
from . import util
def o2_power_ss(speed_series, grade_series=None):
    """Calculates steady-state oxygen consumption in the moderate domain.

    Args:
        speed_series (pd.Series): running speeds in m/s.
        grade_series (pd.Series): decimal grades (0.45 = 45%); defaults to
            all-zero (flat ground).

    Returns:
        pd.Series: steady-state running power in W/kg.

    For more info, see `heartandsole_local/heartandsole/powerutils.py`.
    """
    if grade_series is None:
        grade_series = pd.Series([0] * len(speed_series))
    c_i_series = grade_series.apply(cost_of_inclined_treadmill_running)
    c_aero_series = speed_series.apply(cost_of_wind_running)
    # Total metabolic cost of running (J/kg/m) times speed (m/s) gives
    # instantaneous power (W/kg). (Cleanup: the original computed the sum
    # twice and left an unused ``c_r_series`` local behind.)
    power_series = (c_i_series + c_aero_series) * speed_series
    # The horizontal speed is measured, but the cost of running relates to
    # the distance along the incline, hence the cos(arctan(grade)) factor.
    power_series = power_series / np.cos(np.arctan(grade_series))
    return power_series
def o2_power(speed_series, grade_series=None, time_series=None, tau=20):
    """Calculate O2 consumption in the moderate domain as a time series.

    The steady-state power implied by each (speed, grade) sample is smoothed
    with an exponentially-weighted moving average to model O2 kinetics.

    Args:
        speed_series (pd.Series): running speeds in m/s.
        grade_series (pd.Series): decimal grades; defaults to flat ground.
        time_series (pd.Series): sample times in seconds; defaults to
            uniform 1-second sampling.
        tau: Time constant for the exponentially-weighted moving average.
            According to (Poole and Jones, 2012), this constant can vary from
            10s to over 100s. It decreases with training, reflecting a more
            responsive cardiovascular system. Default 20.

    Note:
        * In the heavy domain, the slow component of O2 consumption kicks
          in, steady-state O2 consumption is higher than predicted by this
          algorithm, and the steady state does not occur for up to 20
          minutes (longer than this algorithm predicts).
        * In the severe domain, the critical power has been exceeded, and
          O2 consumption will reach VO2 max. The slow component's role
          diminishes as the work rate increases.
        * In the extreme domain, the work rate is so high that fatigue sets
          in before VO2max can be attained.
    """
    if time_series is None:
        time_series = pd.Series(range(len(speed_series)))
    # Theoretical steady-state power for each sample.
    ss_power = o2_power_ss(speed_series, grade_series=grade_series)
    ewma_halflife = tau * math.log(2)
    # Even with uniform 1-second samples this util function is needed: it
    # makes the average start at 0 and trend up, rather than letting the
    # first value have all the weight.
    return util.ewma(
        ss_power,
        ewma_halflife,
        time_series=time_series,
    )
def run_power(speed, grade=0.0):
    """Return metabolic running power (W/kg) at the given speed and grade.

    Speed is the measured horizontal speed; the cos(arctan(grade)) factor
    converts to distance along the incline.
    """
    cost_per_meter = run_cost(speed, grade=grade)
    slope_factor = math.cos(math.atan(grade))
    return cost_per_meter * speed / slope_factor
def run_cost(speed, grade=0.0):
    """Calculates the metabolic cost of running.

    See the documentation for powerutils.o2_power_ss for information
    on the scientific basis for this calculation.

    Args:
        speed (float): Running speed in meters per second.
        grade (float): Decimal grade, i.e. 45% = 0.45.

    Returns:
        float: Cost of running on an incline in still air, in Joules/kg/m,
        with distance measured along the incline slope.
    """
    # Incline component from the Minetti curve, plus the aerodynamic
    # component from Pugh / di Prampero.
    incline_cost = cost_of_inclined_treadmill_running(grade)
    aero_cost = cost_of_wind_running(speed)
    return incline_cost + aero_cost
def cost_of_inclined_treadmill_running(grade):
    """Calculates the cost of inclined running according to Minetti.

    This is the metabolic energy (J) needed to move a unit body mass (kg)
    a unit distance (m) along a treadmill belt at steady state (after
    about 4 minutes of running). The estimate is based on oxygen
    consumption, assuming a mostly-carbohydrate fuel mix at roughly
    20.9 kJ of energy per liter of oxygen consumed.

    For more info, see `heartandsole_local/heartandsole/powerutils.py`,
    specifically the documentation for `o2_power_tendency`.

    Args:
        grade (float): Decimal grade, i.e. 20% = 0.20.

    Returns:
        float: Cost of running, in Joules/kg/m according to Minetti curve.
    """
    # The (Minetti, 2002) polynomial fit is only valid for grades shallower
    # than +/-45%, so clip to that range rather than extrapolating.
    # TODO: find a better way to handle steeper grades, e.g. extrapolate
    # assuming a fixed efficiency of lifting/lowering (~0.25?).
    clipped_grade = max(-0.45, min(grade, 0.45))
    # Fifth-order polynomial fit for the metabolic cost of treadmill
    # running (air resistance neglected), in J per meter per kg of body
    # weight, as a function of decimal grade. From (Minetti, 2002).
    return (155.4 * clipped_grade ** 5 - 30.4 * clipped_grade ** 4
            - 43.3 * clipped_grade ** 3 + 46.3 * clipped_grade ** 2
            + 19.5 * clipped_grade + 3.6)
def cost_of_wind_running(speed):
    """Calculate metabolic cost of running against wind resistance.

    Assumes zero wind speed. From (Pugh, 1971) & (di Prampero, 1993).

    The efficiency term is the conversion of metabolic energy into
    mechanical energy when working against a headwind; the friction
    coefficient (J s^2 m^-3 kg^-1) bakes in assumptions about air density
    and the runner's projected area and body weight.

    Args:
        speed (float): Running speed in meters per second.

    Returns:
        float: Aerodynamic cost of running, in Joules per meter traveled
        per kg of body weight.

    TODO:
        * Revisit whether 0.5 is an appropriate efficiency value; it may
          depend on the speed at which the work is being done.
    """
    efficiency = 0.5
    friction_coeff = 0.01
    return friction_coeff * speed ** 2 / efficiency
199237 | <reponame>pygaur/gsshop<gh_stars>1-10
from django.apps import AppConfig
class GsShippingConfig(AppConfig):
    """Django application configuration for the ``gs_shipping`` app."""
    name = 'gs_shipping'
| StarcoderdataPython |
9736255 | # This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests of the transcription and translation methods of Seq objects."""
import unittest
from Bio import Seq
from Bio import Alphabet
from Bio.Alphabet import IUPAC
class TestTranscriptionTranslation(unittest.TestCase):
    """Unit tests for Seq transcription, back-transcription and translation."""

    def test_transcription(self):
        """DNA -> RNA transcription and RNA -> DNA back-transcription."""
        s = "ATA"
        dna = Seq.Seq(s, IUPAC.unambiguous_dna)
        rna = dna.transcribe()
        self.assertEqual(str(rna), "AUA")
        s = "GAAAATTCATTTTCTTTGGACTTTCTCTGAAATCCGAGTCCTAGGAAAGATGCGTGAGATTCTTCATATT"
        dna = Seq.Seq(s, IUPAC.unambiguous_dna)
        rna = dna.transcribe()
        self.assertEqual(
            str(rna),
            "GAAAAUUCAUUUUCUUUGGACUUUCUCUGAAAUCCGAGUCCUAGGAAAGAUGCGUGAGAUUCUUCAUAUU",
        )
        # Round trip: back_transcribe should restore the DNA string.
        s = "GAAAAUUCAUUUUCUUUGGACUUUCUCUGAAAUCCGAGUCCUAGGAAAGAUGCGUGAGAUUCUUCAUAUU"
        rna = Seq.Seq(s, IUPAC.unambiguous_rna)
        dna = rna.back_transcribe()
        self.assertEqual(
            str(dna),
            "GAAAATTCATTTTCTTTGGACTTTCTCTGAAATCCGAGTCCTAGGAAAGATGCGTGAGATTCTTCATATT",
        )

    def test_translation(self):
        """Translation with to_stop=True under several codon tables."""
        # Empty sequence translates to the empty protein.
        s = ""
        dna = Seq.Seq(s, IUPAC.unambiguous_dna)
        protein = dna.translate(to_stop=True)
        self.assertEqual(str(protein), "")
        # A lone stop codon also yields the empty protein.
        s = "TAA"
        dna = Seq.Seq(s, IUPAC.unambiguous_dna)
        protein = dna.translate(to_stop=True)
        self.assertEqual(str(protein), "")
        s = "GAAAATTCATTTTCTTTGGACTTTCTCTGAAATCCGAGTCCTAGGAAAGATGCGTGAGATTCTTCA"
        dna = Seq.Seq(s, IUPAC.unambiguous_dna)
        protein = dna.translate(to_stop=True)
        self.assertEqual(str(protein), "ENSFSLDFL")
        # Translation tables can be given by NCBI id (15) ...
        s = "GAA"
        dna = Seq.Seq(s, IUPAC.unambiguous_dna)
        protein = dna.translate(15, to_stop=True)
        self.assertEqual(str(protein), "E")
        # ... by name ...
        s = "ATA"
        dna = Seq.Seq(s, IUPAC.unambiguous_dna)
        protein = dna.translate("Vertebrate Mitochondrial", to_stop=True)
        self.assertEqual(str(protein), "M")
        # ... or by table abbreviation.
        s = "GAAAATTCATTTTCTTTGGACTTTCTCTGAAATCCGAGTCCTAGGAAAGATGCGTGAGATTCTTCATAT"
        dna = Seq.Seq(s, IUPAC.unambiguous_dna)
        protein = dna.translate("SGC8", to_stop=True)
        self.assertEqual(str(protein), "ENSFSLDFLWNPSPSNDAWDSSY")

    def test_dna_rna_translation(self):
        """DNA and RNA must translate identically; stop handling options."""
        s = "TCAAAAAGGTGCATCTAGATG"
        dna = Seq.Seq(s, IUPAC.unambiguous_dna)
        protein = dna.translate(to_stop=True)
        self.assertIsInstance(protein.alphabet, IUPAC.IUPACProtein)
        self.assertEqual(str(protein), "SKRCI")
        # Without to_stop, stop codons appear as '*' in the result.
        gapped_protein = dna.translate()
        self.assertIsInstance(gapped_protein.alphabet, Alphabet.HasStopCodon)
        self.assertEqual(str(gapped_protein), "SKRCI*M")
        # The table used here has "AGG" as a stop codon:
        p2 = dna.translate(table=2, to_stop=True)
        self.assertEqual(str(p2), "SK")
        p2 = dna.translate(table=2)
        self.assertEqual(str(p2), "SK*CI*M")
        # A custom stop symbol can replace the default '*'.
        p2 = dna.translate(table=2, stop_symbol="+")
        self.assertEqual(str(p2), "SK+CI+M")
        # Same sequence as RNA must give the same translations.
        r = s.replace("T", "U")
        rna = Seq.Seq(r, IUPAC.unambiguous_rna)
        protein = rna.translate(to_stop=True)
        self.assertIsInstance(protein.alphabet, IUPAC.IUPACProtein)
        self.assertEqual(str(protein), "SKRCI")
        gapped_protein = rna.translate()
        self.assertEqual(str(gapped_protein), "SKRCI*M")

    def test_ambiguous(self):
        """Ambiguous nucleotides translate to ambiguous amino acids."""
        s = "RATGATTARAATYTA"
        dna = Seq.Seq(s, IUPAC.ambiguous_dna)
        protein = dna.translate("Vertebrate Mitochondrial")
        self.assertEqual(str(protein), "BD*NL")
        stop_protein = dna.translate("SGC1", to_stop=True)
        self.assertEqual(str(stop_protein), "BD")
if __name__ == "__main__":
    # Run the tests with verbose output when executed directly.
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
| StarcoderdataPython |
9680675 | # coding: utf-8
import os
import io
import pytest
import tarfile
import tempfile
from mock import patch, Mock, MagicMock
import requests
from spacy_lefff import Downloader
from spacy_lefff.melt_tagger import URL_MODEL
def test_url_model():
    """The MElt tagger model archive URL must be reachable (HTTP 200)."""
    response = requests.get(URL_MODEL)
    assert response.status_code == 200
def _mock_response(
        status=200,
        content="CONTENT",
        json_data=None,
        headers=None,
        raise_for_status=None,
        iter_content=None):
    """Build a Mock that mimics a ``requests`` response object."""
    response = Mock()
    # Basic response attributes.
    response.status_code = status
    response.content = content
    response.headers = headers
    # raise_for_status is itself a mock; optionally make it raise.
    response.raise_for_status = Mock()
    if raise_for_status:
        response.raise_for_status.side_effect = raise_for_status
    # iter_content must be iterable for streamed downloads.
    response.iter_content = MagicMock()
    response.iter_content.__iter__.return_value = iter_content
    # Optional .json() payload.
    if json_data:
        response.json = Mock(
            return_value=json_data
        )
    return response
@pytest.fixture(scope='session')
def _tmp_dir(tmpdir_factory):
    '''
    Creating a tar file from a test file in a tmp folder.

    Writes a one-word ``model`` file, packs it into ``model.tar.gz`` in a
    session-scoped tmp dir, deletes the loose file, and returns the dir.
    '''
    dirtmp = tmpdir_factory.mktemp('models')
    tmpfile = dirtmp.join('model')
    tmpfile.write('TEST')
    tar = tarfile.open(os.path.join(dirtmp.strpath, "model.tar.gz"), "w:gz")
    tar.add(tmpfile.strpath, 'model')
    tar.close()
    # Remove the loose file so only the archive remains in the fixture dir.
    os.remove(tmpfile.strpath)
    return dirtmp
@patch('spacy_lefff.downloader.requests.get')
@patch('spacy_lefff.downloader.tarfile')
def test_downloader(mock_tarfile, mock_get, _tmp_dir):
    """Downloader should fetch, untar and install the model from a mocked HTTP response."""
    # NOTE(review): the adjacent string literals below concatenate to
    # ``...filename*=UTF-8model.tar.gz`` -- the RFC 5987 form would need a
    # literal '' between UTF-8 and the name; confirm this is intentional.
    content_disposition = 'attachment; filename="model.tar.gz"; filename*=UTF-8''model.tar.gz'
    model_tarfile = tarfile.open(os.path.join(_tmp_dir.strpath, 'model.tar.gz'), 'r:gz')
    headers = {'content-disposition': content_disposition, 'content-length': 100000}
    mock_resp = _mock_response(headers=headers)
    mock_get.return_value = mock_resp
    mock_tarfile.open.return_value = model_tarfile
    d = Downloader('test', download_dir=_tmp_dir.strpath, url=URL_MODEL)
    test_folder = os.path.join(_tmp_dir.strpath, 'test')
    m = os.path.join(test_folder, 'model')
    assert len(_tmp_dir.listdir()) == 2 #test folder, temp model tar
    f = io.open(m, mode='r', encoding='utf-8')
    #checking if untar model is the same as the one in _tmp_dir tar file
    assert str(f.read()) == 'TEST'
@patch('spacy_lefff.downloader.requests.get')
def test_downloader_failed(mock_get, _tmp_dir):
    """Downloader must raise when the response has no usable payload."""
    mock_resp = _mock_response()
    mock_get.return_value = mock_resp
    with pytest.raises(Exception) as e_info:
        d = Downloader('test', download_dir=_tmp_dir.strpath, url='')
    # Bug fix: Python 3 exceptions have no ``.message`` attribute, so the
    # old ``e_info.value.message`` raised AttributeError; compare the
    # stringified exception instead.
    assert str(e_info.value) == "Couldn't fetch model data."
def test_downloader_data_already_set_up(_tmp_dir, caplog):
    '''
    Testing if data is already set up,
    meaning folder named 'test' already in download_dir.

    In that case the Downloader should skip the download and log an
    informational message instead.
    '''
    if not os.path.exists(os.path.join(_tmp_dir.strpath, 'test')):
        os.mkdir(os.path.join(_tmp_dir.strpath, 'test'))
    d = Downloader('test', download_dir=_tmp_dir.strpath, url=URL_MODEL)
    assert caplog.records[0].levelname == 'INFO'
    assert 'data already set up' in caplog.text
| StarcoderdataPython |
8006330 | from .builder import build_dataloader
from .collate import default_collate
| StarcoderdataPython |
5089021 | from abc import ABC, abstractmethod
import pathlib
from sklearn import base
import _pickle as cPickle
import numpy as np
import pandas as pd
import os
from enum import Enum
from typing import Tuple
from py_neuromodulation import \
(nm_projection,
nm_rereference,
nm_run_analysis,
nm_features,
nm_resample,
nm_define_nmchannels,
nm_IO,
nm_plots,
nm_test_settings)
from py_neuromodulation import nm_notch_filter
class GRIDS(Enum):
    """Definition of possible projection grid types."""
    CORTEX="cortex"
    SUBCORTEX="subcortex"
class PNStream(ABC):
    """Abstract base class for a py_neuromodulation processing stream.

    Subclasses supply data acquisition (``get_data``), coordinate loading
    (``_add_coordinates``), the main loop (``run``) and timestamping
    (``_add_timestamp``); this base class wires up preprocessing
    (rereference, resample, notch filter), feature estimation, projection
    and output saving.
    """

    # Processing components, populated by _set_run().
    resample: nm_resample.Resample
    features: nm_features.Features
    run_analysis: nm_run_analysis.Run
    rereference: nm_rereference.RT_rereference
    notch_filter : nm_notch_filter.NotchFilter
    projection: nm_projection.Projection
    settings: dict
    nm_channels: pd.DataFrame
    # NOTE(review): mutable class-level default, shared across instances
    # until a subclass assigns its own dict -- confirm this is intended.
    coords: dict = {}
    fs: float            # sampling frequency in Hz
    line_noise: float    # power-line frequency in Hz
    VERBOSE: bool
    PATH_SETTINGS: str
    PATH_NM_CHANNELS: str = str()
    PATH_OUT: str = str()
    PATH_GRIDS: str = str()
    feature_arr: pd.DataFrame = pd.DataFrame()
    CH_NAMES_USED: list
    CH_TYPES_USED: list
    FEATURE_IDX: list
    LABEL_IDX: list
    grid_cortex: np.array
    grid_subcortex: np.array
    sess_right: bool = None
    feature_add: pd.DataFrame
    model: base.BaseEstimator

    @abstractmethod
    def __init__(self,
        PATH_SETTINGS=os.path.join(pathlib.Path(__file__).parent.resolve(),\
            "nm_settings.json"),
        PATH_NM_CHANNELS:str = str(),
        PATH_OUT:str = os.getcwd(),
        PATH_GRIDS:str = pathlib.Path(__file__).parent.resolve(),
        VERBOSE:bool = True) -> None:
        """Store paths, read and validate settings, and load any grids
        required by the projection settings."""
        self.PATH_SETTINGS = PATH_SETTINGS
        self.PATH_NM_CHANNELS = PATH_NM_CHANNELS
        self.PATH_OUT = PATH_OUT
        self.VERBOSE = VERBOSE
        self.settings = nm_IO.read_settings(self.PATH_SETTINGS)
        nm_test_settings.test_settings(self.settings)
        # Grids are only loaded when at least one projection is requested.
        if True in [self.settings["methods"]["project_cortex"],
                    self.settings["methods"]["project_subcortex"]]:
            self.grid_cortex, self.grid_subcortex = self.get_grids(
                self.settings,
                self.PATH_GRIDS,
                GRIDS
            )

    @abstractmethod
    def _add_coordinates(self) -> None:
        """Write self.coords either from bids or from separate file

        This method is implemented differently
        for BIDS and real time data anylsis
        """
        pass

    @abstractmethod
    def get_data(self, ) -> np.array:
        """Get new data batch from acquisition device or from BIDS"""
        pass

    @abstractmethod
    def run(self, ):
        """In this function data is first acquied iteratively
        1. self.get_data()
        2. data processing is called:
        self.run_analysis.process_data(data) to calculate features
        3. optional postprocessing
        e.g. plotting, ML estimation is done
        """
        pass

    @abstractmethod
    def _add_timestamp(self, feature_series: pd.Series, idx:int=None) -> pd.Series:
        """Add to feature_series "time" keyword

        For Bids specify with fs_features, for real time analysis with current time stamp
        """
        pass

    def load_model(self, model_name: str):
        """Load sklearn model, that utilizes predict"""
        with open(model_name, 'rb') as fid:
            self.model = cPickle.load(fid)

    def _set_run(self):
        """Initialize preprocessing, and feature estimation modules

        Requires ``self.nm_channels``, ``self.fs`` and ``self.line_noise``
        to be populated first; wires everything into ``self.run_analysis``.
        """
        self.CH_NAMES_USED, self.CH_TYPES_USED, self.FEATURE_IDX, self.LABEL_IDX = \
            self._get_ch_info(self.nm_channels)
        self.features = self._set_features(self.settings,
            self.CH_NAMES_USED,
            self.fs,
            self.line_noise,
            self.VERBOSE
        )
        self.resample = self._set_resampling(self.settings, self.fs)
        self.rereference, self.nm_channels = self._set_rereference(
            self.settings, self.nm_channels
        )
        # If raw resampling is enabled, the notch filter must operate at
        # the resampled frequency instead of the acquisition frequency.
        self.notch_filter = self._set_notch_filer(
            settings = self.settings,
            fs = self.fs
                 if self.settings["methods"]["raw_resampling"] is False
                 else
                 self.settings["raw_resampling_settings"]["resample_freq"],
            line_noise = self.line_noise
        )
        self.projection = self._get_projection(self.settings, self.nm_channels)
        if self.projection is not None:
            self.sess_right = self._get_sess_lat(self.coords)
        else:
            self.sess_right = None
        self.run_analysis = nm_run_analysis.Run(
            features=self.features,
            settings=self.settings,
            reference=self.rereference,
            projection=self.projection,
            resample=self.resample,
            notch_filter=self.notch_filter,
            verbose=self.VERBOSE,
            feature_idx=self.FEATURE_IDX
        )

    def _set_features(self, settings:dict,
        CH_NAMES_USED: list,
        fs: int,
        line_noise: int,
        VERBOSE:bool) -> nm_features.Features:
        """initialize feature class from settings"""
        return nm_features.Features(
            settings,
            CH_NAMES_USED,
            fs,
            line_noise,
            VERBOSE
        )

    def set_fs(self, fs: int) -> None:
        """Record the sampling frequency (Hz)."""
        self.fs = fs

    def _set_rereference(
        self,
        settings:dict,
        nm_channels:pd.DataFrame
        ) -> tuple[nm_rereference.RT_rereference, pd.DataFrame]:
        """Initialize nm_rereference and update nm_channels

        nm_channels are updated if no rereferencing is specified

        Parameters
        ----------
        settings : dict
            loaded nm_settings
        nm_channels : pd.DataFrame
            channel description table

        Returns
        -------
        Tuple
            nm_rereference object, updated nm_channels DataFrame
        """
        if settings["methods"]["re_referencing"] is True:
            rereference = nm_rereference.RT_rereference(
                nm_channels, split_data=False)
        else:
            rereference = None
            # reset nm_channels from default values
            nm_channels["rereference"] = None
            nm_channels["new_name"] = nm_channels["name"]
        return rereference, nm_channels

    def _set_resampling(self, settings:dict, fs: int) -> nm_resample.Resample:
        """Initialize Resampling

        Returns None when raw resampling is disabled in the settings.

        Parameters
        ----------
        settings : dict
        fs : int

        Returns
        -------
        nm_resample.Resample
        """
        if settings["methods"]["raw_resampling"] is True:
            resample = nm_resample.Resample(settings, fs)
        else:
            resample = None
        return resample

    def _set_notch_filer(self,
        settings : dict,
        fs : int,
        line_noise : int,
        notch_widths : int = 3,
        trans_bandwidth : int = 15
        ) -> nm_notch_filter.NotchFilter:
        """Initialize the notch filter, or return None when disabled."""
        if settings["methods"]["notch_filter"] is True:
            notch_filter = nm_notch_filter.NotchFilter(
                fs=fs,
                line_noise=line_noise,
                notch_widths=notch_widths,
                trans_bandwidth=trans_bandwidth
            )
        else:
            notch_filter = None
        return notch_filter

    def set_linenoise(self, line_noise: int) -> None:
        """Record the power-line frequency (Hz)."""
        self.line_noise = line_noise

    @staticmethod
    def get_grids(
        settings: dict,
        PATH_GRIDS: str,
        GRID_TYPE: GRIDS
        ) -> Tuple:
        """Read settings specified grids

        Parameters
        ----------
        settings : dict
        PATH_GRIDS : str
        GRID_TYPE : GRIDS

        Returns
        -------
        Tuple
            grid_cortex, grid_subcortex,
            might be None if not specified in settings
        """
        if settings["methods"]["project_cortex"] is True:
            grid_cortex = nm_IO.read_grid(PATH_GRIDS, GRID_TYPE.CORTEX)
        else:
            grid_cortex = None
        if settings["methods"]["project_subcortex"] is True:
            grid_subcortex = nm_IO.read_grid(PATH_GRIDS, GRID_TYPE.SUBCORTEX)
        else:
            grid_subcortex = None
        return grid_cortex, grid_subcortex

    def _get_projection(
        self,
        settings: dict,
        nm_channels: pd.DataFrame
        ) -> nm_projection.Projection:
        """Return projection of used coordinated and grids

        Returns None when neither cortical nor subcortical projection is
        enabled in the settings.
        """
        if any((settings["methods"]["project_cortex"],
                settings["methods"]["project_subcortex"])):
            projection = nm_projection.Projection(
                settings=settings,
                grid_cortex=self.grid_cortex,
                grid_subcortex=self.grid_subcortex,
                coords=self.coords,
                nm_channels=nm_channels,
                plot_projection=False
            )
        else:
            projection = None
        return projection

    @staticmethod
    def _get_ch_info(nm_channels: pd.DataFrame):
        """Get used feature and label info from nm_channels"""
        CH_NAMES_USED = nm_channels[nm_channels["used"] == 1]["new_name"].tolist()
        CH_TYPES_USED = nm_channels[nm_channels["used"] == 1]["type"].tolist()
        # used channels for feature estimation
        FEATURE_IDX = np.where(nm_channels["used"] &
                               ~nm_channels["target"])[0].tolist()
        # If multiple targets exist, select only the first
        LABEL_IDX = np.where(nm_channels["target"] == 1)[0]
        return CH_NAMES_USED, CH_TYPES_USED, FEATURE_IDX, LABEL_IDX

    @staticmethod
    def _get_nm_channels(PATH_NM_CHANNELS:str, **kwargs) -> pd.DataFrame:
        """Read nm_channels from path or specify via BIDS arguments.

        Nexessary parameters are then
        ch_names (list),
        ch_types (list),
        bads (list)
        ECOG_Only (bool)
        """
        if PATH_NM_CHANNELS and os.path.isfile(PATH_NM_CHANNELS):
            nm_channels = pd.read_csv(PATH_NM_CHANNELS)
        elif None not in [kwargs.get('ch_names', None),
                          kwargs.get('ch_types', None),
                          kwargs.get('bads', None),
                          kwargs.get('ECOG_ONLY', None)]:
            nm_channels = nm_define_nmchannels.set_channels_by_bids(
                ch_names=kwargs.get('ch_names'),
                ch_types=kwargs.get('ch_types'),
                bads=kwargs.get('bads'),
                ECOG_ONLY=kwargs.get('ECOG_ONLY'))
        # NOTE(review): if neither branch matches, nm_channels is unbound
        # and this raises UnboundLocalError -- confirm callers always supply
        # either a valid path or the full BIDS kwarg set.
        return nm_channels

    @staticmethod
    def _get_sess_lat(coords):
        """Infer session laterality from which hemisphere has cortex coords.

        NOTE(review): returns None implicitly when both hemispheres contain
        positions -- confirm that case cannot occur upstream.
        """
        if len(coords["cortex_left"]["positions"]) == 0:
            sess_right = True
        elif len(coords["cortex_right"]["positions"]) == 0:
            sess_right = False
        return sess_right

    def save_sidecar(self, folder_name: str):
        """Save sidecar incuding fs, line_noise, coords, sess_right to
        PATH_OUT and subfolder 'folder_name'"""
        sidecar = {
            "fs" : self.fs,
            "line_noise" : self.line_noise,
            "coords" : self.coords,
            "sess_right" : self.sess_right
        }
        # Grids and projection matrices are only included when the
        # corresponding projection was enabled.
        if self.settings["methods"]["project_cortex"]:
            sidecar["grid_cortex"] = self.grid_cortex
            sidecar["proj_matrix_cortex"] = \
                self.projection.proj_matrix_cortex
        if self.settings["methods"]["project_subcortex"]:
            sidecar["grid_subcortex"] = self.grid_subcortex
            sidecar["proj_matrix_subcortex"] = \
                self.projection.proj_matrix_subcortex
        nm_IO.save_sidecar(sidecar, self.PATH_OUT, folder_name)

    def save_settings(self, folder_name: str):
        """Persist the settings dict to PATH_OUT/folder_name."""
        nm_IO.save_settings(self.settings, self.PATH_OUT, folder_name)

    def save_nm_channels(self, folder_name: str):
        """Persist the nm_channels table to PATH_OUT/folder_name."""
        nm_IO.save_nmchannels(self.nm_channels, self.PATH_OUT, folder_name)

    def save_features(self, folder_name: str):
        """Persist the computed feature DataFrame to PATH_OUT/folder_name."""
        nm_IO.save_features(self.feature_arr, self.PATH_OUT, folder_name)

    def save_after_stream(self, folder_name:str) -> None:
        """Save features, settings, nm_channels and sidecar after run"""
        # create derivate folder_name output folder if doesn't exist
        if os.path.exists(os.path.join(self.PATH_OUT, folder_name)) is False:
            os.makedirs(os.path.join(self.PATH_OUT, folder_name))
        self.save_sidecar(folder_name)
        self.save_features(folder_name)
        self.save_settings(folder_name)
        self.save_nm_channels(folder_name)

    def plot_cortical_projection(self):
        """plot projection of cortical grid electrodes on cortex"""
        nmplotter = nm_plots.NM_Plot(ecog_strip=self.projection.ecog_strip,
                                     grid_cortex=self.projection.grid_cortex,
                                     sess_right=self.sess_right)
        nmplotter.plot_cortex()
5058418 | """ Tests for barbante.maintenance.product_templates_tfidf.py.
"""
import nose.tools
import datetime as dt
import barbante.maintenance.product_templates_tfidf as pttfidf
import barbante.maintenance.tasks as maintenance
from barbante.maintenance.tests.fixtures.MaintenanceFixture import MaintenanceFixture
import barbante.tests.dummy_data_populator as dp
import barbante.tests as tests
ACCEPTABLE_ON_THE_FLY_VS_FROM_SCRATCH_DEVIATION = 0.05
""" Since the DF of terms is dynamically updated, and we do not want to cope with
a strength update for each and every pair of products affected by a tiny modification
of some term's DF, we allow for a subtle variation between on-the-fly and from-scratch
values, by design.
"""
class TestProductTemplatesTfidf(MaintenanceFixture):
""" Test class for product templates based on tfidf similarity.
"""
    def setup(self):
        """Extend the fixture setup by generating the TFIDF product templates."""
        super().setup()
        pttfidf.generate_templates(self.session_context)
def _find_language(self, product_id):
product = self.session_context.data_proxy.fetch_products([product_id], ["language"]).get(product_id)
if product is None:
raise AttributeError("invalid product_id")
return product.get("language", "unknown")
def test_templates(self):
""" Tests the product templates (tfidf) that are computed and saved in the db.
"""
# Economia
for i in range(1, dp.N_PROD_ECONOMIA + 1):
target = "p_eco_" + str(i)
templates = pttfidf.get_product_templates_tfidf(self.session_context, [target]).get(target, {})
language = self._find_language(target)
nose.tools.ok_(language not in [None, "unknown"], "Could not retrieve the language correctly")
if language == "portuguese":
nose.tools.ok_(len(templates) > 0,
"No templates were generated for product " + target)
nose.tools.eq_(templates[0][1][:6], "p_eco_",
"A questionable template order was obtained " +
"for product %s: %s" % (target, templates))
for _, template_id in templates:
nose.tools.ok_("expired" not in template_id, "An expired template was obtained")
# Esportes
for i in range(1, dp.N_PROD_ESPORTES + 1):
target = "p_esp_" + str(i)
templates = pttfidf.get_product_templates_tfidf(self.session_context, [target]).get(target, {})
language = self._find_language(target)
nose.tools.ok_(language not in [None, "unknown"], "Could not retrieve the language correctly")
if language == "portuguese":
nose.tools.ok_(len(templates) > 0,
"No templates were generated for product " + target)
nose.tools.eq_(templates[0][1][:6], "p_esp_",
"A questionable template order was obtained " +
"for product %s: %s" % (target, templates))
for _, template_id in templates:
nose.tools.ok_("expired" not in template_id, "An expired template was obtained")
# Musica
for i in range(1, dp.N_PROD_MUSICA + 1):
target = "p_mus_" + str(i)
templates = pttfidf.get_product_templates_tfidf(self.session_context, [target]).get(target, {})
language = self._find_language(target)
nose.tools.ok_(language not in [None, "unknown"], "Could not retrieve the language correctly")
if language == "portuguese":
nose.tools.ok_(len(templates) > 0,
"No templates were generated for product " + target)
nose.tools.eq_(templates[0][1][:6], "p_mus_",
"A questionable template order was obtained " +
"for product %s: %s" % (target, templates))
# Tecnologia
for i in range(1, dp.N_PROD_TECNOLOGIA + 1):
target = "p_tec_" + str(i)
templates = pttfidf.get_product_templates_tfidf(self.session_context, [target]).get(target, {})
language = self._find_language(target)
nose.tools.ok_(language not in [None, "unknown"], "Could not retrieve the language correctly")
if language == "portuguese":
nose.tools.ok_(len(templates) > 0,
"No templates were generated for product " + target)
nose.tools.eq_(templates[0][1][:6], "p_tec_",
"A questionable template order was obtained " +
"for product %s: %s" % (target, templates))
def test_templates_avoiding_almost_identical_products(self):
""" Tests whether nearly identical products are NOT templates of one another.
"""
for i in range(1, dp.N_PROD_NONSENSE):
target = "p_nonsense_" + str(i)
templates = pttfidf.get_product_templates_tfidf(self.session_context, [target]).get(target, {})
nose.tools.ok_(len(templates) > 0,
"No templates were generated for product " + target)
template_products = [t[1] for t in templates]
way_too_similar = "p_nonsense_" + str(i + 1)
nose.tools.ok_(way_too_similar not in template_products,
"Nearly identical templates!")
def test_product_product_strengths_tfidf_from_scratch_versus_incremental(self):
""" Tests whether the product x product strengths (TFIDF) generated on a step-by-step basis
match exactly those created from scratch.
"""
# inner method to compare strengths
def compare_strengths(pair_of_products):
strength1 = strengths_incremental.get(pair_of_products, 0.0)
strength2 = strengths_from_scratch[pair_of_products]
nose.tools.ok_(
"Strengths do not match for product pair (%s, %s): " % (pair_of_products[0], pair_of_products[1]) +
"[incremental --> %.6f] [from scratch --> %.6f]" % (strength1, strength2),
abs(strength1 - strength2) < ACCEPTABLE_ON_THE_FLY_VS_FROM_SCRATCH_DEVIATION)
# ---
# inner method to compare templates tfidf
def compare_templates(product):
templates1 = templates_incremental.get(product, (None, []))
templates2 = templates_from_scratch.get(product, (None, []))
nose.tools.eq_(len(templates1[1]), len(templates2[1]),
"Numbers of incremental and from-scratch templates do not match")
for idx in range(len(templates1[1])):
strength_incremental = templates1[1][idx][0]
strength_from_scratch = templates2[1][idx][0]
nose.tools.ok_(
abs(strength_incremental - strength_from_scratch) < ACCEPTABLE_ON_THE_FLY_VS_FROM_SCRATCH_DEVIATION,
"Templates do not approximately match for product %s: " % product +
"[incremental --> %s] [from scratch --> %s]" % (str(templates1), str(templates2)))
# ---
all_products = list(self.db_proxy.fetch_all_product_ids())
sentence = " produto para teste de atualização de similaridade via tfidf"
products = [{"external_id": product[0],
"resources": {"title": product[0]},
"date": self.session_context.get_present_date(),
"expiration_date": self.session_context.get_present_date() + dt.timedelta(days=30),
"full_content": product[1],
"language": "portuguese"} for product in
[("p_new_1", "Primeiro" + sentence),
("p_new_2", "Segundo" + sentence),
("p_new_3", "Terceiro" + sentence),
("p_new_4", "Quarto" + sentence)]]
# updates strengths after each new product
for product in products:
self.db_proxy.insert_product(product)
maintenance.process_product(self.session_context, product["external_id"])
# saves locally the strengths and the templates that were obtained incrementally
strengths_incremental = self.db_proxy.fetch_product_product_strengths_tfidf()
templates_incremental = self.db_proxy.fetch_product_templates(all_products)
# regenerates all strengths from scratch
pttfidf.generate_templates(self.session_context)
# saves locally the strengths and the templates that were obtained from scratch
strengths_from_scratch = self.db_proxy.fetch_product_product_strengths_tfidf()
templates_from_scratch = self.db_proxy.fetch_product_templates(all_products)
nose.tools.eq_(len(strengths_incremental), len(strengths_from_scratch),
"Number of non-zero strengths tfidf do not match")
for product_pair in strengths_from_scratch:
compare_strengths(product_pair)
for product_pair in strengths_incremental:
compare_strengths(product_pair)
for product in all_products:
compare_templates(product)
def test_multi_attribute_similarity(self):
""" Tests whether the product-product similarities respect the customer-defined weights and filters.
WARNING: This test relies heavily on the attributes' weight distribution below:
- full_content: 0.6 (TEXT - non-persistent)
- resources.title: 0.1 (TEXT - persistent)
- category: 0.1 (FIXED)
- source: 0.1 (LIST)
- price: 0.1 (NUMERIC)
The 'language' attribute is the only attribute configured to be used as filter.
Changing these settings in config/barbante_UnitTest.yml is a sure way to break the present test.
"""
product = "p_empty"
strengths = self.db_proxy.fetch_product_product_strengths_tfidf()
other_product = "p_empty_with_disjoint_title"
nose.tools.ok_(abs(strengths[(product, other_product)] - 0.9) < tests.FLOAT_DELTA,
"Wrong similarity (%s, %s)" % (product, other_product))
other_product = "p_empty_with_two_thirds_of_source_list"
nose.tools.ok_(abs(strengths[(product, other_product)] - (0.9 + 0.1 * 2/3)) < tests.FLOAT_DELTA,
"Wrong similarity (%s, %s)" % (product, other_product))
nose.tools.ok_(abs(strengths[(other_product, product)] - 1) < tests.FLOAT_DELTA,
"Wrong similarity (%s, %s)" % (other_product, product))
other_product = "p_empty_with_different_language"
nose.tools.ok_((product, other_product) not in strengths,
"Wrong similarity (%s, %s)" % (product, other_product))
other_product = "p_empty_with_different_category"
nose.tools.ok_(abs(strengths[(product, other_product)] - 0.9) < tests.FLOAT_DELTA,
"Wrong similarity (%s, %s)" % (product, other_product))
other_product = "p_empty_with_missing_category"
nose.tools.ok_(abs(strengths[(product, other_product)] - 0.9) < tests.FLOAT_DELTA,
"Wrong similarity (%s, %s)" % (product, other_product))
other_product = "p_empty_with_half_price"
nose.tools.ok_(abs(strengths[(product, other_product)] - 0.95) < tests.FLOAT_DELTA,
"Wrong similarity (%s, %s)" % (product, other_product))
nose.tools.ok_(abs(strengths[(other_product, product)] - 0.95) < tests.FLOAT_DELTA,
"Wrong similarity (%s, %s)" % (other_product, product))
# TODO (Vinicius) We are missing a test using a date-type attribute here
other_product = "p_empty_with_many_differences"
nose.tools.ok_(abs(strengths[(product, other_product)] - (0.6 + 0.1 * 2/3 + 0.1 * 0.5)) < tests.FLOAT_DELTA,
"Wrong similarity (%s, %s)" % (product, other_product))
nose.tools.ok_(abs(strengths[(other_product, product)] - (0.7 + 0.1 * 0.5)) < tests.FLOAT_DELTA,
"Wrong similarity (%s, %s)" % (other_product, product))
| StarcoderdataPython |
9772722 | <reponame>arosen93/jobflow<filename>tests/utils/test_enum.py
def test_value_enum():
    """Check that ValueEnum members stringify, compare and serialize to their raw values."""
    from jobflow.utils import ValueEnum

    class TempEnum(ValueEnum):
        A = "A"
        B = "B"

    # Each member must behave exactly like its underlying string value.
    for member, raw in ((TempEnum.A, "A"), (TempEnum.B, "B")):
        assert str(member) == raw
        assert member == raw
        assert member.as_dict() == raw
| StarcoderdataPython |
204964 | <reponame>troswell/pynet_terry
print "hello world"
print "something else"
for i in range(7):
print i
| StarcoderdataPython |
3288813 | <gh_stars>100-1000
# Modified Bessel function I_n(z) in the complex plane
# NOTE(review): `cplot` and `besseli` are not defined or imported here; this
# looks like an mpmath interactive snippet -- confirm `from mpmath import *`
# (or equivalent) precedes it in the intended environment.
cplot(lambda z: besseli(1,z), [-8,8], [-8,8], points=50000)
5132440 | from ..src.test import conftest
| StarcoderdataPython |
6673621 | from elasticsearch_dsl.query import MultiMatch
from ..documents import TaskDocument, SkillDocument, UserDocument
def _search_skills(phrase):
    """Full-text lookup of skills matching *phrase*, ids only, ordered by relevance."""
    query = MultiMatch(
        query=phrase,
        fields=['name', 'title', 'description'],
        type='cross_fields')
    skill_search = SkillDocument.search().query(query)
    # _score sort + source(False): return ranked document ids without payloads.
    return skill_search.sort('_score').source(False)
def _search_users(phrase):
    """Lookup of users whose identity fields jointly match every term of *phrase* (ids only)."""
    query = MultiMatch(
        query=phrase,
        fields=['user', 'email', 'first_name', 'last_name'],
        operator='and',
        type='cross_fields')
    return UserDocument.search().query(query).source(False)
def _search_orders(phrase):
    # NOTE(review): despite the name, this searches TaskDocument and the caller
    # exposes the result under the 'tasks' key -- consider renaming for
    # consistency with _search_skills/_search_users.
    task_query = MultiMatch(
        fields=['user', 'first_name', 'last_name', 'discount_name'],
        query=phrase)
    return TaskDocument.search().query(task_query).source(False)
def get_search_queries(phrase):
    """Return querysets to lookup different types of objects.

    Args:
        phrase (str): searched phrase

    Returns:
        dict: Elasticsearch ``Search`` objects keyed by result type
        ('skills', 'users', 'tasks'); nothing is executed yet.
    """
    return {
        'skills': _search_skills(phrase),
        'users': _search_users(phrase),
        'tasks': _search_orders(phrase)}
def search(phrase):
    """Return all matching objects for dashboard views.

    Composes independent search querysets into a single dictionary, converting
    each Elasticsearch result set into a Django queryset.

    Args:
        phrase (str): searched phrase
    """
    results = {}
    for key, es_search in get_search_queries(phrase).items():
        results[key] = es_search.to_queryset()
    return results
| StarcoderdataPython |
1685969 | from errbot import BotPlugin, botcmd
from jenkinsapi.jenkins import Jenkins
from config import JENKINS_URL, JENKINS_USERNAME, JENKINS_PASSWORD
class Jenkinsbot(BotPlugin):
    """Errbot plugin exposing basic Jenkins queries and build triggering."""

    def connect_server(self):
        """Open a fresh authenticated connection to the configured Jenkins server."""
        server = Jenkins(JENKINS_URL, JENKINS_USERNAME, JENKINS_PASSWORD)
        return server

    @botcmd
    def get_jenkins_version(self, mess, args):
        """Reply with the Jenkins server version string."""
        return self.connect_server().version

    @botcmd
    def get_running_jobs(self, mess, args):
        """Send a card listing every job with its description and run/enabled state."""
        jobs = self.connect_server().get_jobs()
        self.log.debug(jobs)
        # NOTE(review): the loop below expects an iterable of (name, job)
        # pairs; confirm get_jobs() can actually return None as checked here.
        if jobs is None:
            return "No running job!"
        job_list = " "
        for job_name, job_instance in jobs:
            job_list += " *Job Name: " + job_instance.name + " Job Description: " + job_instance.get_description() + " Is Job running:"+ str(job_instance.is_running()) + " Is Job enabled:"+ str(job_instance.is_enabled()) +"*\n\n"
        self.send_card(title='Current Jenkins Job Details',
                       body=job_list,
                       color='red',
                       in_reply_to=mess)

    @botcmd
    def get_jenkins_plugins(self, mess, args):
        """Send a card listing each installed Jenkins plugin and its status."""
        plugin_list = ""
        for plugin in self.connect_server().get_plugins().values():
            plugin_list +=" Short Name:" + plugin.shortName + " Long Name: " + plugin.longName + " Version: " + plugin.version + " URL: " + plugin.url + " Active: " + str(plugin.active) + " Enabled: " + str(plugin.enabled) + "\n\n"
        self.send_card(title='Jenkins Plugins Status',
                       body=plugin_list,
                       color='red',
                       in_reply_to=mess)

    @botcmd(split_args_with=None, admin_only=True)
    def start_build(self, mess, args):
        """Trigger a parameterized build.

        The first argument is the job name; the remaining arguments are
        alternating parameter names and values.
        """
        jobname = args.pop(0)
        params = dict([(k, v) for k,v in zip (args[::2], args[1::2])])
        self.connect_server().build_job(jobname, params)
        self.send_card(title='Build Status?',
                       body='Triggered',
                       color='red',
                       in_reply_to=mess)
4974651 | <filename>vectortween/ParametricAnimation.py
from functools import lru_cache
from sympy import Symbol
from sympy.parsing.sympy_parser import parse_expr
from vectortween.Animation import Animation
from vectortween.Tween import Tween
class ParametricAnimation(Animation):
    """
    class to animate the value of a number between startframe and stopframe
    tweening optionally can be applied (default is None, which means linear animation)
    """
    def __init__(self, equation="t", tween=None, noise_fn=None):
        """
        :param equation: parametric equation written as a string, expressed in terms of parameter "t".
        :param tween: optional tweening specification
        :param noise_fn: 2d function accepting a value ( equation(0) <= value <= equation(1)) and a time (0 <= t <= 1).
                         By accepting and using t, the noise is animated in time. By accepting but ignoring t, the noise is only spatial.
        """
        if tween is None:
            tween = ['linear']
        if not equation:
            equation = "t"
        self.tween = tween
        self.T = Tween(*tween)
        self.noise_fn = noise_fn
        self.equation = parse_expr(equation)
        t = Symbol('t')
        # Evaluate the equation at t=0 and t=1 to get the start/end values
        # passed to the Animation base class.
        frm = self.equation.evalf(subs={t: 0})
        to = self.equation.evalf(subs={t: 1})
        super().__init__(frm, to)

    def delayed_version(self, delay):
        """Return a copy of this animation shifted `delay` time units later
        (substitutes t -> t - delay in both equation and noise)."""
        t = Symbol("t")
        new_equation = self.equation.subs(t, t - delay)

        def new_noise_fn(value, t):
            return self.noise_fn(value, t - delay)

        return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
                                   noise_fn=new_noise_fn if self.noise_fn else None)

    def speedup_version(self, factor):
        """Return a copy running `factor` times faster (t -> t * factor)."""
        t = Symbol("t")
        new_equation = self.equation.subs(t, t * factor)

        def new_noise_fn(value, t):
            return self.noise_fn(value, t * factor)

        return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
                                   noise_fn=new_noise_fn if self.noise_fn else None)

    def translated_version(self, amount):
        """Return a copy whose values are offset by `amount`."""
        new_equation = self.equation + amount

        def new_noise_fn(value, t):
            return self.noise_fn(value + amount, t)

        return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
                                   noise_fn=new_noise_fn if self.noise_fn else None)

    def scaled_version(self, amount):
        """Return a copy whose values are multiplied by `amount`."""
        new_equation = self.equation * amount

        def new_noise_fn(value, t):
            return self.noise_fn(value * amount, t)

        return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
                                   noise_fn=new_noise_fn if self.noise_fn else None)

    def scaled_translate_version(self, scale, offset):
        """Return a copy whose values are transformed as value * scale + offset."""
        new_equation = self.equation * scale + offset

        def new_noise_fn(value, t):
            return self.noise_fn(value * scale + offset, t)

        return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
                                   noise_fn=new_noise_fn if self.noise_fn else None)

    def timereversed_version(self):
        """Return a copy that plays backwards (t -> 1 - t)."""
        t = Symbol("t")
        new_equation = self.equation.subs(t, 1 - t)

        def new_noise_fn(value, t):
            return self.noise_fn(value, 1 - t)

        return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
                                   noise_fn=new_noise_fn if self.noise_fn else None)

    #@lru_cache(maxsize=1000)
    def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
        """
        animation happens between startframe and stopframe
        the value is None before birthframe, and after deathframe
        * if birthframe is not specified it defaults to startframe
        * if deathframe is not specified it defaults to stopframe
        initial value is held from birthframe to startframe
        final value is held from stopframe to deathframe
        """
        if birthframe is None:
            birthframe = startframe
        if deathframe is None:
            deathframe = stopframe
        if frame < birthframe:
            return None
        if frame > deathframe:
            return None
        if frame < startframe:
            return self.frm
        if frame > stopframe:
            return self.to
        # Map the frame into the tweened parameter range [0, 1].
        parameter_value = self.T.tween2(frame, startframe, stopframe)
        t = Symbol('t')
        if self.noise_fn is not None:
            if noiseframe is not None:
                nf = noiseframe
            else:
                nf = parameter_value
            # NOTE(review): the noise function is called with the raw frame
            # number as its first argument, not the animated value described in
            # __init__'s docstring -- confirm which contract is intended.
            noise_value = self.noise_fn(frame, nf)
        else:
            noise_value = 0
        return self.equation.evalf(subs={t: parameter_value}) + noise_value
| StarcoderdataPython |
8001905 | from distutils.core import setup
# Packaging metadata for the cayenneLPP module (Cayenne Low Power Packet format).
# NOTE(review): distutils is deprecated (removed in Python 3.12) -- consider
# migrating to setuptools.
setup(
    name = 'cayenneLPP',
    packages = ['cayenneLPP'],
    version = '0.1',  # NOTE(review): does not match the 0.4 tag in download_url -- confirm intended release
    description = 'A module for the Cayenne Low Power Packet format',
    author = '<NAME>',
    author_email = '<EMAIL>',
    url = 'https://github.com/jojo-/py-cayenne-lpp', # use the URL to the github repo
    download_url = 'https://github.com/jojo-/py-cayenne-lpp/archive/0.4.tar.gz',
    keywords = ['Cayenne', 'LPP', 'LoRa', 'Pycom'],
    classifiers = [],
)
| StarcoderdataPython |
1785982 |
#credits?? I even don't know ... everyone changed owner section so if you need credits pm me.
#And Thanks To The Creator Of Autopic This Script Was Made from Snippets From That Script
import requests , re , random
import urllib , os
from telethon.tl import functions
from datetime import datetime
from PIL import Image, ImageDraw, ImageFont
from userbot.events import register
import asyncio
from time import sleep
COLLECTION_STRING = [
"Gravity Falls HD Wallpaper",
"4k-sci-fi-wallpaper",
"Anime Cat Girl Wallpaper",
"To the Moon Wallpaper",
"Fantasy Forest Wallpaper"
]
async def animepp():
    """Pick a random wallpaper collection on getwallpapers.com and download one
    random full-size image from it to ./donottouch.jpg (also fetches the font
    file f.ttf on first use)."""
    # BUG FIX: the stale-file cleanup removed "donot.jpg", a file that is never
    # created anywhere; the download below writes "donottouch.jpg", so that is
    # the file to clean up before fetching a new one.
    os.system("rm -rf donottouch.jpg")
    rnd = random.randint(0, len(COLLECTION_STRING) - 1)
    pack = COLLECTION_STRING[rnd]
    pc = requests.get("http://getwallpapers.com/collection/" + pack).text
    # Collect all full-size image paths from the collection page.
    f = re.compile(r'/\w+/full.+.jpg')
    f = f.findall(pc)
    fy = "http://getwallpapers.com" + random.choice(f)
    print(fy)
    # NOTE(review): the module does `import urllib`, which does not reliably
    # expose urllib.request -- confirm `import urllib.request` at file level.
    if not os.path.exists("f.ttf"):
        urllib.request.urlretrieve("https://github.com/rebel6969/mym/raw/master/Rebel-robot-Regular.ttf", "f.ttf")
    urllib.request.urlretrieve(fy, "donottouch.jpg")
@register(outgoing=True, pattern="^.randpp(?: |$)(.*)")
async def main(event):
    """Userbot command handler: `.randpp` starts an endless loop that replaces
    the account's profile photo with a freshly downloaded random wallpaper,
    then sleeps before repeating."""
    await event.edit("**Changing random profile pic. Check after 10 secs**")
    while True:
        await animepp()
        file = await event.client.upload_file("donottouch.jpg")
        await event.client(functions.photos.UploadProfilePhotoRequest( file))
        # Remove the local copy once it has been uploaded.
        os.system("rm -rf donottouch.jpg")
        await asyncio.sleep(7200) #Edit this to your required needs
| StarcoderdataPython |
5098977 | <filename>examples/subscription_mgmt.py<gh_stars>0
"""This is the demo script to show how administrator can manage subscriptions
and schedules.
This script will not work without replacing parameters with real values.
Its basic goal is to present what can be done with this module and to
ease its usage.
"""
from mstrio.connection import Connection
from mstrio.admin.subscription.subscription import Subscription, EmailSubscription
from mstrio.admin.subscription.content import Content
from mstrio.admin.subscription.subscription_manager import SubscriptionManager, list_subscriptions
from mstrio.admin.schedule import Schedule, ScheduleManager
base_url = "https://<>/MicroStrategyLibrary/api"
username = "some_username"
password = "<PASSWORD>"
conn = Connection(base_url, username, password, login_mode=1)
# create manager for subscriptions on a chosen application
sub_mngr = SubscriptionManager(connection=conn, application_name='MicroStrategy Tutorial')
# get all subscriptions from the given application (it is possible in two ways)
all_subs = list_subscriptions(connection=conn, application_name='MicroStrategy Tutorial')
all_subs = sub_mngr.list_subscriptions()
# execute/delete subscriptions by passing theirs ids or Subscription objects
sub_mngr.execute(['11223344556677889900AABBCCDDEEFF', 'FFEEDDCCBBAA00998877665544332211'])
sub_mngr.delete(['11223344556677889900AABBCCDDEEFF', 'FFEEDDCCBBAA00998877665544332211'])
# list available recipients of the subscription for the given content (default delivery type is an email)
sub_mngr.available_recipients(content_id='11112222333344445555666677778888', content_type='DOCUMENT')
# get a single subscription
sub = Subscription(connection=conn,
subscription_id='AA11BB22CC33DD44EE55FF6677889900',
application_id='00FF99EE88DD77CC66BB55AA44332211')
# list all recipients of the given subscription and all available for this subscription
sub.recipients
sub.available_recipients()
# add/remove recipient(s) with given id(s)
sub.add_recipient(recipients=['1234567890A1234567890A1234567890', '98765432198765432198765432198765'])
sub.remove_recipient(recipients=['1234567890A1234567890A1234567890', '98765432198765432198765432198765'])
# execute a given subscription
sub.execute()
# replace a user with an admin in all of its subscriptions (e.g. when user exits company)
for s in sub_mngr.list_subscriptions(to_dictionary=False):
if '9871239871298712413241235643132A' in [r['id'] for r in s.recipients]:
s.add_recipient(recipients='11111111111111111111111111111111')
s.remove_recipient(recipients='9871239871298712413241235643132A')
# create an email subscription
EmailSubscription.create(
connection=conn, name="New Email Subscription for a Report", application_name="MicroStrategy Tutorial",
contents=Content(id='ABC123ABC123ABC123ABC123ABC12345', type=Content.Type.REPORT),
schedules_ids=['ABC123ABC123ABC123ABC123ABC12345'], recipients=['ABC123ABC123ABC123ABC123ABC12345'])
# get list of schedules (you can filter them by for example name, id or description)
schdl_mngr = ScheduleManager(connection=conn)
all_schdls = schdl_mngr.list_schedules()
# get a single schedule by its id or name and then its properties
schdl = Schedule(connection=conn, name='Some shedule which runs daily')
schdl.list_properties()
| StarcoderdataPython |
4996785 | '''Preprocessing of data for training and prediction.
'''
import re
def prepare_input(name):
    '''Prepares the name of a file for classification or prediction.
    '''
    # Strip commas and normalize case before any other processing.
    cleaned = name.replace(',', '').lower()
    # Keep only the basename if a path was given.
    cleaned = cleaned.rsplit('/', 1)[-1]
    # Turn common word separators into spaces.
    cleaned = cleaned.translate(str.maketrans('._-[]+', '      '))
    # Spell out the ampersand.
    cleaned = cleaned.replace('&', ' and ')
    # Drop remaining punctuation and non-word characters outright.
    cleaned = cleaned.translate(str.maketrans('', '', '\'"`~!@#$%^*()=|;:<>,/?{}'))
    # Separate combined season/episode markers (e.g. s01e02 -> s01 e02).
    cleaned = split_season_episode(cleaned)
    # Collapse runs of whitespace into single spaces.
    return ' '.join(cleaned.split())
def split_season_episode(name):
    '''Splits combined season and episodes numbers into separate words.
    '''
    # Each rule: (regex for a combined marker, template for the split form).
    rules = (
        (r'(?P<sid>s\d+)(?P<eid>e\d+)', '{sid} {eid}'),      # s01e01
        (r'(?P<sid>\d+)x(?P<eid>\d+)', 's{sid} e{eid}'),     # 01x01
        (r's(?P<sid>\d+) ep(?P<eid>\d+)', 's{sid} e{eid}'),  # S1 Ep1
    )
    for regex, template in rules:
        match = re.search(regex, name, re.IGNORECASE)
        if match is None:
            continue
        split_form = template.format(sid=match.group('sid'),
                                     eid=match.group('eid'))
        name = name.replace(match.group(0), split_form)
    return name
| StarcoderdataPython |
4867078 | <filename>mp-tests/test_cap_reading.py
import unittest
import os, sys
import json
# add module dirs to path
# sys.path.append("../")
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..' , 'micropython'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'micropython', 'lib'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'stubs'))
# map python modules to micropython names
import binascii, os, socket, struct, time
sys.modules['ubinascii'] = binascii
# sys.modules['uos'] = os
sys.modules['usocket'] = socket
sys.modules['ustruct'] = struct
sys.modules['utime'] = time
import _config
sys.modules['config'] = _config
# import file to test
import cap_reading
class Test_CapReadingMQTTCb(unittest.TestCase):
    ''' test mqtt_cb callback function
    '''
    def setUp(self):
        '''Reset cap_reading module state and default settings before each test.'''
        # set initial settings
        cap_reading.led_blink = False
        cap_reading.led_blink_count = 10
        cap_reading.settings = {
            'led_status_blink': True,
            'poll_interval': 5,  # time in minutes
            'polling_hours': {  # hour of day
                'start': 8,
                'end': 20
            },
            'location': "nowhere"
        }

    def test_led_control(self):
        '''
        test led control messages are parsed
        states checked:
        - on -> on
        - on -> off
        - off -> on
        - off -> off
        - nonsense message -> no state change
        '''
        topic = cap_reading.SUBSCRIBE_TOPIC[:-1] + b"led"
        cap_reading.led_blink = False
        cap_reading.led_blink_count = 10
        # test status led on/off from both initial states
        start_values = [True, False]
        for start_value in start_values:
            with self.subTest('on'):
                cap_reading.settings['led_status_blink'] = start_value
                message = b"on"
                cap_reading.mqtt_cb(topic, message)
                self.assertTrue(cap_reading.settings['led_status_blink'])
                self.assertFalse(cap_reading.led_blink)
            with self.subTest('off'):
                cap_reading.settings['led_status_blink'] = start_value
                message = b"off"
                cap_reading.mqtt_cb(topic, message)
                self.assertFalse(cap_reading.settings['led_status_blink'])
                self.assertFalse(cap_reading.led_blink)
            with self.subTest('nonsense'):
                # An unrecognized payload must leave the setting untouched.
                cap_reading.settings['led_status_blink'] = start_value
                message = b"nonsense"
                cap_reading.mqtt_cb(topic, message)
                self.assertEqual(cap_reading.settings['led_status_blink'], start_value)
                self.assertFalse(cap_reading.led_blink)
        # test blink parsing: bare "blink" and malformed counts fall back to
        # the default blink count of 10
        messages = [b"blink", b"blink:nonsense", b"blink:5.3"]
        for message in messages:
            with self.subTest(message):
                cap_reading.led_blink = False
                cap_reading.led_blink_count = 10
                # message = b"blink"
                cap_reading.mqtt_cb(topic, message)
                self.assertTrue(cap_reading.led_blink)
                self.assertEqual(cap_reading.led_blink_count, 10)
                self.assertFalse(cap_reading.settings['led_status_blink'])
        # explicit integer blink counts are honored
        for i in range(1, 21):
            with self.subTest('blink:'+str(i)):
                cap_reading.led_blink = False
                cap_reading.led_blink_count = 10
                message = b"blink:" + str(i).encode()
                cap_reading.mqtt_cb(topic, message)
                self.assertTrue(cap_reading.led_blink)
                self.assertEqual(cap_reading.led_blink_count, i)
                self.assertFalse(cap_reading.settings['led_status_blink'])

    def test_location_setting(self):
        '''
        test location messages set location
        '''
        topic = cap_reading.SUBSCRIBE_TOPIC[:-1] + b"location"
        locations = ['somewhere', 'not here', 'over there', '123456']
        for location in locations:
            cap_reading.settings['location'] = 'nowhere'  # reset location
            message = location.encode()
            cap_reading.mqtt_cb(topic, message)
            with self.subTest(location):
                self.assertEqual(cap_reading.settings['location'], location)

    def test_pollinterval_setting(self):
        '''
        test poll-interval messages set the polling interval
        '''
        topic = cap_reading.SUBSCRIBE_TOPIC[:-1] + b"poll-interval"
        # test int intervals
        for i in range(1, 60):
            cap_reading.settings['poll_interval'] = 5  # reset interval
            cap_reading.mqtt_cb(topic, i)
            with self.subTest(i):
                self.assertEqual(cap_reading.settings['poll_interval'], i)
        # test not intervals: non-numeric or fractional payloads must be
        # rejected, leaving the previous interval in place
        intervals = ['nan', b'nonsense', 4.4, 4.8, 5.3, 7.8]
        for interval in intervals:
            cap_reading.settings['poll_interval'] = 5  # reset interval
            cap_reading.mqtt_cb(topic, interval)
            with self.subTest(interval):
                self.assertEqual(cap_reading.settings['poll_interval'], 5)

    def test_pollinghours_setting(self):
        '''
        test polling-hours messages set the polling hours
        '''
        topic = cap_reading.SUBSCRIBE_TOPIC[:-1] + b"polling-hours"
        cap_reading.settings['polling_hours'] = {  # hour of day
            'start': 8,
            'end': 20
        }
        # test changing one key at a time
        for k in cap_reading.settings['polling_hours'].keys():
            for i in range(0,24):
                cap_reading.settings['polling_hours'] = {  # reset
                    'start': 8,
                    'end': 20
                }
                message = {k: i}
                cap_reading.mqtt_cb(topic, json.dumps(message))
                with self.subTest(k+str(i)):
                    self.assertEqual(cap_reading.settings['polling_hours'][k], i)
        # test changing both keys at a time
        for i in range(0,24):
            for j in range(0,24):
                cap_reading.settings['polling_hours'] = {  # reset
                    'start': 8,
                    'end': 20
                }
                message = {
                    'start': i,
                    'end': j
                }
                cap_reading.mqtt_cb(topic, json.dumps(message))
                with self.subTest(str(i)+':'+str(j)):
                    self.assertEqual(cap_reading.settings['polling_hours']['start'], i)
                    self.assertEqual(cap_reading.settings['polling_hours']['end'], j)
| StarcoderdataPython |
12864089 | <reponame>sphinx-contrib/zopeext
"""
From https://github.com/brechtm/rinohtype/blob/master/noxutil.py
https://github.com/cjolowicz/nox-poetry/discussions/289
"""
import json
from collections.abc import Iterable
from pathlib import Path
from typing import Optional
from urllib.request import urlopen, Request
from poetry.core.factory import Factory
from poetry.core.semver import parse_single_constraint as parse_version
VERSION_PARTS = ("major", "minor", "patch")
def get_versions(
    dependency: str,
    granularity: str = "minor",
    # ascending: bool = False, limit: Optional[int] = None,
    # allow_prerelease: bool = False,
) -> Iterable[str]:
    """Return all versions of `dependency` considering version constraints

    Args:
        dependency: the name of the dependency
        granularity: yield only the newest patch version of each major/minor
            release

    Returns:
        All versions of `dependency` that match the version constraints defined
        in this project's pyproject.toml, reduced to the newest release per
        `granularity` bucket.

    Note:
        The `ascending`, `limit` and `allow_prerelease` parameters are
        commented out in the signature above and not implemented yet.
    """
    package = Factory().create_poetry(Path(__file__).parent).package
    # Locate this project's declared requirement for the dependency.
    for requirement in package.requires:
        if requirement.name == dependency:
            break
    else:
        raise ValueError(f"{package.name} has no dependency '{dependency}'")
    filtered_versions = [
        version
        for version in all_versions(dependency)
        if requirement.constraint.allows(version)
    ]
    # Bucket versions by their leading parts up to `granularity`
    # (e.g. (major, minor)) and keep only the newest version per bucket.
    parts = VERSION_PARTS[: VERSION_PARTS.index(granularity) + 1]
    result = {}
    for version in filtered_versions:
        key = tuple(getattr(version, part) for part in parts)
        result[key] = max((result[key], version)) if key in result else version
    return [str(version) for version in result.values()]
def all_versions(dependency):
    """Yield every released version of *dependency* from the PyPI JSON API."""
    request = Request(f"https://pypi.org/pypi/{dependency}/json")
    # FIX: close the HTTP response deterministically instead of leaking the
    # socket until garbage collection; json.load reads the body directly.
    with urlopen(request) as response:
        json_data = json.load(response)
    # Iterating the "releases" mapping yields its version-string keys.
    yield from (parse_version(version) for version in json_data["releases"])
| StarcoderdataPython |
5002093 | <reponame>enthought/etsproxy
# proxy module
from pyface.preference.preference_dialog import *
| StarcoderdataPython |
12847371 | from data_module.get_source_file import *
import numpy as np
class DataCleaner(object):
def __init__(self):
    """Create the per-chart data series and populate them from the source file."""
    # data sources for chart 1
    self.data_LD = []
    self.data_I = []
    self.data_LU = []
    self.data_T = []
    # data sources for chart 2
    self.data_RD = []
    self.data_RU = []
    # data sources for chart 3
    self.data_K_R = []
    self.data_K_L = []
    self.data_J = []
    # data sources for chart 4
    self.data_LU_LD = []
    self.data_RD_RU = []
    self.data_Q = []
    # data sources for chart 5
    self.data_K = []
    self.data_P = []
    self.data_R = []
    self.data_S = []
    # read the source file and fill the series above
    self.read_data()
def single_line_cleaning(self, one_line):
    """Trim one raw line down to its payload (from index 1 up to the trailing
    " 0D 0A" CR/LF marker), split it on spaces into hex byte strings, and
    decode all channel values from it."""
    end = one_line.index(" 0D 0A")
    start = 1
    line_result = one_line[start:end]
    item_data_list = line_result.split(" ")
    # print(item_data_list)
    return self.__get_datas(item_data_list)
def get_item_data(self, data, i):
    """Build a single computed value: combine the hex byte strings at *i*
    (high byte) and *i + 1* (low byte) into a 16-bit integer and scale it
    down by 100."""
    high = int(data[i], 16)
    low = int(data[i + 1], 16)
    return ((high << 8) + low) / 100
def get_item_single_data(self, data, i):
    """Build a simple single value: parse the hex byte string at position *i*
    as a plain integer."""
    return int(data[i], 16)
def __get_datas(self, data):
    """Decode all channel values from one cleaned list of hex byte strings.

    Returns a tuple (LD, LU, RD, RU, K_R, K_L, I, T, J, Q, K, P, R, S):
    two-byte fields are scaled by 1/100; single-byte fields are raw ints or
    simple scalings (Q = byte/50, P = byte/10, from the same byte as K).
    """
    LD = self.get_item_data(data, 0)
    LU = self.get_item_data(data, 2)
    RD = self.get_item_data(data, 4)
    RU = self.get_item_data(data, 6)
    K_R = self.get_item_data(data, 10)
    K_L = self.get_item_data(data, 12)
    I = self.get_item_single_data(data, 8)
    T = self.get_item_single_data(data, 19)
    J = self.get_item_single_data(data, 9)
    Q = self.get_item_single_data(data, 16) / 50
    K = self.get_item_single_data(data, 16)
    P = self.get_item_single_data(data, 16) / 10
    R = self.get_item_single_data(data, 17)
    S = self.get_item_single_data(data, 18)
    return LD, LU, RD, RU, K_R, K_L, I, T, J, Q, K, P, R, S
def read_data(self):
source_data = file_read_lines()
for line_str in source_data:
cleaned_data = self.single_line_cleaning(line_str)
# print(cleaned_data)
# LD, LU, RD, RU, K_R, K_L, I, T, J, Q, K, P, R, S
self.data_LD.append(cleaned_data[0])
self.data_LU.append(cleaned_data[1])
self.data_RD.append(cleaned_data[2])
self.data_RU.append(cleaned_data[3])
self.data_K_R.append(cleaned_data[4])
self.data_K_L.append(cleaned_data[5])
self.data_I.append(cleaned_data[6])
self.data_T.append(cleaned_data[7])
self.data_J.append(cleaned_data[8])
self.data_Q.append(cleaned_data[9])
self.data_K.append(cleaned_data[10])
self.data_P.append(cleaned_data[11])
self.data_R.append(cleaned_data[12])
self.data_S.append(cleaned_data[13])
self.data_LU_LD.append(round(cleaned_data[1] - cleaned_data[0], 2))
self.data_RD_RU.append(round(cleaned_data[2] - cleaned_data[3], 2))
def get_chart_data(self, chart_num):
# print("data_LD:%s" % self.data_LD)
# print("data_LU:%s" % self.data_LU)
# print("data_RD:%s" % self.data_RD)
# print("data_K_R:%s" % self.data_K_R)
# print("data_K_L:%s" % self.data_K_L)
# print("data_I:%s" % self.data_I)
# print("data_T:%s" % self.data_T)
# print("data_J:%s" % self.data_J)
# print("data_Q:%s" % self.data_Q)
# print("data_K:%s" % self.data_K)
# print("data_P:%s" % self.data_P)
# print("data_R:%s" % self.data_R)
# print("data_S:%s" % self.data_S)
if chart_num == '1':
# 返回图1 所有需要的数据
return {'data_LD': self.data_LD,
'data_I': self.data_I,
'data_LU': self.data_LU,
'data_T': self.data_T, }
elif chart_num == '2':
# 返回图2 所有需要的数据
return {'data_RD': self.data_RD,
'data_I': self.data_I,
'data_RU': self.data_RU,
'data_T': self.data_T, }
elif chart_num == '3':
return {'data_RD': self.data_K_R,
'data_K_L': self.data_K_L,
'data_I': self.data_I,
'data_T': self.data_T,
'data_J': self.data_J, }
elif chart_num == '4':
return {'data_LU_LD': self.data_LU_LD,
'data_RD_RU': self.data_RD_RU,
'data_K_R': self.data_K_R,
'data_K_L': self.data_K_L,
'data_I': self.data_I,
'data_T': self.data_T,
'data_J': self.data_J,
'data_Q': self.data_Q, }
elif chart_num == '5':
return {'data_K': self.data_K,
'data_P': self.data_P,
'data_R': self.data_R,
'data_S': self.data_S, }
if __name__ == '__main__':
    # Smoke test: dump every series for chart 1 as "name:values".
    cleaner = DataCleaner()
    chart_data = cleaner.get_chart_data("1")
    for series_name, series in chart_data.items():
        print(series_name + ':' + str(series))
| StarcoderdataPython |
12836323 | <gh_stars>0
#
#
# VSCode helper: read the test input from a text file and feed it to stdin.
import sys
import os
f=open(r'.\D\D_input.txt', 'r', encoding="utf-8")
# The input file is referenced by a (relative) Windows path.
# Files created on Windows 10 are saved as Shift-JIS, so the read here must
# specify utf-8 decoding explicitly.
# NOTE: VSCode shows the input file as utf-8, but its content apparently
# ends up Shift-JIS anyway -- see the encoding= above.
sys.stdin=f
#
# Input snippets:
# num = int(input())
# num_list = [int(item) for item in input().split()]
# num_list = [input() for _ in range(3)]
##################################
#
# Solution below (paste-ready for the judge)
# start 21:14
a, b = [int(item) for item in input().split()]  # the two integers A and B
# 素因数リスト
import math
def factor(num):
    """Return the prime factors of *num* by trial division.

    Quirks preserved from the original: the list starts with a sentinel 1,
    the last remaining quotient is always appended (which is 1 when *num*
    factors completely below its square root), and the sqrt limit is fixed
    from the ORIGINAL value of num, not recomputed as num shrinks.
    """
    primes = [1]
    limit = int(math.sqrt(num))
    candidate = 2
    while candidate <= limit:
        if num % candidate:
            candidate += 1
        else:
            primes.append(candidate)
            num //= candidate
    primes.append(num)
    return primes
# 最大公約数
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclidean algorithm).

    Bug fix: the original used ``a & b`` (bitwise AND) instead of ``a % b``
    and looped ``while b > 1``, which hangs (e.g. gcd(12, 8) -> 12 & 8 == 8
    forever) or returns a wrong answer (e.g. gcd(3, 2) -> 2).
    """
    while b:
        a, b = b, a % b
    return a
# Distinct entries of factor(gcd(a, b)). factor() always includes the
# sentinel 1, so this count equals the number of distinct prime factors of
# the gcd, plus one.
cd_list = set(factor(gcd(a, b)))
print(len(cd_list))
| StarcoderdataPython |
6435455 | # -*- coding: utf-8 -*-
# Copyright 2017, A10 Networks
# Author: <NAME>: @mike @t @<EMAIL>
import socket
from struct import *
# checksum functions needed for calculation checksum
def checksum(msg):
    """16-bit ones'-complement checksum over *msg* (little-endian byte pairs).

    Odd-length messages are handled by treating the final lone character as
    a word with a zero high byte.
    """
    total = 0
    for i in range(0, len(msg), 2):
        pair = msg[i:i + 2]
        word = ord(pair[0])
        if len(pair) == 2:
            word += ord(pair[1]) << 8
        total += word
    # Fold the carries back into the low 16 bits, then complement and mask.
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    return ~total & 0xffff
class tcp_packet():
    """Builds and sends one raw IPv4/TCP packet (requires root privileges).

    NOTE(review): this class is Python 2 only (print statements and the
    ``except socket.error, msg`` syntax), and ``sys`` is used below without
    a visible import in this file -- confirm the real import block.
    """
    def __init__(self, tcp_seq, tcp_ack_seq, tcp_sport, tcp_dport, src_addr, dst_addr, userdata):
        # tcp header fields
        self.user_data = userdata
        self.tcp_source = tcp_sport  # source port
        self.tcp_dest = tcp_dport  # destination port
        self.tcp_seq = tcp_seq
        self.tcp_ack_seq = tcp_ack_seq
        self.tcp_doff = 5  # 4 bit field, size of tcp header, 5 * 4 = 20 bytes
        # tcp flags -- all zero by default; callers set them before pack()
        self.tcp_fin = 0
        self.tcp_syn = 0
        self.tcp_rst = 0
        self.tcp_psh = 0
        self.tcp_ack = 0
        self.tcp_urg = 0
        self.tcp_window = socket.htons(65534)  # maximum allowed window size
        self.tcp_check = 0
        self.tcp_urg_ptr = 0
        # create a raw socket (IPPROTO_RAW implies IP_HDRINCL on most
        # systems; we build the IP header ourselves)
        try:
            self.s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
        except socket.error, msg:
            print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
            sys.exit()
        self.s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
        # now start constructing the packet
        self.packet = '';
        self.source_ip = src_addr
        self.dest_ip = dst_addr  # or socket.gethostbyname('www.google.com')
        # ip header fields
        self.ip_ihl = 5
        self.ip_ver = 4
        # Set tos bit to map to IPtables match class to permit resets.
        self.ip_tos = 160
        self.ip_tot_len = 0  # kernel will fill the correct total length
        self.ip_id = 0  # Id of this packet
        self.ip_frag_off = 0
        self.ip_ttl = 128  # choice([255, 128, 64])
        self.ip_proto = socket.IPPROTO_TCP
        self.ip_check = 0  # kernel will fill the correct checksum
        self.ip_saddr = socket.inet_aton(self.source_ip)  # Spoof the source ip address if you want to
        self.ip_daddr = socket.inet_aton(self.dest_ip)
        # IHL and version share one byte: version in the high nibble
        self.ip_ihl_ver = (self.ip_ver << 4) + self.ip_ihl
        # tcp header fields

    def pack(self):
        """Serialize the IP header, TCP header (with pseudo-header checksum)
        and payload into self.packet.

        Need to add TCP Options RFC 1392 and other cools stuff like SACK and
        stuff. However, time is never on my side. Maybe a community project...
        This all needs to be ported to C once the DPDK Driver is completed.
        """
        self.ip_header = pack('!BBHHHBBH4s4s', self.ip_ihl_ver, self.ip_tos, self.ip_tot_len, self.ip_id,
                              self.ip_frag_off,
                              self.ip_ttl, self.ip_proto, self.ip_check, self.ip_saddr, self.ip_daddr)
        # data offset in the high nibble; reserved bits zero
        self.tcp_offset_res = (self.tcp_doff << 4) + 0
        self.tcp_flags = self.tcp_fin + (self.tcp_syn << 1) \
                         + (self.tcp_rst << 2) + (self.tcp_psh << 3) + (self.tcp_ack << 4) + (self.tcp_urg << 5)
        # print (self.tcp_source, self.tcp_dest, self.tcp_seq, self.tcp_ack_seq,
        # self.tcp_offset_res, self.tcp_flags, self.tcp_window, self.tcp_check, self.tcp_urg_ptr)
        # the ! in the pack format string means network order
        self.tcp_header = pack('!HHLLBBHHH', self.tcp_source, self.tcp_dest, self.tcp_seq, self.tcp_ack_seq, \
                               self.tcp_offset_res, self.tcp_flags, self.tcp_window, self.tcp_check, self.tcp_urg_ptr)
        # self.user_data = ''
        # pseudo header fields (used only for the checksum, not transmitted)
        self.source_address = socket.inet_aton(self.source_ip)
        self.dest_address = socket.inet_aton(self.dest_ip)
        self.placeholder = 0
        self.protocol = socket.IPPROTO_TCP
        self.tcp_length = len(self.tcp_header) + len(self.user_data)
        self.psh = pack('!4s4sBBH', self.source_address, self.dest_address,
                        self.placeholder, self.protocol, self.tcp_length)
        self.psh = self.psh + self.tcp_header + str(self.user_data)
        self.tcp_check = checksum(self.psh)
        # Re-pack the TCP header with the real checksum. The checksum field
        # uses native-order pack('H') -- checksum() already produced the
        # value in wire byte order, so it must not be swapped again.
        self.tcp_header = pack('!HHLLBBH',
                               self.tcp_source, self.tcp_dest, self.tcp_seq, self.tcp_ack_seq, self.tcp_offset_res, \
                               self.tcp_flags, self.tcp_window) + pack('H', self.tcp_check) + pack('!H',
                                                                                                  self.tcp_urg_ptr)
        self.packet = self.ip_header + self.tcp_header + self.user_data

    def sendto(self):
        # Port 0 is ignored for raw sockets; the TCP header carries the port.
        self.s.sendto(self.packet, (self.dest_ip, 0))
if __name__ == "__main__":
    # NOTE(review): tcp_packet.__init__ takes 7 required arguments, so this
    # no-arg call raises TypeError -- looks like a leftover smoke test.
    x = tcp_packet()
| StarcoderdataPython |
11262525 | <filename>100-Testes/threadTeste.py<gh_stars>0
from threading import Thread
import sys
import numpy as np
#documentação https://docs.python.org/release/2.5.2/lib/thread-objects.html
# Work items shared with the workers: thread 0 sums 100M ones, thread 1 sums [3, 2].
values = {0: [1]*100000000, 1: [3, 2]}
sequential_total = 0  # declared but never used in this script
threaded_total = 0  # accumulated from each worker's subtotal below
threads = []  # populated with Th instances in the main block
class Th(Thread):
    """Worker thread that sums the list registered under its index in `values`."""

    def __init__(self, num):
        super(Th, self).__init__()
        sys.stdout.write("Criando Thread numero " + str(num + 1) + "\n")
        sys.stdout.flush()
        self.num = num
        self.subtotal = 0

    def run(self):
        # Sum this worker's slice and announce the partial result.
        self.subtotal = np.sum(values[self.num])
        sys.stdout.write("Subtotal: " + str(self.get_subtotal()) + "\n")
        sys.stdout.flush()

    def get_subtotal(self):
        """Return the partial sum computed by run() (0 before it has run)."""
        return self.subtotal
#### O programa comeca aqui #####
# Spawn one worker per key in `values`, wait for them all to finish, then
# combine their partial sums.
for thread_number in range(2):
    threads.insert(thread_number, Th(thread_number))
    threads[thread_number].start()

# Bug fix: join before reading subtotals. Without this the main thread can
# read `subtotal` before the workers have finished (race condition), which
# would report 0 for the large list.
for worker in threads:
    worker.join()

threaded_total += threads[0].get_subtotal()
threaded_total += threads[1].get_subtotal()
print("Total: " + str(threaded_total))
| StarcoderdataPython |
169019 | <gh_stars>0
import unittest
from tests.templates.rectangle_world import (
example_wiki_ccl,
example_simple_different_regions,
)
from dekespo_ai_sdk.algorithms.connected_component_labelling import (
ConnectedComponentLabelling,
)
from dekespo_ai_sdk.core.graph import Graph
from dekespo_ai_sdk.core.shapes import Shape2DType
from dekespo_ai_sdk.core.dimensions import Dim2D
from dekespo_ai_sdk.core.raw_data_handler import RawDataHandler
from dekespo_ai_sdk.core.neighbour import NeighbourType
class ConnectedComponentLabellingTest(unittest.TestCase):
    """Tests for the two-pass connected-component labelling algorithm on
    rectangular grid worlds, with both 8- and 4-connectivity."""

    def test_wiki_example(self):
        """Wikipedia CCL example, 8-connectivity: checks both passes and the
        resulting regions (value 0 is the blocking value here)."""
        raw_data_handler = RawDataHandler(example_wiki_ccl())
        graph = Graph(raw_data_handler, Shape2DType.RECTANGLE, blocking_values=[0])
        labeller = ConnectedComponentLabelling(graph, NeighbourType.CONNECTIVITY_EIGHT)
        labeller.first_pass()
        # Provisional labels after the first pass, before equivalence merging.
        first_pass_data = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 3, 3, 3, 3, 0, 0],
            [0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0],
            [0, 0, 1, 1, 1, 1, 0, 0, 0, 3, 3, 3, 0, 0, 3, 3, 0],
            [0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 3, 3, 3, 0, 0, 0],
            [0, 0, 1, 1, 0, 0, 0, 0, 0, 5, 3, 0, 0, 0, 3, 3, 0],
            [0, 0, 0, 0, 0, 0, 6, 6, 5, 3, 0, 0, 7, 3, 3, 3, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertEqual(labeller.get_labels_graph(), first_pass_data)
        labeller.second_pass()
        # Final labels after equivalences are resolved (1/2 merge into 2;
        # 3, 5, 6, 7 merge into 4).
        second_pass_data = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 2, 2, 0, 0, 2, 2, 0, 0, 4, 4, 0, 0, 4, 4, 0],
            [0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 4, 4, 4, 4, 0, 0],
            [0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 4, 4, 4, 4, 0, 0, 0],
            [0, 0, 2, 2, 2, 2, 0, 0, 0, 4, 4, 4, 0, 0, 4, 4, 0],
            [0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 0, 4, 4, 4, 0, 0, 0],
            [0, 0, 2, 2, 0, 0, 0, 0, 0, 4, 4, 0, 0, 0, 4, 4, 0],
            [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 0, 0, 4, 4, 4, 4, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertEqual(labeller.get_labels_graph(), second_pass_data)
        regions = labeller.get_regions()
        self.assertEqual(len(regions), 3)
        # Spot-check membership: region 0 is the background/blocked cells.
        self.assertTrue(Dim2D(0, 0) in regions[0])
        self.assertTrue(Dim2D(8, 3) in regions[0])
        self.assertTrue(Dim2D(11, 6) in regions[0])
        self.assertTrue(Dim2D(1, 2) in regions[2])
        self.assertTrue(Dim2D(4, 4) in regions[2])
        self.assertTrue(Dim2D(15, 1) in regions[4])
        self.assertTrue(Dim2D(13, 5) in regions[4])

    def test_different_regions_8_connectivity(self):
        """With 8-connectivity the diagonal touches merge everything into 3
        regions (here value 1 is the blocking value)."""
        raw_data_handler = RawDataHandler(example_simple_different_regions())
        graph = Graph(raw_data_handler, Shape2DType.RECTANGLE, blocking_values=[1])
        labeller = ConnectedComponentLabelling(graph, NeighbourType.CONNECTIVITY_EIGHT)
        labeller.first_pass()
        labeller.second_pass()
        regions = labeller.get_regions()
        self.assertEqual(len(regions), 3)
        self.assertTrue(Dim2D(0, 1) in regions[0])
        self.assertTrue(Dim2D(2, 1) in regions[0])
        self.assertTrue(Dim2D(5, 4) in regions[0])
        self.assertTrue(Dim2D(0, 0) in regions[2])
        self.assertTrue(Dim2D(0, 4) in regions[2])
        self.assertTrue(Dim2D(6, 0) in regions[2])
        self.assertTrue(Dim2D(6, 4) in regions[2])
        self.assertTrue(Dim2D(3, 0) in regions[2])
        self.assertTrue(Dim2D(3, 4) in regions[2])
        self.assertTrue(Dim2D(1, 2) in regions[2])
        self.assertTrue(Dim2D(5, 2) in regions[2])
        self.assertTrue(Dim2D(3, 2) in regions[4])

    def test_different_regions_4_connectivity(self):
        """The same world under 4-connectivity splits into 10 regions,
        since diagonal neighbours no longer connect."""
        raw_data_handler = RawDataHandler(example_simple_different_regions())
        graph = Graph(raw_data_handler, Shape2DType.RECTANGLE, blocking_values=[1])
        labeller = ConnectedComponentLabelling(graph, NeighbourType.CONNECTIVITY_FOUR)
        labeller.first_pass()
        labeller.second_pass()
        regions = labeller.get_regions()
        self.assertEqual(len(regions), 10)
        self.assertTrue(Dim2D(0, 1) in regions[0])
        self.assertTrue(Dim2D(2, 1) in regions[0])
        self.assertTrue(Dim2D(5, 4) in regions[0])
        self.assertTrue(Dim2D(0, 0) in regions[1])
        self.assertTrue(Dim2D(0, 4) in regions[7])
        self.assertTrue(Dim2D(6, 0) in regions[3])
        self.assertTrue(Dim2D(6, 4) in regions[9])
        self.assertTrue(Dim2D(3, 2) in regions[6])
        self.assertTrue(Dim2D(3, 0) in regions[2])
        self.assertTrue(Dim2D(3, 4) in regions[8])
        self.assertTrue(Dim2D(1, 2) in regions[4])
        self.assertTrue(Dim2D(5, 2) in regions[5])
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| StarcoderdataPython |
1645019 | <reponame>asimonia/pricing-alerts
import uuid
from common.database import Database
import models.stores.constants as StoreConstants
import models.stores.errors as StoreErrors
class Store:
    """MongoDB-backed model for a store whose product pages are scraped.

    Attributes:
        name: display name of the store.
        url_prefix: URL prefix used to match product links to this store.
        tag_name: HTML tag that contains the price on a product page.
        query: attribute dict used to locate that tag.
        _id: hex document id (generated when not supplied).
    """

    def __init__(self, name, url_prefix, tag_name, query, _id=None):
        self.name = name
        self.url_prefix = url_prefix
        self.tag_name = tag_name
        self.query = query
        self._id = uuid.uuid4().hex if _id is None else _id

    def __repr__(self):
        return "<Store {}>".format(self.name)

    def json(self):
        """Return this store as a plain dict ready for Mongo storage."""
        return {
            "_id": self._id,
            "name": self.name,
            "url_prefix": self.url_prefix,
            "tag_name": self.tag_name,
            "query": self.query
        }

    @classmethod
    def get_by_id(cls, id):
        """Get the store with the given _id."""
        return cls(**Database.find_one(StoreConstants.COLLECTION, {"_id": id}))

    def save_to_mongo(self):
        """Update (upsert) this store's record in the database."""
        Database.update(StoreConstants.COLLECTION, {"_id": self._id}, self.json())

    @classmethod
    def get_by_name(cls, store_name):
        """Get the store with the given name."""
        return cls(**Database.find_one(StoreConstants.COLLECTION, {"name": store_name}))

    @classmethod
    def get_by_url_prefix(cls, url_prefix):
        """Get the first store whose url_prefix starts with *url_prefix*."""
        return cls(**Database.find_one(StoreConstants.COLLECTION, {"url_prefix": {"$regex": '^{}'.format(url_prefix)}}))

    @classmethod
    def find_by_url(cls, url):
        """Return the store matching an item *url*, trying successively
        longer prefixes of it.

        Bug fix: the original raised on the very first failed lookup (so
        longer prefixes were never tried) and started with the empty prefix,
        whose '^' regex matches any store. Now failed lookups are skipped
        and StoreNotFoundException is raised only after all prefixes fail.
        """
        for i in range(1, len(url) + 1):
            try:
                return cls.get_by_url_prefix(url[:i])
            except Exception:
                # No store for this prefix (find_one returned nothing);
                # try the next, longer prefix.
                continue
        raise StoreErrors.StoreNotFoundException("The URL prefix did not give us any results.")

    @classmethod
    def all(cls):
        """Return every store in the collection."""
        return [cls(**elem) for elem in Database.find(StoreConstants.COLLECTION, {})]

    def delete(self):
        """Remove this store's record from the database."""
        Database.remove(StoreConstants.COLLECTION, {'_id': self._id})
9747182 | <filename>tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An analysis that determines the reach of a function definition.
A function definition is said to reach a statement if that function may exist
(and therefore may be called) when that statement executes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import transformer
class Definition(object):
  """Describes one unique definition of a function: its defining AST node."""

  def __init__(self, def_node):
    self.def_node = def_node
class _NodeState(object):
"""Abstraction for the state of the CFG walk for reaching definition analysis.
This is a value type. Only implements the strictly necessary operators.
Attributes:
value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
their possible definitions
"""
def __init__(self, init_from=None):
if init_from:
self.value = set(init_from)
else:
self.value = set()
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return self.value != other.value
def __or__(self, other):
assert isinstance(other, _NodeState)
result = _NodeState(self.value)
result.value.update(other.value)
return result
def __add__(self, value):
result = _NodeState(self.value)
result.value.add(value)
return result
def __repr__(self):
return 'NodeState[%s]=%s' % (id(self), repr(self.value))
class Analyzer(cfg.GraphVisitor):
  """CFG visitor that determines reaching definitions at statement level."""

  def __init__(self, graph, external_defs):
    super(Analyzer, self).__init__(graph)
    # This allows communicating that nodes have extra reaching definitions,
    # e.g. those that a function closes over.
    self.external_defs = external_defs

  def init_state(self, _):
    # Lattice bottom: no definitions reach the node yet.
    return _NodeState()

  def visit_node(self, node):
    # Remember the previous out-state to detect whether the fixpoint moved.
    prev_defs_out = self.out[node]
    if node is self.graph.entry:
      # The entry node starts from the externally supplied definitions.
      defs_in = _NodeState(self.external_defs)
    else:
      # Join (set union) over all predecessors. Note: _NodeState defines
      # __or__ but not __ior__, so each |= rebinds defs_in to a fresh object
      # and prev_defs_out is never mutated.
      defs_in = prev_defs_out
      for n in node.prev:
        defs_in |= self.out[n]
    # Transfer function: a function-definition node adds itself to the
    # reaching set. __add__ also returns a fresh object, so defs_in stays
    # intact even though defs_out starts as an alias of it.
    defs_out = defs_in
    if isinstance(node.ast_node, (gast.Lambda, gast.FunctionDef)):
      defs_out += node.ast_node
    self.in_[node] = defs_in
    self.out[node] = defs_out
    # True while the state still changes, i.e. keep iterating to fixpoint.
    return prev_defs_out != defs_out
class TreeAnnotator(transformer.Base):
  """AST visitor that annotates each symbol name with its reaching definitions.

  Simultaneously, the visitor runs the dataflow analysis on each function
  node, accounting for the effect of closures. For example:

    def foo():
      def f():
        pass
      def g():
        # `def f` reaches here
  """

  def __init__(self, source_info, graphs):
    super(TreeAnnotator, self).__init__(source_info)
    self.graphs = graphs
    self.allow_skips = False
    self.current_analyzer = None

  def _process_function(self, node):
    """Runs the reaching-fndefs analysis on one function/lambda subtree.

    Fix: renamed from the misspelled `_proces_function`; both call sites
    below are updated.
    """
    parent_analyzer = self.current_analyzer
    subgraph = self.graphs[node]

    # If the function is nested inside another analyzed function, the
    # definitions that reach its definition site (closure) seed its entry.
    if (self.current_analyzer is not None
        and node in self.current_analyzer.graph.index):
      cfg_node = self.current_analyzer.graph.index[node]
      defined_in = self.current_analyzer.in_[cfg_node].value
    else:
      defined_in = ()

    analyzer = Analyzer(subgraph, defined_in)
    analyzer.visit_forward()

    # Recurse with this function's analyzer active, then restore the parent's.
    self.current_analyzer = analyzer
    node = self.generic_visit(node)
    self.current_analyzer = parent_analyzer
    return node

  def visit_FunctionDef(self, node):
    return self._process_function(node)

  def visit_Lambda(self, node):
    return self._process_function(node)

  def visit(self, node):
    # This can happen before entering the top level function
    if (self.current_analyzer is not None
        and node in self.current_analyzer.graph.index):
      cfg_node = self.current_analyzer.graph.index[node]
      anno.setanno(node, anno.Static.DEFINED_FNS_IN,
                   self.current_analyzer.in_[cfg_node].value)

    # Synthetic loop-test nodes carry the same annotation as their statement.
    extra_node = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST, default=None)
    if extra_node is not None:
      cfg_node = self.current_analyzer.graph.index[extra_node]
      anno.setanno(extra_node, anno.Static.DEFINED_FNS_IN,
                   self.current_analyzer.in_[cfg_node].value)

    return super(TreeAnnotator, self).visit(node)
def resolve(node, source_info, graphs):
  """Resolves reaching definitions for each symbol.

  Args:
    node: ast.AST
    source_info: transformer.SourceInfo
    graphs: Dict[ast.FunctionDef, cfg.Graph]
  Returns:
    ast.AST
  """
  return TreeAnnotator(source_info, graphs).visit(node)
| StarcoderdataPython |
9738425 | # Copyright 2020 EraO Prosopagnosia Helper Dev Team, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Supervised by Prof. <NAME> (http://www.eecg.toronto.edu/~mann/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import re
import time
from app.api.HttpResponse import http_response
from flask import request
from flask_bcrypt import Bcrypt
from werkzeug.exceptions import RequestEntityTooLarge
from werkzeug.utils import secure_filename
from app import webapp
from app.AccountManagment import validUsernameChar, get_database
from app.FileUploader import UPLOAD_FOLDER
from app.S3Helper import get_file_path_by_key, store_file
@webapp.route('/api/register', methods=['POST'])
def user_register_api():
    '''Handle POST /api/register.

    Reads `username` and `password` from the form, validates them
    (uniqueness, non-empty, no whitespace, length and character limits) and
    inserts a new `user_info` row with a bcrypt-hashed password.

    Returns:
        JSON http_response: 200 on success, 409 for a duplicate username,
        400 for any other validation failure.
    '''
    bcrypt = Bcrypt(webapp)
    # need to trim the user name
    username = request.form.get('username', "")
    password = request.form.get('password', "")
    # connect to database
    cnx = get_database()
    cursor = cnx.cursor()
    query = "SELECT COUNT(username) FROM user_info WHERE username = %s "
    cursor.execute(query, (username,))
    results = cursor.fetchall()
    numberOfExistUser = results[0][0]
    if numberOfExistUser != 0:
        return http_response(409, "Error: User name already exist!")
    if username == "" or password == "":
        return http_response(400, "Error: All fields are required!")
    # Idiom fix: a boolean "contains whitespace" test via re.search instead
    # of comparing re.findall's result list against [].
    if re.search(r'\s', username):
        return http_response(400, "Error: No space allowed in user name!")
    if (len(username) > 20 or len(username) < 1) or not all(c in validUsernameChar for c in username):
        return http_response(400,
                             "Error: Username violation, username must have length between 1 to 20, only letters and numbers allowed")
    if len(password) > 16 or len(password) < 1:
        return http_response(400, "Error: Password length violation")
    ts = time.time()
    timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    password = bcrypt.generate_password_hash(password).decode("utf-8")
    query = ''' INSERT INTO user_info (username,password,create_date,active,upload_counter)
                    VALUES (%s,%s, %s,1,0)
                '''
    cursor.execute(query, (username, password, timestamp))
    cnx.commit()
    # Add error catch here for sql
    return http_response(200, "Registration succeed for the user: " + username)
# Upload whitelist: lowercase image extensions accepted from users.
# Idiom fix: set literal instead of set([...]).
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
webapp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
# after user click the upload button
# after user click the upload button
@webapp.route('/api/upload', methods=['POST'])
def upload_file_api():
    '''
    This function provides users with an api to upload an image together with given username and password.
    The function will first check if the user info is correct and if it's correct, the function will keep a record
    of the image and an OpenCV-processed image in the database, with the proper naming scheme.
    The function can raise exceptions if there are any of the following problems: no file selected; filename too long;
    wrong extension type; file too large.
    If the uploaded is valid then we will connect to the database and create a record. First, we assign systematic names
    to the image and its processed image depending on the user id and their upload counter. Second, we save the image
    to the cloud, process it through OpenCV and then save the processed image to the cloud. Third, we gather all
    information and update our file name table in the database.
    Last we increase the upload counter by 1 and update it.

    NOTE(review): `Opencv` is referenced below but is not among this file's
    visible imports -- confirm it is imported elsewhere in the module.
    :return: Json string with status code and information string
    '''
    bcrypt = Bcrypt(webapp)
    try:
        username = request.form['username']
        password = request.form['password']
        if request.method == 'POST':
            # check if the post request has the file part
            if 'file' not in request.files:
                return http_response(404, "No file upload in the request!")
            try:
                file = request.files['file']
            except RequestEntityTooLarge:
                return http_response(413, "Image too large, file cannot larger than 5mb")
            # if user does not select file, browser also
            # submit an empty part without filename
            if file.filename == '':
                return http_response(404, "No file selected!")
            if len(file.filename) >= 50:
                return http_response(400, "File name too long")
            if file and allowed_file(file.filename):
                # ===================================================#
                # ======Till this step the file is good to process===#
                # ===================================================#
                # rename the upload img as: userpid_useruploadcounter_imagename.extention
                userFileName = secure_filename(file.filename)  # example: example.jpg
                # connect to database
                cnx = get_database()
                cursor = cnx.cursor()
                # Fetch the stored password hash, user id and upload counter
                # for the (active) user in one query.
                query1 = "SELECT password, uid, upload_counter FROM user_info WHERE username = %s and active = 1"
                cursor.execute(query1, (username,))
                results = cursor.fetchall()
                if len(results) != 1:
                    return http_response(400, "Invalid username or password")
                correctPwd = bcrypt.check_password_hash(results[0][0], password)
                if correctPwd:
                    uid = results[0][1]
                    upload_counter = results[0][2]
                    # Systematic cloud names: <uid>_<counter>_<original name>
                    cloudSaveFilename = str(uid) + "_" + str(
                        upload_counter) + "_" + userFileName  # example: 12_1_example.jpg
                    cloudProcessedFileName = "p_" + cloudSaveFilename
                    userDownloadFileName = "processed_" + userFileName
                    # save uploaded img to cloud drive
                    # file.save(os.path.join(webapp.config['UPLOAD_FOLDER'], cloudSaveFilename))
                    store_file(cloudSaveFilename, file)
                    new_file = get_file_path_by_key(cloudSaveFilename)
                    # process the img from cloud drive, it will process the img in (img_path) and save processed img in same path
                    Opencv.imageProcess(cloudProcessedFileName, new_file)
                    # prepare for values for sql
                    fileName = userFileName
                    processedFileName = "processed_" + userFileName
                    uploadImagePath = UPLOAD_FOLDER + cloudSaveFilename
                    processedImagePath = UPLOAD_FOLDER + cloudProcessedFileName
                    ts = time.time()
                    timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
                    # update file_name table
                    query2 = "INSERT INTO file_info (uid, file_name, upload_image_path, cloud_image_name, processed_image_path, cloud_processed_image_name, create_time) VALUES (%s, %s, %s, %s, %s , %s, %s)"
                    data = (
                        uid, fileName, uploadImagePath, cloudSaveFilename, processedImagePath, cloudProcessedFileName,
                        timeStamp)
                    cursor.execute(query2, data)
                    cnx.commit()
                    # get the newest user upload counter for database
                    query3 = "SELECT upload_counter FROM user_info WHERE username = %s and active = 1"
                    cursor.execute(query3, (username,))
                    results = cursor.fetchall()
                    upload_counter = results[0][0]
                    # update user_table
                    query4 = "UPDATE user_info SET upload_counter = %s WHERE uid = %s"
                    cursor.execute(query4, (upload_counter + 1, uid))
                    cnx.commit()
                    print("==>process succeed")
                    # get the image path for both image_before and image_after
                    return http_response(200, "Image Successfully Processed!")
                else:
                    return http_response(400, "Invalid username or password")
            else:
                return http_response(400, "Not a Correct File Type!" + str(
                    file and allowed_file(file.filename)) + "|" + file.filename)
        # NOTE(review): unreachable for this route (only POST is routed);
        # 123 is not a standard HTTP status code.
        return http_response(123, "Unsupported method!")
    except Exception as ex:
        # RequestEntityTooLarge raised outside the inner try surfaces here.
        if '413' in str(ex):
            return http_response(413, "Image too large, file cannot larger than 5mb")
        return http_response(400, str(ex))
| StarcoderdataPython |
3359326 | <filename>Practice/divisibility.py<gh_stars>0
# For each pair (a, b), print how much must be added to a so that it becomes
# divisible by b (0 when b already divides a).
n = int(input())
answers = []
for _ in range(n):
    nums = [int(tok) for tok in input().split()]
    a, b = nums[0], nums[1]
    remainder = a % b
    answers.append(0 if remainder == 0 else b - remainder)
for answer in answers:
    print(answer)
| StarcoderdataPython |
11356717 | <filename>emiproc/country_code.py
# constants
# Mapping from country code (ISO alpha-3 plus EMEP alpha-2/extended codes)
# to the numeric EMEP country id.
# Fix: duplicate literal keys removed. Python keeps only the LAST occurrence
# of a duplicated dict key, so the effective mapping is unchanged:
# "SMR" resolved to 22 (the earlier "SMR": 8 was silently shadowed), and the
# repeated "FGD": 9 / "FFR": 10 entries collapse to one each.
country_codes = {
    "ALB": 1,
    "AUT": 2,
    "BEL": 3,
    "BGR": 4,
    "DNK": 6,
    "FIN": 7,
    "FRA": 8,
    "GRC": 11,
    "HUN": 12,
    "IRL": 14,
    "ITA": 15,
    "LUX": 16,
    "NLD": 17,
    "NOR": 18,
    "POL": 19,
    "PRT": 20,
    "ROM": 21,
    "ROU": 21,
    "ESP": 22,
    "AND": 22,  # Andorra -> ESP
    "SWE": 23,
    "CHE": 24,
    "TUR": 25,
    "GBR": 27,
    "IMN": 27,  # Isle of Man -> GBR
    "GGY": 27,  # Guernsey -> GBR
    "JEY": 27,  # Jersey -> GBR
    "BLR": 39,
    "UKR": 40,
    "MKD": 41,
    "MDA": 42,
    "EST": 43,
    "LVA": 44,
    "LTU": 45,
    "CZE": 46,
    "SVK": 47,
    "SVN": 48,
    "HRV": 49,
    "BIH": 50,
    "YUG": 51,
    "GEO": 54,
    "MLT": 57,
    "DEU": 60,
    "RUS": 61,
    "ARM": 56,
    "AZE": 58,
    "CYP": 55,
    # NOTE(review): inconsistent with "IS": 13 below (ISO-2 Iceland) --
    # confirm which id Iceland should map to.
    "ISL": 18,  # added for AQMEII, assumed to be same as Norway
    # ISO 2 (from EMEP site)
    "AL": 1,  # Albania
    "AT": 2,  # Austria
    "BE": 3,  # Belgium
    "BG": 4,  # Bulgaria
    "FCS": 5,  # Former Czechoslovakia
    "DK": 6,  # Denmark
    "FI": 7,  # Finland
    "ALD": 7,  # Aland, part of Finland
    "FR": 8,  # France
    "FGD": 9,  # Former German Democratic Republic
    "FFR": 10,  # Former Federal Republic of Germany
    "GR": 11,  # Greece
    "WSB": 11,  # Akrotiri/Santorini (Assigned to Greece)
    "HU": 12,  # Hungary
    "IS": 13,  # Iceland
    "IE": 14,  # Ireland
    "IT": 15,  # Italy
    "LU": 16,  # Luxembourg
    "NL": 17,  # Netherlands
    "NO": 18,  # Norway
    "PL": 19,  # Poland
    "PT": 20,  # Portugal
    "RO": 21,  # Romania
    "ES": 22,  # Spain
    "SMR": 22,  # San Marino (assigned to Spain)
    "AD": 22,  # Andorra (assigned to Spain)
    "GIB": 22,  # Gibraltar (assigned to Spain)
    "SE": 23,  # Sweden
    "CH": 24,  # Switzerland
    "TR": 25,  # Turkey
    "FSU": 26,  # Former USSR
    "GB": 27,  # United Kingdom
    "FRO": 27,  # Faroe Island (assigned to United Kingdom)
    "VOL": 28,  # Volcanic emissions
    "REM": 29,  # Remaining land Areas
    "BAS": 30,  # Baltic Sea
    "NOS": 31,  # North Sea
    "ATL": 32,  # Remaining North-East Atlantic Ocean
    "MED": 33,  # Mediterranean Sea
    "BLS": 34,  # Black Sea
    "NAT": 35,  # Natural marine emissions
    "RUO": 36,  # Kola & Karelia
    "RUP": 37,  # St.Petersburg & Novgorod-Pskov
    "RUA": 38,  # Kaliningrad
    "BY": 39,  # Belarus
    "UA": 40,  # Ukraine
    "MD": 41,  # Republic of Moldova
    "RUR": 42,  # Rest of the Russian Federation
    "EE": 43,  # Estonia
    "LV": 44,  # Latvia
    "LT": 45,  # Lithuania
    "CZ": 46,  # Czech Republic
    "SK": 47,  # Slovakia
    "SI": 48,  # Slovenia
    "HR": 49,  # Croatia
    "BA": 50,  # Bosnia and Herzegovina
    "CS": 51,  # Serbia and Montenegro
    "MK": 52,  # The former Yugoslav Republic of Macedonia
    "KZ": 53,  # Kazakhstan in the former official EMEP domain
    "GE": 54,  # Georgia
    "CY": 55,  # Cyprus
    "CYN": 55,  # Cyprus (alternate code)
    "CNM": 55,  # Cyprus (alternate code)
    "ESB": 55,  # Dhekelia Cantonment (assigned to Cyprus)
    "AM": 56,  # Armenia
    "MT": 57,  # Malta
    "ASI": 58,  # Remaining Asian areas
    "LI": 59,  # Liechtenstein,
    "LIE": 59,  # Liechtenstein (alternate code)
    "DE": 60,  # Germany
    "RU": 61,  # Russian Federation in the former official EMEP domain
    "MC": 62,  # Monaco
    "MCO": 62,  # Monaco
    "NOA": 63,  # North Africa
    "MAR": 63,  # Morocco (assigned to NOA)
    "TUN": 63,  # Tunisia (assigned to NOA)
    "DZA": 63,  # Algeria (assigned to NOA)
    "SYR": 63,  # Syria (assigned to NOA)
    "EU": 64,  # European Community
    "US": 65,  # United States
    "CA": 66,  # Canada
    "BIC": 67,  # Boundary and Initial Conditions
    "KG": 68,  # Kyrgyzstan
    "AZ": 69,  # Azerbaijan
    "ATX": 70,  # EMEP-external Remaining North-East Atlantic Ocean
    "RUX": 71,  # EMEP-external part of Russian Federation
    "RS": 72,  # Serbia
    "SRB": 72,  # Serbia (alternate code)
    "KOS": 72,  # Kosovo (assigned to SRB)
    "ME": 73,  # Montenegro
    "MNE": 73,  # Montenegro (alternate code)
    "RFE": 74,  # Rest of Russian Federation in the extended EMEP domain
    "KZE": 75,  # Rest of Kazakhstan in the extended EMEP domain
    "UZO": 76,  # Uzbekistan in the former official EMEP domain
    "TMO": 77,  # Turkmenistan in the former official EMEP domain
    "UZE": 78,  # Rest of Uzbekistan in the extended EMEP domain
    "TME": 79,  # Rest of Turkmenistan in the extended EMEP domain
    "CAS": 80,  # Caspian Sea
    "TJ": 81,  # Tajikistan
    "ARO": 82,  # Aral Lake in the former official EMEP domain
    "ARE": 83,  # Rest of Aral Lake in the extended EMEP domain
    "ASM": 84,  # Modified Remaining Asian Areas in the former official EMEP domain
    "ASE": 85,  # Remaining Asian Areas in the extended EMEP domain
    "AOE": 86,  # Arctic Ocean in the extended EMEP domain
    "RFX": 87,  # Extended EMEP External Part of Russian Federation
    "ASX": 88,  # Extended EMEP External Part of Asia
    "PAX": 89,  # Extended EMEP External Part of Pacific Ocean
    "AOX": 90,  # Extended EMEP External Part of Arctic Ocean
    "NAX": 91,  # Extended EMEP External Part of North Africa
    "KZT": 92,  # Kazakhstan
    "RUE": 93,  # Russian Federation in the extended EMEP domain (RU+RFE+RUX)
    "UZ": 94,  # Uzbekistan
    "TM": 95,  # Turkmenistan
    "AST": 96,  # Asian areas in the extended EMEP domain (ASM+ASE+ARO+ARE+CAS)
    "FYU": 99,  # Former Yugoslavia
    "BEF": 301,  # Belgium (Flanders)
    "BA2": 302,  # Baltic Sea EU Cargo o12m
    "BA3": 303,  # Baltic Sea ROW Cargo o12m
    "BA4": 304,  # Baltic Sea EU Cargo i12m
    "BA5": 305,  # Baltic Sea ROW Cargo i12m
    "BA6": 306,  # Baltic Sea EU Ferry o12m
    "BA7": 307,  # Baltic Sea ROW Ferry o12m
    "BA8": 308,  # Baltic Sea EU Ferry i12m
    "BA9": 309,  # Baltic Sea ROW Ferry i12m
    "NO2": 312,  # North Sea EU Cargo o12m
    "NO3": 313,  # North Sea ROW Cargo o12m
    "NO4": 314,  # North Sea EU Cargo i12m
    "NO5": 315,  # North Sea ROW Cargo i12m
    "NO6": 316,  # North Sea EU Ferry o12m
    "NO7": 317,  # North Sea ROW Ferry o12m
    "NO8": 318,  # North Sea EU Ferry i12m
    "NO9": 319,  # North Sea ROW Ferry i12m
    "AT2": 322,  # Remaining North-East Atlantic Ocean EU Cargo o12m
    "AT3": 323,  # Remaining North-East Atlantic Ocean ROW Cargo o12m
    "AT4": 324,  # Remaining North-East Atlantic Ocean EU Cargo i12m
    "AT5": 325,  # Remaining North-East Atlantic Ocean ROW Cargo i12m
    "AT6": 326,  # Remaining North-East Atlantic Ocean EU Ferry o12m
    "AT7": 327,  # Remaining North-East Atlantic Ocean ROW Ferry o12m
    "AT8": 328,  # Remaining North-East Atlantic Ocean EU Ferry i12m
    "AT9": 329,  # Remaining North-East Atlantic Ocean ROW Ferry i12m
    "ME2": 332,  # Mediterranean Sea EU Cargo o12m
    "ME3": 333,  # Mediterranean Sea ROW Cargo o12m
    "ME4": 334,  # Mediterranean Sea EU Cargo i12m
    "ME5": 335,  # Mediterranean Sea ROW Cargo i12m
    "ME6": 336,  # Mediterranean Sea EU Ferry o12m
    "ME7": 337,  # Mediterranean Sea ROW Ferry o12m
    "ME8": 338,  # Mediterranean Sea EU Ferry i12m
    "ME9": 339,  # Mediterranean Sea ROW Ferry i12m
    "BL2": 342,  # Black Sea EU Cargo o12m
    "BL3": 343,  # Black Sea ROW Cargo o12m
    "BL4": 344,  # Black Sea EU Cargo i12m
    "BL5": 345,  # Black Sea ROW Cargo i12m
    "BL6": 346,  # Black Sea EU Ferry o12m
    "BL7": 347,  # Black Sea ROW Ferry o12m
    "BL8": 348,  # Black Sea EU Ferry i12m
    "BL9": 349,  # Black Sea ROW Ferry i12m
    "GL": 601,  # Greenland
}
5068574 | import numpy as np
import matplotlib.pyplot as plt
import cv2
import gin
import typing
import torch
import imutils
from scipy.stats import multivariate_normal
import xml.etree.ElementTree as ET
from soccer_robot_perception.utils.constants import CLASS_MAPPING_DETECTION
from soccer_robot_perception.utils.metrics import total_variation_loss
def read_xml_file(xml_file: str):
    """Parse a Pascal-VOC style annotation XML file.

    :param xml_file: path to the annotation XML file
    :return: tuple ``(class_list, bb_list)`` where ``class_list`` holds the
        integer class ids (mapped via CLASS_MAPPING_DETECTION) and ``bb_list``
        holds the matching bounding boxes as ``[xmin, ymin, xmax, ymax]``.
    """
    tree = ET.parse(xml_file)
    root = tree.getroot()
    bb_list = []
    class_list = []
    for boxes in root.iter("object"):
        # NOTE: the original re-read root.find("filename") on every iteration
        # and never used the value; that dead code has been removed.
        class_list.append(CLASS_MAPPING_DETECTION[boxes.find("name").text])
        ymin = int(boxes.find("bndbox/ymin").text)
        xmin = int(boxes.find("bndbox/xmin").text)
        ymax = int(boxes.find("bndbox/ymax").text)
        xmax = int(boxes.find("bndbox/xmax").text)
        bb_list.append([xmin, ymin, xmax, ymax])
    return class_list, bb_list
@gin.configurable
def det_label_preprocessor(
    input_width,
    input_height,
    channels,
    bb,
    class_name,
    small_variance=6,
    large_variance=12,
    scale=4,
    visualize_label_masks=False,
):
    """Convert bounding-box annotations into Gaussian-blob detection targets.

    Builds a (channels, input_height/scale, input_width/scale) tensor whose
    channels are heat-maps for ball (0), robot (1) and goalpost (2).  Each
    object contributes a Gaussian bump: balls at the box centre
    (small_variance), robots at the bottom-centre (large_variance) and
    goalposts at both bottom corners (small_variance).

    :return: tuple (label_mask_shrinked, blob_centers) where blob_centers is
        a list of (y, x, class_id) tuples in the down-scaled frame.
    """
    label_mask_shrinked = np.zeros(
        (channels, int(input_height / scale), int(input_width / scale))
    )
    # Each class map starts at 1 everywhere; blob pdfs are added on top.
    robot_map = np.ones((int(input_height / scale), int(input_width / scale)))
    ball_map = np.ones((int(input_height / scale), int(input_width / scale)))
    goalpost_map = np.ones((int(input_height / scale), int(input_width / scale)))
    blob_centers = []
    for box, name in zip(bb, class_name):
        box = [x / scale for x in box]  # down-scale the box to map resolution
        if name == CLASS_MAPPING_DETECTION["ball"]:
            ball_heatmap = np.dstack(
                np.mgrid[
                    0 : int(input_height / scale) : 1, 0 : int(input_width / scale) : 1
                ]
            )
            # ball blob sits at the centre of the bounding box
            point_x = (box[0] + box[2]) / 2
            point_y = (box[1] + box[3]) / 2
            start_x = int(point_x)
            start_y = int(point_y)
            rv = multivariate_normal(mean=[start_y, start_x], cov=small_variance)
            ball_map = ball_map + rv.pdf(ball_heatmap)
            blob_centers.append((start_y, start_x, name))
        elif name == CLASS_MAPPING_DETECTION["robot"]:
            robot_heatmap = np.dstack(
                np.mgrid[
                    0 : int(input_height / scale) : 1, 0 : int(input_width / scale) : 1
                ]
            )
            # robot blob sits at the bottom-centre of the bounding box
            point_x = (box[0] + box[2]) / 2
            point_y = box[3]
            start_x = int(point_x)
            start_y = int(point_y)
            rv = multivariate_normal(mean=[start_y, start_x], cov=large_variance)
            robot_map = robot_map + rv.pdf(robot_heatmap)
            blob_centers.append((start_y, start_x, name))
        elif name == CLASS_MAPPING_DETECTION["goalpost"]:
            goalpost_heatmap = np.dstack(
                np.mgrid[
                    0 : int(input_height / scale) : 1, 0 : int(input_width / scale) : 1
                ]
            )
            # first goalpost blob: bottom-left corner of the box
            point_x = box[0]
            point_y = box[3]
            start_x = int(point_x)
            start_y = int(point_y)
            rv = multivariate_normal(mean=[start_y, start_x], cov=small_variance)
            goalpost_map = goalpost_map + rv.pdf(goalpost_heatmap)
            blob_centers.append((start_y, start_x, name))
            goalpost_heatmap = np.dstack(
                np.mgrid[
                    0 : int(input_height / scale) : 1, 0 : int(input_width / scale) : 1
                ]
            )
            # second goalpost blob: bottom-right corner of the box
            point_x = box[2]
            point_y = box[3]
            start_x = int(point_x)
            start_y = int(point_y)
            rv = multivariate_normal(mean=[start_y, start_x], cov=small_variance)
            goalpost_map = goalpost_map + rv.pdf(goalpost_heatmap)
            blob_centers.append((start_y, start_x, name))
    if visualize_label_masks:
        # NOTE(review): this displays the still-zero channels-first tensor,
        # before the maps below are written into it -- confirm this is intended.
        plt.imshow(label_mask_shrinked)
        plt.show()
    label_mask_shrinked[0] = ball_map
    label_mask_shrinked[1] = robot_map
    label_mask_shrinked[2] = goalpost_map
    label_mask_shrinked = torch.tensor(label_mask_shrinked, dtype=torch.float)
    return label_mask_shrinked, blob_centers
def center_of_shape(image: np.ndarray, threshold, name: int):
    """
    Locate contour centroids in a heat-map image.

    :param image: single-channel map with values assumed in [0, 1]
        (it is scaled by 255 before thresholding)
    :param threshold: binarisation threshold applied after Gaussian blurring
    :param name: integer class id attached to every centre
        (1: ball, 2: robot, 3: goalpost)
    :return: list of (cY, cX, name) tuples, one per contour with
        non-zero area
    """
    found_centers = []
    gray = (image * 255).astype(np.uint8)
    smoothed = cv2.GaussianBlur(gray, (3, 3), 0)
    _, binary = cv2.threshold(smoothed, threshold, 255, cv2.THRESH_BINARY)
    contours = imutils.grab_contours(
        cv2.findContours(binary.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    )
    for contour in contours:
        moments = cv2.moments(contour)
        if moments["m00"] > 0:
            # centroid from image moments: (m10/m00, m01/m00)
            col = int(moments["m10"] / moments["m00"])
            row = int(moments["m01"] / moments["m00"])
            found_centers.append((row, col, name))
    return found_centers
def plot_blobs(points: typing.List, variance: float):
    """
    Render Gaussian blobs on a 120x160 canvas.

    :param points: iterable of tuples whose first two entries are the
        (y, x) blob centre; any further entries are ignored
    :param variance: isotropic covariance of every Gaussian blob
    :return: 120x160 numpy array equal to ones plus the summed blob pdfs
    """
    canvas = np.ones((120, 160))
    grid = np.dstack(np.mgrid[0:120:1, 0:160:1])  # (y, x) coordinate grid
    for point in points:
        gaussian = multivariate_normal(mean=[point[0], point[1]], cov=variance)
        canvas = canvas + gaussian.pdf(grid)
    return canvas
def compute_total_variation_loss_det(img, weight: float = 0.0001):
    """Thin wrapper: total-variation loss of *img* scaled by *weight*."""
    return total_variation_loss(img, weight)
def det_image_processor_wandb(input_image, model_det_out, target):
    """Compose a 3-panel figure (input / prediction / target) for logging.

    :param input_image: CHW image tensor; values assumed in [0, 1] -- TODO confirm
    :param model_det_out: CHW detection heat-map tensor from the model
    :param target: CHW ground-truth heat-map tensor
    :return: the matplotlib Figure (closed, but still usable by the caller)
    """
    fig = plt.figure()
    plt.subplot(131)
    # resize to the detection output resolution for side-by-side comparison
    new_image = cv2.resize(input_image.permute(1, 2, 0).numpy(), (160, 120),
                           interpolation=cv2.INTER_NEAREST)
    plt.imshow(new_image)
    plt.title('Input')
    plt.subplot(132)
    plt.imshow((model_det_out.permute(1, 2, 0).numpy() * 255).astype(np.uint8))
    plt.title('Det out')
    plt.subplot(133)
    plt.imshow((target.permute(1, 2, 0).numpy() * 255).astype(np.uint8))
    plt.title('Target')
    # close so the figure does not pile up in pyplot's global state
    plt.close()
    return fig
6445741 | # Generated by Django 4.0.2 on 2022-02-23 11:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the Measurement table.

    NOTE: auto-generated by Django 4.0.2; do not edit field definitions
    by hand -- create a follow-up migration instead.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Measurement',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date published')),
                ('chest', models.DecimalField(decimal_places=2, max_digits=5)),
                ('waist', models.DecimalField(decimal_places=2, max_digits=5)),
                ('hips', models.DecimalField(decimal_places=2, max_digits=5)),
                ('weight', models.DecimalField(decimal_places=2, max_digits=5)),
                # each measurement row belongs to exactly one user
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
5153628 | """
Pet Steps
Steps file for Pet.feature
"""
import os
import json
import requests
from behave import *
from compare import expect, ensure
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
# Use a longer Selenium wait when running against cloud-hosted services.
if 'VCAP_SERVICES' in os.environ or 'BINDING_CLOUDANT' in os.environ:
    WAIT_SECONDS = 30
else:
    WAIT_SECONDS = 5
@given('the following pets')
def step_impl(context):
    """ Delete all Pets and load new ones """
    # Reset the customer store, then POST each behave-table row as a customer.
    headers = {'Content-Type': 'application/json'}
    context.resp = requests.delete(context.base_url + '/customers/reset', headers=headers)
    expect(context.resp.status_code).to_equal(204)  # 204 No Content on reset
    create_url = context.base_url + '/customers'
    for row in context.table:
        data = {
            "id": row['id'],
            "first_name": row['first_name'],
            "last_name": row['last_name'],
            "address": row['address'],
            "email": row['email'],
            "username": row['username'],
            "password": row['password'],
            "phone_number": row['phone_number'],
            # the behave table stores booleans as strings
            "active": row['active'] in ['True', 'true', '1']
        }
        payload = json.dumps(data)
        context.resp = requests.post(create_url, data=payload, headers=headers)
        expect(context.resp.status_code).to_equal(201)  # 201 Created
@when('I visit the "home page"')
def step_impl(context):
    """Navigate the Selenium driver to the application's base URL."""
    context.driver.get(context.base_url)
@then('I should see "{message}" in the title')
def step_impl(context, message):
    """Assert that *message* appears in the browser window title."""
    expect(context.driver.title).to_contain(message)
@then('I should not see "{message}"')
def step_impl(context, message):
    """Assert *message* is absent from the last raw HTTP response body."""
    error_msg = "I should not see '%s' in '%s'" % (message, context.resp.text)
    ensure(message in context.resp.text, False, error_msg)
@when('I set the "{element_name}" to "{text_string}"')
def step_impl(context, element_name, text_string):
    """Type *text_string* into the field whose id is the lower-cased name.

    For the "id" field the human-readable numeric id is translated into the
    database document id ('_id') by querying the customers endpoint.
    """
    element_id = element_name.lower()
    element = context.driver.find_element_by_id(element_id)
    element.clear()
    if element_name == "id":
        resp = requests.get(context.base_url + '/customers')
        customer_lists = resp.json()  # list of customer dictionaries
        # BUG FIX: filter() returns a lazy iterator in Python 3, which is
        # always truthy and not subscriptable; materialise the matches
        # as a list before testing/indexing.
        customers = [data for data in customer_lists
                     if data.get('id', 0) == int(text_string)]
        if customers:
            text_string = customers[0]['_id']
    element.send_keys(text_string)
##################################################################
# This code works because of the following naming convention:
# The buttons have an id in the html hat is the button text
# in lowercase followed by '-btn' so the Clean button has an id of
# id='clear-btn'. That allows us to lowercase the name and add '-btn'
# to get the element id of any button
##################################################################
@when('I select the "{checkbox}" option')
def step_impl(context, checkbox):
    """Click the checkbox whose element id is the lower-cased step argument."""
    print(checkbox)  # NOTE(review): debug output left in place
    select_id = checkbox.lower()
    context.driver.find_element_by_id(select_id).click()
@when('I press the "{button}" button')
def step_impl(context, button):
    """Click a button; page buttons follow the '<name>-btn' id convention."""
    button_id = button.lower() + '-btn'
    context.driver.find_element_by_id(button_id).click()
@then('I should see "{name}" in the results')
def step_impl(context, name):
    """Wait up to WAIT_SECONDS for *name* to appear in the search results."""
    found = WebDriverWait(context.driver, WAIT_SECONDS).until(
        expected_conditions.text_to_be_present_in_element(
            (By.ID, 'search_results'),
            name
        )
    )
    expect(found).to_be(True)
@then('I should not see "{name}" in the results')
def step_impl(context, name):
    """Assert *name* is absent from the current search results element."""
    element = context.driver.find_element_by_id('search_results')
    error_msg = "I should not see '%s' in '%s'" % (name, element.text)
    ensure(name in element.text, False, error_msg)
@then('I should see the message "{message}"')
def step_impl(context, message):
    """Wait up to WAIT_SECONDS for *message* in the flash-message element."""
    found = WebDriverWait(context.driver, WAIT_SECONDS).until(
        expected_conditions.text_to_be_present_in_element(
            (By.ID, 'flash_message'),
            message
        )
    )
    expect(found).to_be(True)
##################################################################
# This code works because of the following naming convention:
# The id field for text input in the html is the element name
# prefixed by 'pet_' so the Name field has an id='pet_name'
# We can then lowercase the name and prefix with pet_ to get the id
##################################################################
@then('I should see "{text_string}" in the "{element_name}" field')
def step_impl(context, text_string, element_name):
    """Wait until the input field's *value* attribute contains *text_string*."""
    element_id = element_name.lower()
    found = WebDriverWait(context.driver, WAIT_SECONDS).until(
        expected_conditions.text_to_be_present_in_element_value(
            (By.ID, element_id),
            text_string
        )
    )
    expect(found).to_be(True)
@when('I change "{element_name}" to "{text_string}"')
def step_impl(context, element_name, text_string):
    """Wait for the field to exist, clear it, and type *text_string*."""
    element_id = element_name.lower()
    element = WebDriverWait(context.driver, WAIT_SECONDS).until(
        expected_conditions.presence_of_element_located((By.ID, element_id))
    )
    element.clear()
    element.send_keys(text_string)
| StarcoderdataPython |
9659743 | <gh_stars>0
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import print_function
import os
import math
import argparse
import numpy as np
import cntk
import _cntk_py
from cntk.utils import *
from cntk.training_session import *
from cntk.ops import *
from cntk.distributed import *
from cntk.io import ImageDeserializer, MinibatchSource, StreamDef, StreamDefs, FULL_DATA_SWEEP
import cntk.io.transforms as xforms
from cntk.layers import Placeholder, Convolution2D, Activation, MaxPooling, Dense, Dropout, default_options, Sequential
from cntk.initializer import normal
# default Paths relative to current python file.
abs_path = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(abs_path, "Models")
log_dir = None  # overridden from the -logdir CLI argument in __main__

# model dimensions (AlexNet input resolution and ImageNet class count)
image_height = 227
image_width = 227
num_channels = 3  # RGB
num_classes = 1000

model_name = "AlexNet.model"  # checkpoint file name under model_path
# Create a minibatch source.
def create_image_mb_source(map_file, is_training, total_number_of_samples):
    """Build a CNTK MinibatchSource over an image map file.

    :param map_file: text file mapping image paths to labels
    :param is_training: enables random crop/jitter and shuffling
    :param total_number_of_samples: epoch size (or FULL_DATA_SWEEP for test)
    :return: a MinibatchSource exposing 'features' and 'labels' streams
    """
    if not os.path.exists(map_file):
        raise RuntimeError("File '%s' does not exist." %map_file)

    # transformation pipeline for the features has jitter/crop only when training
    transforms = []
    if is_training:
        transforms += [
            xforms.crop(crop_type='randomside', side_ratio=0.88671875, jitter_type='uniratio') # train uses jitter
        ]
    else:
        transforms += [
            xforms.crop(crop_type='center', side_ratio=0.88671875) # test has no jitter
        ]

    transforms += [
        xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
    ]

    # deserializer
    return MinibatchSource(
        ImageDeserializer(map_file, StreamDefs(
            features = StreamDef(field='image', transforms=transforms), # first column in map file is referred to as 'image'
            labels = StreamDef(field='label', shape=num_classes))),     # and second as 'label'
        randomize = is_training,
        epoch_size=total_number_of_samples,
        multithreaded_deserializer = True)
# Local Response Normalization layer. See Section 3.3 of the paper:
# https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf
# The mathematical equation is:
# b_{x,y}^i=a_{x,y}^i/(k+\alpha\sum_{j=max(0,i-n)}^{min(N-1, i+n)}(a_{x,y}^j)^2)^\beta
# where a_{x,y}^i is the activity of a neuron comoputed by applying kernel i at position (x,y)
# N is the total number of kernals, n is half normalization width.
def LocalResponseNormalization(k, n, alpha, beta, name=''):
    """Build a local response normalization (LRN) layer as a CNTK graph.

    Implements b_{x,y}^i = a_{x,y}^i / (k + alpha * sum_j (a_{x,y}^j)^2)^beta
    with the sum taken over the 2n+1 neighbouring channels (Sec. 3.3 of the
    AlexNet paper).

    :param k: additive constant inside the normalization term
    :param n: half of the normalization window width over channels
    :param alpha: scale of the squared-activation sum
    :param beta: exponent of the normalization term
    :param name: optional layer name (not attached to the graph here)
    """
    x = cntk.blocks.Placeholder(name='lrn_arg')
    x2 = cntk.ops.square(x)
    # reshape to insert a fake singleton reduction dimension after the 3th axis (channel axis). Note Python axis order and BrainScript are reversed.
    x2s = cntk.ops.reshape(x2, (1, cntk.InferredDimension), 0, 1)
    W = cntk.ops.constant(alpha/(2*n+1), (1,2*n+1,1,1), name='W')
    # 3D convolution with a filter that has a non 1-size only in the 3rd axis, and does not reduce since the reduction dimension is fake and 1
    y = cntk.ops.convolution (W, x2s)
    # reshape back to remove the fake singleton reduction dimension
    b = cntk.ops.reshape(y, cntk.InferredDimension, 0, 2)
    den = cntk.ops.exp(beta * cntk.ops.log(k + b))
    apply_x = cntk.ops.element_divide(x, den)
    return apply_x
# Create the network.
def create_alexnet():
    """Create the AlexNet network graph, loss and error metrics.

    :return: dict with 'feature'/'label' input variables, 'ce' cross-entropy
        loss, 'pe' top-1 error, 'pe5' top-5 error and 'output' (the logits).
    """
    # Input variables denoting the features and label data
    feature_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))

    # apply model to input
    # remove mean value
    input = minus(feature_var, constant(114), name='mean_removed_input')

    with default_options(activation=None, pad=True, bias=True):
        z = Sequential([
            # we separate Convolution and ReLU to name the output for feature extraction (usually before ReLU)
            Convolution2D((11,11), 96, init=normal(0.01), pad=False, strides=(4,4), name='conv1'),
            Activation(activation=relu, name='relu1'),
            LocalResponseNormalization(1.0, 2, 0.0001, 0.75, name='norm1'),
            MaxPooling((3,3), (2,2), name='pool1'),

            Convolution2D((5,5), 192, init=normal(0.01), init_bias=0.1, name='conv2'),
            Activation(activation=relu, name='relu2'),
            LocalResponseNormalization(1.0, 2, 0.0001, 0.75, name='norm2'),
            MaxPooling((3,3), (2,2), name='pool2'),

            Convolution2D((3,3), 384, init=normal(0.01), name='conv3'),
            Activation(activation=relu, name='relu3'),
            Convolution2D((3,3), 384, init=normal(0.01), init_bias=0.1, name='conv4'),
            Activation(activation=relu, name='relu4'),
            Convolution2D((3,3), 256, init=normal(0.01), init_bias=0.1, name='conv5'),
            Activation(activation=relu, name='relu5'),
            MaxPooling((3,3), (2,2), name='pool5'),

            Dense(4096, init=normal(0.005), init_bias=0.1, name='fc6'),
            Activation(activation=relu, name='relu6'),
            Dropout(0.5, name='drop6'),
            Dense(4096, init=normal(0.005), init_bias=0.1, name='fc7'),
            Activation(activation=relu, name='relu7'),
            Dropout(0.5, name='drop7'),
            Dense(num_classes, init=normal(0.01), name='fc8')
            ])(input)

    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)
    pe5 = classification_error(z, label_var, topN=5)

    log_number_of_parameters(z) ; print()

    return {
        'feature': feature_var,
        'label': label_var,
        'ce' : ce,
        'pe' : pe,
        'pe5': pe5,
        'output': z
    }
# Create trainer
def create_trainer(network, epoch_size, num_quantization_bits, printer, block_size, warm_up):
    """Create a distributed CNTK Trainer for the AlexNet network.

    Uses block-momentum when block_size is given, otherwise data-parallel SGD
    with optional gradient quantization after *warm_up* samples.
    """
    # Set learning parameters (25-epoch step decay, as in the Caffe recipe)
    lr_per_mb = [0.01]*25 + [0.001]*25 + [0.0001]*25 + [0.00001]*25 + [0.000001]
    lr_schedule = cntk.learning_rate_schedule(lr_per_mb, unit=cntk.learner.UnitType.minibatch, epoch_size=epoch_size)
    mm_schedule = cntk.learner.momentum_schedule(0.9)
    l2_reg_weight = 0.0005 # CNTK L2 regularization is per sample, thus same as Caffe

    if block_size != None and num_quantization_bits != 32:
        raise RuntimeError("Block momentum cannot be used with quantization, please remove quantized_bits option.")

    # Create learner
    local_learner = cntk.learner.momentum_sgd(network['output'].parameters, lr_schedule, mm_schedule, unit_gain=False, l2_regularization_weight=l2_reg_weight)
    # Since we reuse parameter settings (learning rate, momentum) from Caffe, we set unit_gain to False to ensure consistency
    # Create trainer
    if block_size != None:
        parameter_learner = block_momentum_distributed_learner(local_learner, block_size=block_size)
    else:
        parameter_learner = data_parallel_distributed_learner(local_learner, num_quantization_bits=num_quantization_bits, distributed_after=warm_up)

    return cntk.Trainer(network['output'], (network['ce'], network['pe']), parameter_learner, printer)
# Train and test
def train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size, restore):
    """Run the CNTK training session with checkpointing and cross-validation.

    :param restore: resume from an existing checkpoint file when True
    """
    # define mapping from intput streams to network inputs
    input_map = {
        network['feature']: train_source.streams.features,
        network['label']: train_source.streams.labels
    }

    # Train all minibatches; checkpoints go to model_path/model_name and the
    # test source is used for periodic cross-validation.
    training_session(
        trainer=trainer, mb_source = train_source,
        var_to_stream = input_map,
        mb_size = minibatch_size,
        progress_frequency=epoch_size,
        checkpoint_config = CheckpointConfig(filename=os.path.join(model_path, model_name), restore=restore),
        cv_config= CrossValidationConfig(source=test_source, mb_size=minibatch_size)
    ).train()
# Train and evaluate the network.
def alexnet_train_and_eval(train_data, test_data, num_quantization_bits=32, block_size=3200, warm_up=0, minibatch_size=256, epoch_size = 1281167, max_epochs=112,
                           restore=True, log_to_file=None, num_mbs_per_log=None, gen_heartbeat=True):
    """Wire network, trainer and data sources together and run training.

    :param train_data: path to the ImageNet training map file
    :param test_data: path to the ImageNet validation map file
    """
    _cntk_py.set_computation_network_trace_level(0)

    progress_printer = ProgressPrinter(
        freq=num_mbs_per_log,
        tag='Training',
        log_to_file=log_to_file,
        rank=Communicator.rank(),  # one log stream per distributed worker
        gen_heartbeat=gen_heartbeat,
        num_epochs=max_epochs)

    network = create_alexnet()
    trainer = create_trainer(network, epoch_size, num_quantization_bits, progress_printer, block_size, warm_up)
    train_source = create_image_mb_source(train_data, True, total_number_of_samples=max_epochs * epoch_size)
    test_source = create_image_mb_source(test_data, False, total_number_of_samples=FULL_DATA_SWEEP)
    train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size, restore)
if __name__=='__main__':
    # Parse CLI options, optionally pin devices per distributed worker,
    # then run training and always finalize the MPI communicator.
    parser = argparse.ArgumentParser()
    data_path = os.path.join(abs_path, "..", "..", "..", "DataSets", "ImageNet")

    parser.add_argument('-datadir', '--datadir', help='Data directory where the ImageNet dataset is located', required=False, default=data_path)
    parser.add_argument('-outputdir', '--outputdir', help='Output directory for checkpoints and models', required=False, default=None)
    parser.add_argument('-logdir', '--logdir', help='Log file', required=False, default=None)
    parser.add_argument('-n', '--num_epochs', help='Total number of epochs to train', type=int, required=False, default='112')
    parser.add_argument('-m', '--minibatch_size', help='Minibatch size', type=int, required=False, default='256')
    parser.add_argument('-e', '--epoch_size', help='Epoch size', type=int, required=False, default='1281167')
    parser.add_argument('-q', '--quantized_bits', help='Number of quantized bits used for gradient aggregation', type=int, required=False, default='32')
    parser.add_argument('-r', '--restart', help='Indicating whether to restart from scratch (instead of restart from checkpoint file by default)', action='store_true')
    parser.add_argument('-device', '--device', type=int, help="Force to run the script on a specified device", required=False, default=None)
    parser.add_argument('-b', '--block_samples', type=int, help="Number of samples per block for block momentum (BM) distributed learner (if 0 BM learner is not used)", required=False, default=None)
    parser.add_argument('-a', '--distributed_after', help='Number of samples to train with before running distributed', type=int, required=False, default='0')

    args = vars(parser.parse_args())

    if args['outputdir'] is not None:
        model_path = args['outputdir'] + "/models"
    if args['logdir'] is not None:
        log_dir = args['logdir']
    if args['device'] is not None:
        # Setting one worker on GPU and one worker on CPU. Otherwise memory consumption is too high for a single GPU.
        if Communicator.rank() == 0:
            cntk.device.set_default_device(cntk.device.gpu(args['device']))
        else:
            cntk.device.set_default_device(cntk.device.cpu())

    data_path = args['datadir']

    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)

    train_data = os.path.join(data_path, 'train_map.txt')
    test_data = os.path.join(data_path, 'val_map.txt')

    try:
        alexnet_train_and_eval(train_data, test_data,
                               max_epochs=args['num_epochs'],
                               restore=not args['restart'],
                               log_to_file=args['logdir'],
                               num_mbs_per_log=200,
                               num_quantization_bits=args['quantized_bits'],
                               block_size=args['block_samples'],
                               warm_up=args['distributed_after'],
                               minibatch_size=args['minibatch_size'],
                               epoch_size=args['epoch_size'],
                               gen_heartbeat=True)
    finally:
        # always release MPI resources, even if training raised
        cntk.distributed.Communicator.finalize()
6533277 | import pygame
from pygame.locals import *
from pygame import mixer
import os
import time
import random
import sys
def resource_path(relative_path):
    """Resolve *relative_path* against the PyInstaller bundle directory.

    When running from a frozen PyInstaller bundle, sys._MEIPASS points to
    the temporary extraction folder; otherwise fall back to the current
    working directory.
    """
    bundle_dir = getattr(sys, "_MEIPASS", None)
    if bundle_dir is None:
        bundle_dir = os.path.abspath(".")
    return os.path.join(bundle_dir, relative_path)
# --- one-time pygame setup and asset loading (module import side effects) ---
pygame.font.init()
WIDTH, HEIGHT = 500, 500
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("1D Shooter")

# Loading the images
simon_url = resource_path(os.path.join("assets", "simon_face.png"))
SIMON_FACE = pygame.image.load(simon_url)

#boosts images (album covers, scaled to 50x50)
album_covers = []
album_files = ["uan.jpg", "tmh.png", "mm.png", "mitam.png", "four.png"]
for album_file in album_files:
    album_url = resource_path(os.path.join("assets", album_file))
    album_image = pygame.image.load(album_url)
    album_image = pygame.transform.scale(album_image, (50, 50))
    album_covers.append(album_image)

# PLAYER SHIP
ONED_PLAYER_url = resource_path(os.path.join("assets", "1d_player.png"))
ONED_PLAYER = pygame.image.load(ONED_PLAYER_url)

# LASERS
ONED_LASER_url = resource_path(os.path.join("assets", "1d_logo.png"))
ONED_LASER = pygame.image.load(ONED_LASER_url)
ONED_LASER_NEW = pygame.transform.scale(ONED_LASER, (50, 40))
CROSS_LASER_url = resource_path(os.path.join("assets", "cross.png"))
CROSS_LASER = pygame.image.load(CROSS_LASER_url)

# background
BG_url = resource_path(os.path.join("assets", "background_main.jpg"))
BG = pygame.transform.scale(pygame.image.load(BG_url), (WIDTH, HEIGHT))

HELP_url = resource_path(os.path.join("assets", "help_icon.png"))
HELP = pygame.image.load(HELP_url).convert_alpha()
HELP_NEW = pygame.transform.scale(HELP, (30, 30))

# audio: dedicated channels for menu music, in-game music and hit buzzer
# NOTE(review): the sound files are loaded via plain relative paths instead of
# resource_path(), so they will not be found in a frozen PyInstaller build.
mixer.init()
start_sound = pygame.mixer.Sound(os.path.join("assets", "1d_start.mp3"))
game_sound = pygame.mixer.Sound(os.path.join("assets", "1d_music.ogg"))
buzzer_sound = pygame.mixer.Sound(os.path.join("assets", "buzzer.mp3"))
start_channel = pygame.mixer.Channel(0)
run_channel = pygame.mixer.Channel(1)
buzzer_channel = pygame.mixer.Channel(2)
class Button:
    """Clickable image button; draw() reports a click once per press."""

    def __init__(self, x, y, img, WIN):
        self.img = img
        self.rect = self.img.get_rect()
        self.rect.topleft = (x, y)
        self.clicked = False  # latches while the mouse button is held
        self.WIN = WIN

    def hover(self):
        """Return True while the mouse cursor is inside the button rect."""
        is_hovering = False
        pos = pygame.mouse.get_pos()
        if self.rect.collidepoint(pos):
            is_hovering = True
        return is_hovering

    def draw(self):
        """Blit the button and return True on the frame it is clicked."""
        action = False
        pos = pygame.mouse.get_pos()  # NOTE(review): unused; hover() re-reads it
        if self.hover():
            if pygame.mouse.get_pressed()[0] == 1 and self.clicked == False:
                self.clicked = True
                action = True
        if pygame.mouse.get_pressed()[0] == 0:
            # releasing the left mouse button re-arms the click latch
            self.clicked = False
        self.WIN.blit(self.img, (self.rect.x, self.rect.y))
        return action
class Laser:
    """A single projectile sprite travelling vertically on screen."""

    def __init__(self, x, y, img):
        self.x = x
        self.y = y
        self.img = img
        self.mask = pygame.mask.from_surface(self.img)

    def draw(self, window):
        """Blit the laser sprite at its current position."""
        window.blit(self.img, (self.x, self.y))

    def move(self, vel):
        """Shift the laser vertically by *vel* pixels (positive = down)."""
        self.y += vel

    def off_screen(self, height):
        """True once the laser has left the vertical range [0, height]."""
        return self.y > height or self.y < 0

    def collision(self, obj):
        """Pixel-perfect collision check via the module-level collide()."""
        return collide(self, obj)
class Ship:
    """Base class for drawable ships (player and enemies).

    Tracks position, health, sprite surfaces and in-flight lasers.
    Subclasses assign ship_img / laser_img.
    """

    # Frames to wait between consecutive shots.
    COOLDOWN = 30

    def __init__(self, x, y, health=100):
        self.x = x
        self.y = y
        self.health = health
        self.ship_img = None   # sprite surface, set by subclasses
        self.laser_img = None  # laser sprite, set by subclasses
        self.lasers = []       # lasers currently in flight
        self.cool_down_counter = 0  # 0 means ready to shoot

    def draw(self, window):
        """Blit the ship and all of its lasers onto *window*."""
        window.blit(self.ship_img, (self.x, self.y))
        for laser in self.lasers:
            laser.draw(window)

    def move_lasers(self, vel, obj):
        """Advance all lasers by *vel*; apply 10 damage to *obj* per hit.

        :return: True when at least one laser hit *obj* this frame.
        """
        self.cooldown()
        did_a_hit = False
        # BUG FIX: iterate over a snapshot -- the original removed items from
        # self.lasers while iterating it, which skips the following element.
        for laser in list(self.lasers):
            laser.move(vel)
            if laser.off_screen(HEIGHT):
                self.lasers.remove(laser)
            elif laser.collision(obj):
                did_a_hit = True
                obj.health -= 10
                self.lasers.remove(laser)
        return did_a_hit

    def cooldown(self):
        """Tick the shot cooldown; becomes ready again after COOLDOWN frames."""
        if self.cool_down_counter >= self.COOLDOWN:
            self.cool_down_counter = 0
        elif self.cool_down_counter > 0:
            self.cool_down_counter += 1

    def shoot(self):
        """Fire a laser from the ship's position if the cooldown allows it."""
        if self.cool_down_counter == 0:
            laser = Laser(self.x, self.y, self.laser_img)
            self.lasers.append(laser)
            self.cool_down_counter = 1

    def get_width(self):
        """Pixel width of the ship sprite."""
        return self.ship_img.get_width()

    def get_height(self):
        """Pixel height of the ship sprite."""
        return self.ship_img.get_height()
class Player(Ship):
    """The user-controlled ship with a health bar and a score counter."""

    def __init__(self, x, y, health=50):
        super().__init__(x, y, health)
        self.ship_img = ONED_PLAYER
        self.laser_img = ONED_LASER_NEW
        self.mask = pygame.mask.from_surface(self.ship_img)
        self.max_health = health  # used to scale the health bar
        self.score = 0

    def move_lasers(self, vel, objs):
        """Advance player lasers; destroy any enemy in *objs* they hit.

        Each kill increments self.score and removes both the enemy and
        the laser.
        """
        self.cooldown()
        # BUG FIX: iterate over snapshots -- the original removed elements
        # from self.lasers and from objs while iterating them, which skips
        # the element following every removal.
        for laser in list(self.lasers):
            laser.move(vel)
            if laser.off_screen(HEIGHT):
                self.lasers.remove(laser)
            else:
                for obj in list(objs):
                    if laser.collision(obj):
                        self.score += 1
                        objs.remove(obj)
                        if laser in self.lasers:
                            self.lasers.remove(laser)

    def draw(self, window):
        """Draw the ship, its lasers and the health bar."""
        super().draw(window)
        self.healthbar(window)

    def healthbar(self, window):
        """Render a red (damage) bar overlaid with a green (health) bar."""
        pygame.draw.rect(window, (255, 0, 0), (self.x, self.y +
                         self.ship_img.get_height() + 10, self.ship_img.get_width(), 10))
        pygame.draw.rect(window, (0, 255, 0), (self.x, self.y + self.ship_img.get_height() + 10,
                         self.ship_img.get_width() * (1-((self.max_health-self.health)/self.max_health)), 10))
class Enemy(Ship):
    """An enemy ship (Simon's face) that drifts down and shoots crosses."""

    def __init__(self, x, y, health=100):
        super().__init__(x, y, health)
        self.ship_img = SIMON_FACE
        self.laser_img = CROSS_LASER
        self.mask = pygame.mask.from_surface(self.ship_img)
        self.attack = 10  # damage dealt when colliding with the player

    def move(self, vel):
        """Drift downward by *vel* pixels."""
        self.y += vel

    def shoot(self):
        """Fire a cross laser, offset 10px right of the enemy's left edge."""
        if self.cool_down_counter == 0:
            laser = Laser(self.x + 10, self.y, self.laser_img)
            self.lasers.append(laser)
            self.cool_down_counter = 1
class Boost(Ship):
    """A falling album-cover pickup that restores player health."""

    def __init__(self, x, y):
        super().__init__(x, y)
        self.ship_img = random.choice(album_covers)  # random 1D album art
        self.mask = pygame.mask.from_surface(self.ship_img)
        self.attack = 10  # amount of health restored on pickup

    def move(self, vel):
        """Drift downward by *vel* pixels."""
        self.y += vel
def collide(obj1, obj2):
    """Pixel-perfect collision test between two mask-bearing objects.

    :return: True when obj1's mask overlaps obj2's mask at their offset.
    """
    dx = obj2.x - obj1.x
    dy = obj2.y - obj1.y
    return obj1.mask.overlap(obj2.mask, (dx, dy)) is not None
class Game:
def __init__(self, WIN):
self.pause = False
self.helping = False
self.score = 0
self.run = False
self.FPS = 60 # frames per second
self.level = 0
self.main_font = pygame.font.SysFont("arial", 30)
self.click = pygame.mouse.get_pressed()
self.WIN = WIN
self.enemies = []
self.boosts = []
self.wave_length = 5
self.enemy_vel = 1
self.enemy_vel = 2
self.player_vel = 5
self.laser_vel = 5
self.player = Player(WIDTH/2 - ONED_PLAYER.get_width()/2, 300)
self.clock = pygame.time.Clock()
self.lost = False
self.lost_count = 0
    def redraw_window(self):
        """Redraw one frame: background, HUD, enemies, boosts and player."""
        self.mouse_pointer()
        self.WIN.blit(BG, (0, 0))  # redraws image
        # draw text
        score_label = self.main_font.render(
            f"Score: {self.player.score}", 1, (255, 255, 255))
        level_label = self.main_font.render(
            f"Level: {self.level}", 1, (255, 255, 255))
        self.WIN.blit(score_label, (10, 10))
        self.WIN.blit(level_label, (WIDTH - level_label.get_width() - 10, 10))
        for enemy in self.enemies:
            enemy.draw(self.WIN)
        for boost in self.boosts:
            boost.draw(self.WIN)
        self.player.draw(self.WIN)
        if self.lost:
            # stop the main loop and switch to the game-over page
            self.run = False
            self.score = self.player.score
            self.lost_page()  # NOTE(review): defined outside this excerpt -- confirm it exists
        self.display_mouse()
        pygame.display.update()
    def game_loop(self):  # when the program is running
        """Main gameplay loop: wave spawning, input, movement, collisions."""
        start_channel.stop()
        run_channel.play(game_sound, loops=-1, fade_ms=1000)
        # reset per-session state before entering the loop
        self.lost = False
        self.level = 0
        self.enemies = []
        self.wave_length = 5
        self.enemy_vel = 1
        self.boost_vel = 3
        self.player_vel = 7
        self.laser_vel = 4
        self.player = Player(WIDTH/2 - ONED_PLAYER.get_width()/2, 300)
        while self.run:
            self.clock.tick(self.FPS)
            self.redraw_window()
            if self.player.health <= 0:
                self.lost = True
                self.lost_count += 1
            if len(self.enemies) == 0:
                # wave cleared: level up and spawn a larger wave off-screen
                self.level += 1
                self.wave_length += 5
                for i in range(self.wave_length):
                    enemy = Enemy(random.randrange(50, WIDTH - 100),
                                  random.randrange(-1500, -100))
                    self.enemies.append(enemy)
            # small per-frame chance to spawn a health boost above the screen
            if random.random() < 0.00167:
                boost = Boost(random.randrange(50, WIDTH - 50),
                              random.randrange(-1500, -100))
                self.boosts.append(boost)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:  # if press X button
                    quit()
            self.controls()
            got_hit = False
            # NOTE(review): the lists below are mutated while being iterated;
            # each removal skips the next element -- consider iterating copies.
            for enemy in self.enemies:
                enemy.move(self.enemy_vel)
                if enemy.move_lasers(self.laser_vel, self.player):
                    got_hit = True
                if random.randrange(0, 2*60) == 1:
                    enemy.shoot()
                if collide(enemy, self.player):
                    self.player.health -= enemy.attack
                    self.enemies.remove(enemy)
                elif enemy.y + enemy.get_height() > HEIGHT:
                    # enemy escaped off the bottom: penalise the score
                    self.score -= 1
                    self.enemies.remove(enemy)
            if got_hit:
                buzzer_channel.play(buzzer_sound)
            for boost in self.boosts:
                boost.move(self.boost_vel)
                if collide(boost, self.player):
                    # heal, capped at the player's maximum health
                    self.player.health = min(self.player.health + boost.attack, self.player.max_health)
                    self.boosts.remove(boost)
                elif boost.y + boost.get_height() > HEIGHT:
                    self.boosts.remove(boost)
            self.player.move_lasers(-self.laser_vel, self.enemies)
    def controls(self):
        """Keyboard input: arrows move, space shoots, Escape pauses."""
        keys = pygame.key.get_pressed()
        if keys[pygame.K_LEFT] and self.player.x - self.player_vel > 0:  # left
            self.player.x -= self.player_vel
        if keys[pygame.K_RIGHT] and self.player.x + self.player_vel + self.player.get_width() < WIDTH:
            self.player.x += self.player_vel
        if keys[pygame.K_UP] and self.player.y - self.player_vel > 0:
            self.player.y -= self.player_vel
        # the +15 keeps the health bar drawn below the ship on screen
        if keys[pygame.K_DOWN] and self.player.y + self.player_vel + self.player.get_height() + 15 < HEIGHT:
            self.player.y += self.player_vel
        if keys[pygame.K_SPACE]:
            self.player.shoot()
        if keys[pygame.K_ESCAPE]:
            self.paused()
    def unpaused(self):
        """Resume gameplay: clear the pause flag and unpause audio channels."""
        self.pause = False
        run_channel.unpause()
        buzzer_channel.unpause()
    def paused(self):
        """Freeze the game and show the pause menu until the player resumes."""
        self.pause = True
        run_channel.pause()
        buzzer_channel.pause()
        self.PAUSE_font = pygame.font.SysFont("arial", 30)
        mouse = pygame.mouse.get_pos()  # NOTE(review): unused -- kept as-is
        while self.pause:
            self.WIN.blit(BG, (0, 0))
            self.PAUSE_label = self.PAUSE_font.render(
                "Game is Paused. Click to Continue", 1, (255, 255, 255))
            self.WIN.blit(self.PAUSE_label, (WIDTH/2 -
                          self.PAUSE_label.get_width()/2, 200))
            self.mouse_pointer()
            button_y = HEIGHT/2 + 100
            # three menu buttons; button() dispatches the named action
            self.button("MAIN MENU", WIDTH/2 - 150, button_y, 100,
                        50, (255, 0, 0), (200, 0, 0), "new_game", True)
            self.button("RESUME", WIDTH/2, button_y, 100,
                        50, (255, 0, 0), (200, 0, 0), "resume", True)
            self.button("QUIT", WIDTH/2 + 150, button_y, 100,
                        50, (255, 0, 0), (200, 0, 0), "quit", True)
            self.display_mouse()
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
def mouse_pointer(self):
    # Select the standard arrow cursor; it is applied to the window
    # later by display_mouse().
    self.cursor = pygame.cursors.Cursor(pygame.SYSTEM_CURSOR_ARROW)
def mouse_hand(self):
    # Select the hand cursor (used when hovering buttons); applied
    # later by display_mouse().
    self.cursor = pygame.cursors.Cursor(pygame.SYSTEM_CURSOR_HAND)
def display_mouse(self):
    # Apply whichever cursor mouse_pointer()/mouse_hand() selected last.
    pygame.mouse.set_cursor(*self.cursor)
def button(self, msg: str, x: int, y: int, w: int, h: int, ic, ac, action=None, centered=False):
    """Draw a clickable button and dispatch ``action`` when it is clicked.

    Args:
        msg: caption rendered inside the button.
        x, y: top-left corner, or the center when ``centered`` is True.
        w, h: minimum size; grown if the caption does not fit.
        ic, ac: fill colors when idle and when hovered.
        action: one of "start", "resume", "quit", "help", "main",
            "new_game", or None for a purely decorative button.
        centered: interpret (x, y) as the button center.
    """
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    button_font = pygame.font.SysFont("arial", 20)
    button_label = button_font.render(msg, 1, (255, 255, 255))
    text_w = button_label.get_width()
    text_h = button_label.get_height()
    # Grow the button so the caption always fits inside it.
    w = max(w, text_w)
    h = max(h, text_h)
    if centered:
        x = x - w/2
        y = y - h/2
    # NOTE(review): this is true while the left button is held DOWN, not
    # on release, so actions can fire repeatedly across frames -- confirm
    # this is intended before changing it.
    mouse_is_released = (click[0] == 1)
    if x+w > mouse[0] > x and y+h > mouse[1] > y:
        # Hovered: draw with the active color and switch to a hand cursor.
        pygame.draw.rect(self.WIN, ac, (x, y, w, h))
        self.mouse_hand()
        if mouse_is_released and action != None:
            if action == "start":
                start_channel.pause()
                self.game_loop()
            if action == "resume":
                self.unpaused()
            if action == "quit":
                quit()
            if action == "help":
                self.help_menu()
            if action == "main":
                self.main_menu()
            if action == "new_game":
                self.new_game()
    else:
        pygame.draw.rect(self.WIN, ic, (x, y, w, h))
    # Center the caption within the (possibly grown) button rectangle.
    self.WIN.blit(button_label, (x + (w - text_w)/2, y + (h - text_h)/2))
def help_menu(self):
    """Show the help screen until the player navigates away or closes
    the window."""
    self.help_font = pygame.font.SysFont("arial", 30)
    # Static text: render once, outside the frame loop.
    help_label = self.help_font.render(
        "Kill <NAME>!! Use your arrow keys", 1, (255, 255, 255))
    help_label2 = self.help_font.render(
        "to move 1D and shoot with SpaceBar", 1, (255, 255, 255))
    helping = True
    while helping:
        self.WIN.blit(BG, (0, 0))
        self.WIN.blit(
            help_label, (WIDTH/2 - help_label.get_width()/2, 100))
        self.WIN.blit(
            help_label2, (WIDTH/2 - help_label2.get_width()/2, 150))
        self.mouse_pointer()
        self.button("BACK TO MAIN MENU", WIDTH/2, HEIGHT/2 + 25, 100,
                    50, (255, 0, 0), (200, 0, 0), "main", True)
        self.button("QUIT", WIDTH/2, HEIGHT/2 + 100, 100,
                    50, (255, 0, 0), (200, 0, 0), "quit", True)
        self.display_mouse()
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Quit once and leave the loop; the original called
                # pygame.quit() twice and then kept rendering on a
                # dead display.
                pygame.quit()
                return
def lost_page(self):
    """Render one frame of the game-over screen: final score plus the
    MAIN MENU / RESTART / QUIT buttons.

    NOTE(review): this draws a single frame and does not pump events or
    call pygame.display.update() -- presumably the caller's loop does;
    verify against the call site.
    """
    self.run = True
    for channel in (run_channel, buzzer_channel):
        channel.stop()
    self.WIN.blit(BG, (0, 0))
    self.lost_font = pygame.font.SysFont("arial", 30)
    self.lost_label = self.lost_font.render(
        "Game Over!! Your score was: " + str(self.score), 1, (255, 255, 255))
    self.WIN.blit(self.lost_label,
                  (WIDTH/2 - self.lost_label.get_width()/2, 100))
    self.mouse_pointer()
    button_y = HEIGHT/2
    nav_buttons = (("MAIN MENU", WIDTH/2 - 150, "new_game"),
                   ("RESTART", WIDTH/2, "start"),
                   ("QUIT", WIDTH/2 + 150, "quit"))
    for caption, button_x, act in nav_buttons:
        self.button(caption, button_x, button_y, 100,
                    50, (255, 0, 0), (200, 0, 0), act, True)
    self.display_mouse()
def new_game(self):
    """Stop all audio, restart the intro music, and return to the
    main menu."""
    for channel in (start_channel, run_channel, buzzer_channel):
        channel.stop()
    start_channel.play(start_sound, loops=-1, fade_ms=5000)
    self.main_menu()
def main_menu(self):
    """Run the title-screen loop until the player starts the game, opens
    the help screen, or closes the window."""
    self.title_font = pygame.font.SysFont("arial", 30)
    # Static prompt: render once instead of on every frame.
    self.title_label = self.title_font.render(
        "Press the button to begin...", 1, (255, 255, 255))
    self.run = True
    while self.run:
        self.WIN.blit(BG, (0, 0))
        self.WIN.blit(HELP_NEW, (0, 0))
        self.WIN.blit(self.title_label, (WIDTH/2 -
                                         self.title_label.get_width()/2, 200))
        self.mouse_pointer()
        self.button("START", WIDTH/2, HEIGHT/2 + 100, 100,
                    50, (255, 0, 0), (200, 0, 0), "start", True)
        # Image button in the top-left corner opens the help screen.
        help_button = Button(0, 0, HELP_NEW, self.WIN)
        if help_button.hover():
            self.mouse_hand()
        self.display_mouse()
        if help_button.draw():
            self.help_menu()
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.run = False
                pygame.quit()
if __name__ == "__main__":
    # Entry point: initialise the pygame mixer, start the intro music on
    # an endless loop (fading in over 5 s), then hand control to the
    # main-menu loop.
    mixer.init()
    start_channel.play(start_sound, loops=-1, fade_ms=5000)
    game = Game(WINDOW)
    game.main_menu()
| StarcoderdataPython |
4841447 | <gh_stars>1-10
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential
import tensorflow.keras.layers as layers
from tensorflow.keras.regularizers import L2
from tensorflow.keras.applications import VGG16
from .custom_layers import L2Normalization
from models import registry
WEIGHTS_PATH_NO_TOP = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.1/'
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5')
def vgg16(pretrained=True):
    """Build the VGG16 convolutional trunk (no classifier head).

    Returns a Model whose outputs are [block4_conv3, block5_conv3].
    Layer names match the canonical VGG16 names so that the pretrained
    ImageNet weights can be loaded ``by_name``.
    """
    inputs = layers.Input(shape=[None, None, 3])
    # (block index, conv count, filter count); blocks 1-4 end with a
    # 2x2 max-pool, block 5 does not.
    block_cfg = [(1, 2, 64), (2, 2, 128), (3, 3, 256), (4, 3, 512), (5, 3, 512)]
    x = inputs
    taps = {}
    for block, n_convs, filters in block_cfg:
        for conv in range(1, n_convs + 1):
            x = layers.Conv2D(filters, 3, padding='same', activation='relu',
                              name='block%d_conv%d' % (block, conv))(x)
        # Output of the block's last conv, taken before pooling.
        taps[block] = x
        if block < 5:
            x = layers.MaxPool2D(2, 2, padding='same',
                                 name='block%d_pool' % block)(x)
    model = tf.keras.models.Model(inputs=inputs, outputs=[taps[4], taps[5]])
    if pretrained:
        print("Using pretrained weights of VGG16!!!")
        pretrained_weight = tf.keras.utils.get_file('vgg16', WEIGHTS_PATH_NO_TOP)
        model.load_weights(pretrained_weight, by_name=True)
    return model
def VGG16(vgg16):
    """Extend the VGG trunk with the SSD fc-to-conv layers (conv6/conv7).

    Takes the trunk model from ``vgg16()`` and returns a Model producing
    [L2-normalised block4_conv3, conv7].
    """
    low_feat, high_feat = vgg16.outputs
    # Rescale the block4_conv3 feature map (scale initialised to 20).
    low_feat = L2Normalization(gamma_init=20)(low_feat)
    x = layers.MaxPool2D(3, 1, padding='same')(high_feat)
    # conv6: dilated 3x3 replacing fc6; conv7: 1x1 replacing fc7.
    x = layers.Conv2D(1024, 3, padding='same', dilation_rate=6, activation='relu')(x)
    x = layers.Conv2D(1024, 1, padding='same', activation='relu')(x)
    return tf.keras.models.Model(inputs=vgg16.inputs, outputs=[low_feat, x])
def create_extra_layers(VGG16):
    """Append the four SSD extra feature stages after conv7.

    Returns a Model with six feature-map outputs: the two inputs plus
    one output per extra stage.
    """
    out_38x38, out_19x19 = VGG16.outputs
    feature_maps = [out_38x38, out_19x19]
    x = out_19x19
    # (bottleneck filters, output filters, strided): strided stages
    # zero-pad then downsample by 2; the final two shrink spatially via
    # unpadded 'valid' 3x3 convolutions.
    stages = [(256, 512, True),
              (128, 256, True),
              (128, 256, False),
              (128, 256, False)]
    for squeeze, expand, strided in stages:
        x = layers.Conv2D(squeeze, 1, activation='relu', padding='same')(x)
        if strided:
            x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
            x = layers.Conv2D(expand, 3, strides=2, padding='valid', activation='relu')(x)
        else:
            x = layers.Conv2D(expand, 3, activation='relu', padding='valid')(x)
        feature_maps.append(x)
    return tf.keras.models.Model(inputs=VGG16.inputs, outputs=feature_maps)
class VGG(tf.keras.layers.Layer):
    """SSD backbone layer: VGG16 trunk + fc conversions + extra stages.

    Calling the layer on an image batch returns the six SSD feature maps.
    ``cfg`` is accepted for registry-interface compatibility but unused.
    """

    def __init__(self, cfg=None, pretrained=True):
        super(VGG, self).__init__()
        trunk = vgg16(pretrained)
        with_fcs = VGG16(trunk)
        self.model = create_extra_layers(with_fcs)

    def call(self, x):
        # Delegate straight to the assembled functional model.
        return self.model(x)
@registry.BACKBONES.register('vgg')
def vgg(cfg, pretrained=True):
    """Registry factory: build the VGG SSD backbone."""
    return VGG(cfg, pretrained)
if __name__ == '__main__':
    # Smoke test: build the backbone with pretrained weights and print
    # the shape of each of the six feature maps for a batch of random
    # 300x300 images.  (Removed an unused `import numpy as np` and fixed
    # the `outpus` typo.)
    vgg = VGG(pretrained=True)
    rand_inp = tf.random.normal([4, 300, 300, 3])
    outputs = vgg(rand_inp)
    for out in outputs:
        print(out.shape)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.