seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8824516219 | # -*- coding: utf-8 -*-
import argparse
import sys
import gym
from gym import wrappers, logger
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
import random
from random import choices
class RandomAgent(object):
def __init__(self, action_space):
"""Initialize an Agent object.
Params
=======
size (int): size of the memory
memory (array()): memory of the agent
batch_size (int): size of the part of memory which is selected (N)
state_size (int): dimension of each state (D_in)
action_size (int): dimension of each action (D_out)
"""
self.action_space = action_space
self.size = 100000 # Memory size
self.memory = []
self.batch_size = 32
self.state_size = 4
self.action_size = 2
self.learning_rate = 1e-3
self.model = MultipleLayer(self.state_size, 100, self.action_size, 1)
self.model_duplicata = MultipleLayer(self.state_size, 100, self.action_size, 1)
self.loss_fn = torch.nn.MSELoss(reduction='sum')
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
self.learn_state = 0
self.gamma = 0.95
self.upadteModel()
# action 1 = droite action 0 = gauche
def act(self, observation, reward, done):
epsilon = 0.1
rnd = random.uniform(0, 1)
res = self.model(torch.tensor(observation).float())
maxval, idx = res.max(0)
maxval, idx2 = res.min(0)
if rnd < 1-epsilon:
indices = idx.item()
else:
indices = idx2.item()
return indices
def upadteModel(self):
self.model_duplicata.linear1 = self.model.linear1
self.model_duplicata.w = self.model.w
self.model_duplicata.linear2 = self.model.linear2
def remember(self, value):
self.memory.append(value)
if len(self.memory) > self.size:
self.memory.pop(0)
def showMemory(self):
print(self.memory)
def getMemory(self):
return self.memory
def retry(self, batch_size):
minibatch = random.sample(self.memory, self.batch_size)
for etat, action, etat_suivant, reward, done in minibatch:
qO = self.model(torch.tensor(etat).float())
qOsa = qO[action]
qO_suivant = self.model_duplicata(torch.tensor(etat_suivant).float())
rPlusMaxNext = reward + self.gamma*torch.max(qO_suivant)
if not done :
JO = pow(qOsa - rPlusMaxNext, 2)
else :
JO = pow(qOsa - reward, 2)
loss = self.loss_fn(qOsa, JO)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if (self.learn_state % 10000 == 0):
print("learn_state : ", self.learn_state)
self.upadteModel()
self.learn_state +=1
class MultipleLayer(torch.nn.Module):
    """Simple MLP: D_in -> H -> (H -> H) x nbcouche -> D_out with sigmoid
    activations on the input and hidden layers."""

    def __init__(self, D_in, H, D_out, nbcouche):
        super(MultipleLayer, self).__init__()
        self.n_couche = nbcouche
        self.linear1 = torch.nn.Linear(D_in, H)
        # BUG FIX: the hidden layers were held in a plain Python list, so
        # they were never registered as submodules — model.parameters() did
        # not include them and the optimizer never trained them.  ModuleList
        # registers them properly.
        self.w = torch.nn.ModuleList(torch.nn.Linear(H, H) for _ in range(nbcouche))
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        y_pred = torch.sigmoid(self.linear1(x))
        # NOTE(review): iterates n_couche - 1 hidden layers although
        # n_couche were allocated, so the last one is unused — verify.
        # (Original behavior preserved.)
        for n in range(self.n_couche - 1):
            y_pred = torch.sigmoid(self.w[n](y_pred))
        return self.linear2(y_pred)
# Entry point: trains the agent on CartPole and plots per-episode returns.
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=None)
parser.add_argument('env_id', nargs='?', default='CartPole-v1', help='Select the environment to run')
args = parser.parse_args()
logger.set_level(logger.INFO)
env = gym.make(args.env_id)
# Monitor wrapper records results under outdir (overwritten via force=True).
outdir = '/tmp/random-agent-results'
env = wrappers.Monitor(env, directory=outdir, force=True)
env.seed(0)
agent = RandomAgent(env.action_space)
listSomme = []  # per-episode cumulative reward
episode_count = 260
reward = 1
max_reward = 500
etat_space = env.observation_space.shape[0]
action_space = env.action_space.n
for i in range(episode_count):
somme = 0
etat = env.reset()
done = False
while True:
# env.render()
action = agent.act(etat, reward, done)
etat_suivant, reward, done, _ = env.step(action)
# Penalize the terminal transition so failures are discouraged.
reward = reward if not done else -10
tensorAdd = (etat, action, etat_suivant, reward, done)
agent.remember(tensorAdd)
etat = etat_suivant
somme += reward
if done:
agent.upadteModel()
break
if somme > max_reward:
break
# Only learn once the memory can fill at least one minibatch.
if len(agent.memory) > agent.batch_size:
agent.retry(agent.batch_size)
listSomme.append(somme)
# NOTE(review): if the `somme > max_reward` break above ever fires,
# len(listSomme) < episode_count and plt.plot(x, y) gets mismatched
# lengths — verify.
x = np.arange(episode_count)
y = np.array(listSomme)
plt.plot(x, y, "-ob", markersize=2, label="nom de la courbe")
plt.show()
env.close()
| ThibaudPerrin/tp2-bio-inspi | TP2_Cartpole.py | TP2_Cartpole.py | py | 5,135 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.MSELoss",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"li... |
74108338023 | # first order fluid-flow model based on the theory of planned behavior
from pylab import array, linspace
from scipy import integrate #for integrate.odeint
# setup logging
import logging
logging.basicConfig(filename='src/__logs/firstOrderModel2.log',\
level=logging.DEBUG,\
format='%(asctime)s %(levelname)s:%(message)s')
from .agent_defaultPersonality import agent as agentConstructor
#GLOBAL VARS:
agent = agentConstructor()
samp = 2 #samples per time step
def fakeFunc(A,t): return -1.0 #fake function for allocating space
ETA = [integrate.odeint(fakeFunc,[0,0],linspace(0,1,10)),\
integrate.odeint(fakeFunc,[0,0],linspace(0,1,10)),\
integrate.odeint(fakeFunc,[0,0],linspace(0,1,10)),\
integrate.odeint(fakeFunc,[0,0],linspace(0,1,10)),\
integrate.odeint(fakeFunc,[0,0],linspace(0,1,10))]
XI = fakeFunc
def getEta(data,t,xi):
"""Return the 5-element eta state at integer time *t*.

Serves cached values from *data* when available; otherwise integrates the
model forward one step at a time, appending each new state to *data*
(mutated in place).  *xi* is the stimulus function and is stored in the
module-global XI so the eta*Func right-hand sides can see it.
"""
global samp, ETA, time, agent, XI
if t < len(data):
return data[t]
else:
XI = xi # update input function from paramteter
if len(data) == 0:
# Seed the cache with the steady-state initial condition.
ETA0 = getInitialEta(agent.beta,agent.gamma,XI)
data.append(ETA0[:])
for T in range(len(data),t+1):
# TODO: should this be samp*t so that accuracy is not lost far from 0???
logging.info('solving ode @ t='+str(T)+', using '+str(samp)+' sub-samples')
time = linspace(0,T,samp) #(start,end,nSamples)
etadot_0 = [0,0,0,0,0] #assumption of 1st order model
#get arrays of data len=samp*t
# Each eta channel is integrated independently, always starting from
# the cached t=0 state.
ETA[0] = integrate.odeint(eta1Func,[data[0][0],etadot_0[0]],time)
ETA[1] = integrate.odeint(eta2Func,[data[0][1],etadot_0[1]],time)
ETA[2] = integrate.odeint(eta3Func,[data[0][2],etadot_0[2]],time)
ETA[3] = integrate.odeint(eta4Func,[data[0][3],etadot_0[3]],time)
ETA[4] = integrate.odeint(eta5Func,[data[0][4],etadot_0[4]],time)
logging.debug('len(result)='+str(len(ETA[0][:,0])))
# restructure ETA using [eta#][time , eta_or_dEta] )
# Keep only the final sample of each channel as the state at time T.
E = [ETA[0][-1,0],\
ETA[1][-1,0],\
ETA[2][-1,0],\
ETA[3][-1,0],\
ETA[4][-1,0]]
data.append(E)
return data[t]
# === PRIVATE METHODS ===
def eta1Func(A,t):
"""ODE right-hand side for eta1 (first stimulus-driven channel).

A is the state [eta, etaDot]; t is time.
NOTE(review): getEta calls odeint with a 2-element state, but this
returns a single scalar rather than a 2-element derivative vector —
verify this matches what scipy.integrate.odeint expects.
"""
#these come from calling function
global XI, agent
logging.debug( 'A='+str(A) )
eta = A[0]
etaDot=A[1]
# logging.debug( '(agent.gamma*XI(t-agent.theta)-eta)/agent.tau' )
# logging.debug( '('+str(agent.gamma[0,0])+'*'+str(XI(t-agent.theta[0])[0])+'-'+str(eta)+')/' + str(agent.tau[0]) + '=' )
# First-order lag toward the delayed, gain-scaled stimulus component 0.
etaDDot= (agent.gamma[0,0]*XI(t-agent.theta[0])[0] - eta)/agent.tau[0]
logging.debug( 'eta1etaDDot='+str(etaDDot) )
return checkValue(etaDDot)
def eta2Func(A,t):
"""ODE right-hand side for eta2 (second stimulus-driven channel).

First-order lag toward the delayed, gain-scaled stimulus component 1.
NOTE(review): returns a scalar for a 2-element odeint state — see eta1Func.
"""
#these come from calling function
global XI, agent
eta = A[0]
etaDot = A[1]
etaDDot= (agent.gamma[1,1]*XI(t-agent.theta[1])[1] - eta)/agent.tau[1]
return checkValue(etaDDot)
def eta3Func(A,t):
"""ODE right-hand side for eta3 (third stimulus-driven channel).

First-order lag toward the delayed, gain-scaled stimulus component 2.
NOTE(review): returns a scalar for a 2-element odeint state — see eta1Func.
"""
#these come from calling function
global XI, agent
eta = A[0]
etaDot = A[1]
etaDDot= (agent.gamma[2,2]*XI(t-agent.theta[2])[2] - eta)/agent.tau[2]
return checkValue(etaDDot)
def eta4Func(A,t):
"""ODE right-hand side for eta4, driven by the delayed histories of
eta1..eta3 weighted by the beta coupling row 3.

NOTE(review): returns a scalar for a 2-element odeint state — see eta1Func.
"""
#these come from calling function
global agent
eta = A[0]
etaDot = A[1]
etaDDot= ( agent.beta[3,0]*pastEta(t-agent.theta[3],0) \
+ agent.beta[3,1]*pastEta(t-agent.theta[4],1) \
+ agent.beta[3,2]*pastEta(t-agent.theta[5],2) \
- eta)/agent.tau[3]
return checkValue(etaDDot)
def eta5Func(A,t):
"""ODE right-hand side for eta5, driven by the delayed histories of
eta4 and eta3 weighted by the beta coupling row 4.

NOTE(review): returns a scalar for a 2-element odeint state — see eta1Func.
"""
#these come from calling function
global agent
eta = A[0]
etaDot = A[1]
etaDDot= ( agent.beta[4,3]*pastEta(t-agent.theta[6],3) \
+ agent.beta[4,2]*pastEta(t-agent.theta[7],2) \
- eta)/agent.tau[4]
return checkValue(etaDDot)
# values cannot fall below 0! ... or can they?
def checkValue(v):
    """Pass a candidate derivative value through unchanged.

    This is a hook for clamping negative values to zero ("values cannot
    fall below 0! ... or can they?"); the clamp is currently disabled, so
    the function is the identity.
    """
    return v
#finds initial eta values based on steady-state assumption
def getInitialEta(beta, gamma, xi):
    """Steady-state initial eta values implied by the stimulus at t=0.

    beta, gamma : coupling matrices, indexed as elsewhere in this module
    xi          : stimulus function; xi(0) must yield >= 3 components
    Returns a numpy array [eta0, eta1, eta2, eta3, eta4].
    """
    stimulus = xi(0)
    # The first three etas respond directly to the stimulus components.
    direct = [gamma[i, i] * stimulus[i] for i in range(3)]
    # eta3 aggregates the direct channels; eta4 depends on eta3 and eta2.
    eta3 = beta[3, 0] * direct[0] + beta[3, 1] * direct[1] + beta[3, 2] * direct[2]
    eta4 = beta[4, 3] * eta3 + beta[4, 2] * direct[2]
    return array(direct + [eta3, eta4])
#function to lookup a past eta (for time delays)
def pastEta(T, etaIndex):
    """Look up the value of eta channel *etaIndex* at past time *T*
    (used to implement time delays in the coupled channels).

    Times at or before t=0 resolve to the steady-state initial value;
    references past the end of the integrated history clamp to the last
    known value (and log an error).
    """
    global ETA, samp, agent, XI
    indexOfTime = int(round(T / samp))
    #logging.debug( T )
    if indexOfTime <= 0:
        # BUG FIX: the original returned the whole 5-element initial-state
        # array here; callers (eta4Func/eta5Func) multiply the result by a
        # scalar coefficient, so they need the single channel value.
        return getInitialEta(agent.beta, agent.gamma, XI)[etaIndex]
    elif indexOfTime >= len(ETA[etaIndex][:, 0]):
        logging.error('attempted reference to future Eta')
        return ETA[etaIndex][-1, 0]
    else:
        logging.debug(' time:' + str(T))
        logging.debug('index:' + str(indexOfTime))
        logging.debug(' len:' + str(len(ETA[etaIndex][:, 0])))
        logging.debug('value:' + str(ETA[etaIndex][indexOfTime, 0]))  # [eta#][time , eta_or_dEta] )
        return ETA[etaIndex][indexOfTime, 0]
| PIELab/behaviorSim | behaviorSim/PECSagent/state/CSEL/OLD/model_firstOrder.py | model_firstOrder.py | py | 4,656 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "agent_defaultPersonality.agent",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "s... |
15287706589 | ##encoding=UTF8
"""
This module provides high performance iterator recipes.
best time and memory complexity implementation applied.
compatible: python2 and python3
import:
from .iterable import (take, flatten, flatten_all, nth, shuffled, grouper, grouper_dict, grouper_list,
running_windows, cycle_running_windows, cycle_slice, count_generator)
"""
from __future__ import print_function
import collections
import itertools
import random
import sys
is_py2 = (sys.version_info[0] == 2)
if is_py2:
from itertools import ifilterfalse as filterfalse, izip_longest as zip_longest
else: # in python3
from itertools import filterfalse, zip_longest
def take(n, iterable):
    """Collect the first *n* items of *iterable* into a list."""
    prefix = itertools.islice(iterable, n)
    return list(prefix)
def flatten(listOfLists):
    """Lazily concatenate one level of nesting."""
    for sub in listOfLists:
        for item in sub:
            yield item
def flatten_all(listOfLists):
    """Recursively flatten arbitrarily nested iterables.

    Anything exposing __iter__ is descended into; everything else is
    yielded as a leaf.  (NOTE: in Python 3 str has __iter__, so string
    leaves would recurse without bound — original behavior preserved.)
    """
    for element in listOfLists:
        if hasattr(element, "__iter__"):
            for leaf in flatten_all(element):
                yield leaf
        else:
            yield element
def nth(iterable, n, default=None):
    """Return the item at index *n* of *iterable*, or *default* if exhausted."""
    tail = itertools.islice(iterable, n, None)
    return next(tail, default)
def shuffled(iterable):
    """Return a new list with the items of *iterable* in random order.

    *iterable* must be sized (len() is taken); the input is not mutated.
    """
    population_size = len(iterable)
    return random.sample(iterable, population_size)
def grouper(iterable, n, fillvalue=None):
    """Yield successive n-tuples from *iterable*, padding the last with *fillvalue*.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # One shared iterator replicated n times yields successive chunks.
    shared = iter(iterable)
    return zip_longest(*([shared] * n), fillvalue=fillvalue)
def grouper_dict(DICT, n):
    """Evenly divide DICT into chunk dictionaries of at most n entries.

    The final chunk may hold fewer than n entries; no padding entries are
    emitted.  BUG FIX: a private sentinel object is used as the fill value,
    so a genuine None key in DICT is preserved (the original `k != None`
    test silently dropped it).
    """
    fill = object()
    for group in grouper(DICT, n, fillvalue=fill):
        yield {k: DICT[k] for k in group if k is not fill}
def grouper_list(LIST, n):
    """Evenly divide LIST into chunk lists of at most n items.

    The final chunk may be shorter than n; no padding is emitted.
    BUG FIX: a private sentinel object is used as the fill value, so
    genuine None items in LIST survive (the original `i != None` test
    silently dropped them).
    """
    fill = object()
    for group in grouper(LIST, n, fillvalue=fill):
        yield [item for item in group if item is not fill]
def running_windows(iterable, size):
    """Yield successive *size*-wide windows over *iterable* as lists.

    e.g. iterable = [1,2,3,4,5], size = 3
    yields: [1,2,3], [2,3,4], [3,4,5]
    """
    window = collections.deque(maxlen=size)
    for item in iterable:
        window.append(item)
        if len(window) == size:
            yield list(window)
def cycle_running_windows(iterable, size):
    """Yield *size*-wide windows over *iterable*, wrapping around the end.

    e.g. iterable = [1,2,3,4,5], size = 2
    yields: [1,2], [2,3], [3,4], [4,5], [5,1]

    *iterable* must be sized (len() is taken).  One window is produced per
    starting element, i.e. len(iterable) windows in total.
    """
    window = collections.deque(maxlen=size)
    length = len(iterable)
    # BUG FIX: the original stopped after `length` iterations, so the
    # windows that wrap past the end (e.g. [5,1] above) were never yielded.
    # The window only fills after `size` items, so length + size - 1
    # iterations are needed to emit one window per starting element.
    # (max(size, 1) keeps the degenerate size=0 behavior unchanged.)
    stop = length + max(size, 1) - 1
    counter = itertools.count(1)
    for item in itertools.cycle(iterable):
        window.append(item)
        if len(window) == size:
            yield list(window)
        if next(counter) == stop:
            break
def cycle_slice(LIST, start, end):  # 测试阶段, 不实用
    """Slice LIST in the right-hand cycle direction from start to end
    (both inclusive), wrapping around when end < start.

    e.g.
    array = [0,1,2,3,4,5,6,7,8,9]
    cycle_slice(array, 4, 7) -> [4,5,6,7]
    cycle_slice(array, 8, 2) -> [8,9,0,1,2]
    """
    seq = list(LIST)
    if end >= start:
        return seq[start:end + 1]
    return seq[start:] + seq[:end + 1]
def padding_left_shift(array, left_shift):
    """Shift *array* left by *left_shift*, padding on the right with the
    original last element.  Returns a deque.

    padding_left_shift([1, 1, 1, 2, 2, 2, 2, 2, 4, 4, 4], 1)
    -> [1, 1, 2, 2, 2, 2, 2, 4, 4, 4, 4]
    """
    shifted = collections.deque(array)
    tail_value = shifted[-1]
    shifted.rotate(-left_shift)
    # Drop the elements that wrapped to the right end...
    for _ in range(left_shift):
        shifted.pop()
    # ...and pad with copies of the original last element.
    shifted.extend(tail_value for _ in range(left_shift))
    return shifted
def padding_right_shift(array, right_shift):
    """Shift *array* right by *right_shift*, padding on the left with the
    original first element.  Returns a deque.

    padding_right_shift([1, 1, 1, 2, 2, 2, 2, 2, 4, 4, 4], 1)
    -> [1, 1, 1, 1, 2, 2, 2, 2, 2, 4, 4]
    """
    shifted = collections.deque(array)
    head_value = shifted[0]
    shifted.rotate(right_shift)
    # Drop the elements that wrapped to the left end...
    for _ in range(right_shift):
        shifted.popleft()
    # ...and pad with copies of the original first element.
    shifted.extendleft(head_value for _ in range(right_shift))
    return shifted
def count_generator(generator, memory_efficient=True):
    """Count the items produced by *generator* (consuming it).

    memory_efficient=True  -> constant memory, one pass, somewhat slower
    memory_efficient=False -> materializes everything; faster, more memory
    """
    if not memory_efficient:
        return len(list(generator))
    return sum(1 for _ in generator)
if __name__ == "__main__":
from angora.GADGET.pytimer import Timer
import time
import unittest
timer = Timer()
class IterToolsUnittest(unittest.TestCase):
def setUp(self):
self.iterable_generator = range(10)
self.iterable_list = list(range(10))
self.iterable_set = set(list(range(10)))
self.iterable_dict = {i: chr(j) for i, j in zip(range(1, 11), range(65, 75))}
def test_take(self):
self.assertEqual(take(5, self.iterable_generator), [0, 1, 2, 3, 4])
self.assertEqual(take(5, self.iterable_list), [0, 1, 2, 3, 4])
self.assertEqual(take(5, self.iterable_set), [0, 1, 2, 3, 4])
self.assertEqual(take(5, self.iterable_dict), [1, 2, 3, 4, 5])
def test_flatten(self):
"""测试flatten的性能, 应该要比二重循环性能好
"""
complexity = 1000
iterable = [list(range(complexity))] * complexity
timer.start()
for _ in flatten(iterable):
pass
print("fatten method takes %.6f second" % timer.stop())
timer.start()
for chunk in iterable:
for _ in chunk:
pass
print("double for loop method takes %.6f second" % timer.stop())
def test_flatten_all(self):
"""flatten_all slower, but more convenient. And you don't need to know how iterable
nested in each other.
"""
complexity = 100
iterable = [[list(range(complexity))] * complexity] * complexity
timer.start()
for _ in flatten_all(iterable):
pass
print("fatten_all method takes %.6f second" % timer.stop())
timer.start()
for chunk1 in iterable:
for chunk2 in chunk1:
for _ in chunk2:
pass
print("nested for loop method takes %.6f second" % timer.stop())
def test_nth(self):
self.assertEqual(nth(self.iterable_list, 5), 5)
def test_count_generator(self):
self.assertEqual(count_generator(self.iterable_generator), 10)
def number_generator():
for i in range(1000000):
yield i
timer.start()
count_generator(number_generator(), memory_efficient=True)
print("memory_efficient way takes %s second" % timer.stop())
timer.start()
count_generator(number_generator(), memory_efficient=False)
print("non-memory_efficient way takes %s second" % timer.stop())
unittest.main()
def test_flatten():
"""测试flatten的性能
"""
print("{:=^40}".format("test_flatten"))
complexity = 1000
a = [[1,2,3],[4,5,6],[7,8,9,10]] * complexity
st = time.clock()
for _ in flatten(a):
pass
print(time.clock() - st)
st = time.clock()
for chunk in a:
for _ in chunk:
pass
print(time.clock() - st)
# test_flatten()
def test_flatten_all():
"""测试flatten_all的性能
"""
print("{:=^40}".format("test_flatten_all"))
complexity = 1000
a = [[1,2,3],[4,[5,6],[7,8]], [9,10]] * complexity
b = range(complexity * 10)
st = time.clock()
for _ in flatten_all(a):
pass
print(time.clock() - st)
st = time.clock()
for _ in b:
pass
print(time.clock() - st)
# test_flatten_all()
def test_nth():
"""测试nth的性能
"""
print("{:=^40}".format("test_flatten_all"))
n = 10000
array = [i for i in range(n)]
st = time.clock()
for i in range(n):
_ = array[i]
print(time.clock() - st)
st = time.clock()
for i in range(n):
_ = nth(array, i)
print(time.clock() - st)
st = time.clock()
for i in array:
_ = i
print(time.clock() - st)
# test_nth()
def test_grouper():
"""Test for grouper, grouper_list, grouper_dict
"""
print("{:=^40}".format("test_grouper"))
for chunk in grouper("abcdefg",3):
print(chunk)
# test_grouper()
def test_grouper_dict_list():
"""Test for grouper_dict, grouper_list
"""
print("{:=^40}".format("test_grouper_dict_list"))
print("=== test for grouper_dict ===")
a = {key: "hello" for key in range(10)} ## test grouper_list
for chunk_d in grouper_dict(a, 3):
print(chunk_d)
print("=== test for grouper_list ===")
complexity = 1000000
timer.start()
b = range(complexity) # test grouper_dict
for chunk_l in grouper_list(b, 1000):
# print(chunk_l)
pass
timer.timeup()
timer.start()
chunk_l = list()
for i in b:
chunk_l.append(i)
if len(chunk_l) == 1000:
# print(chunk_l)
chunk_l = list()
# print(chunk_l)
timer.timeup()
# test_grouper_dict_list()
def timetest_grouper():
array = [[1,2,3] for _ in range(1000)]
def regular():
for item in array:
pass
def use_grouper():
for chunk_l in grouper_list(array, 10):
for item in chunk_l:
pass
timer.test(regular, 1000)
timer.test(use_grouper, 1000)
# timetest_grouper()
def test_running_windows():
print("{:=^40}".format("test_running_windows"))
array = [0,1,2,3,4]
print("Testing running windows")
for i in running_windows(array,3): # 测试 窗宽 = 3
print(i)
for i in running_windows(array, 1): # 测试 窗宽 = 1
print(i)
for i in running_windows(array, 0): # 测试 窗宽 = 0
print(i)
print("Testing cycle running windows")
for i in cycle_running_windows(array, 3): # 测试 窗宽 = 3
print(i)
for i in cycle_running_windows(array, 1): # 测试 窗宽 = 1
print(i)
for i in cycle_running_windows(array, 0): # 测试 窗宽 = 0
print(i)
# test_running_windows()
def test_cycle_slice():
print("{:=^40}".format("test_cycle_slice"))
array = [0,1,2,3,4,5,6,7,8,9]
print("Testing cycle slice")
print(cycle_slice(array, 3, 6) )
print(cycle_slice(array, 6, 3) )
# test_cycle_slice()
def test_padding_shift():
print("{:=^40}".format("test_padding_shift"))
array = [1,1,1,2,2,2,2,2,4,4,4]
print(padding_left_shift(array, 1))
print(padding_right_shift(array, 1))
# test_padding_shift() | MacHu-GWU/Angora | angora/DATA/iterable.py | iterable.py | py | 12,109 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "itertools.islice",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "... |
495235347 | import glob
import os
import sqlite3
from collections import defaultdict
from contextlib import contextmanager
import six
import sqlalchemy as db
from sqlalchemy.pool import NullPool
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
from dagster import check
from dagster.core.serdes import ConfigurableClass, ConfigurableClassData
from dagster.utils import mkdir_p
from ...pipeline_run import PipelineRunStatus
from ...sql import (
create_engine,
get_alembic_config,
handle_schema_errors,
run_alembic_upgrade,
stamp_alembic_rev,
)
from ..base import DagsterEventLogInvalidForRun
from ..schema import SqlEventLogStorageMetadata
from ..sql_event_log import SqlEventLogStorage
# Event-log storage keeping one SQLite database file per run under base_dir,
# with a watchdog Observer used to stream newly-written events to callbacks.
class SqliteEventLogStorage(SqlEventLogStorage, ConfigurableClass):
def __init__(self, base_dir, inst_data=None):
'''Note that idempotent initialization of the SQLite database is done on a per-run_id
basis in the body of connect, since each run is stored in a separate database.'''
self._base_dir = os.path.abspath(check.str_param(base_dir, 'base_dir'))
mkdir_p(self._base_dir)
# run_id -> {callback: (watchdog handler, scheduled watch)}
self._watchers = defaultdict(dict)
self._obs = Observer()
self._obs.start()
self._inst_data = check.opt_inst_param(inst_data, 'inst_data', ConfigurableClassData)
# Run alembic schema migrations over every per-run database found on disk.
def upgrade(self):
all_run_ids = self.get_all_run_ids()
print(
'Updating event log storage for {n_runs} runs on disk...'.format(
n_runs=len(all_run_ids)
)
)
alembic_config = get_alembic_config(__file__)
for run_id in all_run_ids:
with self.connect(run_id) as conn:
run_alembic_upgrade(alembic_config, conn, run_id)
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {'base_dir': str}
@staticmethod
def from_config_value(inst_data, config_value):
return SqliteEventLogStorage(inst_data=inst_data, **config_value)
# Each *.db file under base_dir corresponds to one run; strip the extension.
def get_all_run_ids(self):
all_filenames = glob.glob(os.path.join(self._base_dir, '*.db'))
return [os.path.splitext(os.path.basename(filename))[0] for filename in all_filenames]
def path_for_run_id(self, run_id):
return os.path.join(self._base_dir, '{run_id}.db'.format(run_id=run_id))
# SQLAlchemy URL; os.sep is normalized to '/' so the URL is well-formed on
# Windows as well.
def conn_string_for_run_id(self, run_id):
check.str_param(run_id, 'run_id')
return 'sqlite:///{}'.format('/'.join(self.path_for_run_id(run_id).split(os.sep)))
# Create the schema, enable WAL journaling, and stamp the alembic revision.
def _initdb(self, engine, run_id):
try:
SqlEventLogStorageMetadata.create_all(engine)
engine.execute('PRAGMA journal_mode=WAL;')
except (db.exc.DatabaseError, sqlite3.DatabaseError) as exc:
six.raise_from(DagsterEventLogInvalidForRun(run_id=run_id), exc)
alembic_config = get_alembic_config(__file__)
conn = engine.connect()
try:
stamp_alembic_rev(alembic_config, conn)
finally:
conn.close()
# Context manager yielding a connection to the run's database, creating and
# initializing the database file on first use.
@contextmanager
def connect(self, run_id=None):
check.str_param(run_id, 'run_id')
conn_string = self.conn_string_for_run_id(run_id)
engine = create_engine(conn_string, poolclass=NullPool)
if not os.path.exists(self.path_for_run_id(run_id)):
self._initdb(engine, run_id)
conn = engine.connect()
try:
with handle_schema_errors(
conn,
get_alembic_config(__file__),
msg='SqliteEventLogStorage for run {run_id}'.format(run_id=run_id),
):
yield conn
finally:
conn.close()
# Delete every database file, including WAL/shared-memory sidecar files.
def wipe(self):
for filename in (
glob.glob(os.path.join(self._base_dir, '*.db'))
+ glob.glob(os.path.join(self._base_dir, '*.db-wal'))
+ glob.glob(os.path.join(self._base_dir, '*.db-shm'))
):
os.unlink(filename)
# Register *callback* to receive events for run_id past start_cursor.
def watch(self, run_id, start_cursor, callback):
watchdog = SqliteEventLogStorageWatchdog(self, run_id, callback, start_cursor)
self._watchers[run_id][callback] = (
watchdog,
self._obs.schedule(watchdog, self._base_dir, True),
)
# Unregister a previously-watched callback; no-op if never registered.
def end_watch(self, run_id, handler):
if handler in self._watchers[run_id]:
event_handler, watch = self._watchers[run_id][handler]
self._obs.remove_handler_for_watch(event_handler, watch)
del self._watchers[run_id][handler]
# Filesystem watcher for one run's .db file: on every modification it pulls
# the events written past the cursor and forwards them to the callback.
class SqliteEventLogStorageWatchdog(PatternMatchingEventHandler):
def __init__(self, event_log_storage, run_id, callback, start_cursor, **kwargs):
self._event_log_storage = check.inst_param(
event_log_storage, 'event_log_storage', SqliteEventLogStorage
)
self._run_id = check.str_param(run_id, 'run_id')
self._cb = check.callable_param(callback, 'callback')
self._log_path = event_log_storage.path_for_run_id(run_id)
# A cursor of -1 means "deliver from the beginning".
self._cursor = start_cursor if start_cursor is not None else -1
super(SqliteEventLogStorageWatchdog, self).__init__(patterns=[self._log_path], **kwargs)
# Fetch events past the cursor, forward each to the callback, and stop
# watching once the callback reports a terminal pipeline status.
def _process_log(self):
events = self._event_log_storage.get_logs_for_run(self._run_id, self._cursor)
self._cursor += len(events)
for event in events:
status = self._cb(event)
if status == PipelineRunStatus.SUCCESS or status == PipelineRunStatus.FAILURE:
self._event_log_storage.end_watch(self._run_id, self._cb)
def on_modified(self, event):
check.invariant(event.src_path == self._log_path)
self._process_log()
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster/core/storage/event_log/sqlite/sqlite_event_log.py | sqlite_event_log.py | py | 5,713 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sql_event_log.SqlEventLogStorage",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "dagster.core.serdes.ConfigurableClass",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 34,
"usage_type": "call"
},
{... |
72522500264 | import logging
from .models import ChangeLogTracker
logger = logging.getLogger(__name__)
class ChangeLoggerMiddleware(object):
"""Old-style Django middleware that stashes the current request on the
ChangeLogTracker thread-local, presumably so change logging elsewhere can
see which request triggered a change — verify against ChangeLogTracker."""
def process_request(self, request):
# Best-effort: never let request stashing break request handling.
try:
ChangeLogTracker.thread.request = request
except Exception as e:
logger.error(e)
| kdmukai/changelogger | changelogger/middleware.py | middleware.py | py | 299 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "models.ChangeLogTracker.thread",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.ChangeLogTracker",
"line_number": 12,
"usage_type": "name"
}
] |
41246308973 | #!/usr/bin/env python3
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
class CORSPermissiveHTTPRequestHandler(SimpleHTTPRequestHandler):
"""Static-file handler that adds a wildcard CORS header to every response
(presumably for local Pyodide development, per the file name)."""
def end_headers(self):
# Inject the CORS header before the header block is flushed.
self.send_header('Access-Control-Allow-Origin', '*')
super().end_headers()
if __name__ == "__main__":
# Serve the current working directory on localhost:8000 until interrupted.
with TCPServer(("127.0.0.1", 8000), CORSPermissiveHTTPRequestHandler) as httpd:
print("Serving...")
httpd.serve_forever()
| habitatofmatt/keyboard-reducer | tools/pyodide-serve.py | pyodide-serve.py | py | 465 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "http.server.SimpleHTTPRequestHandler",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "socketserver.TCPServer",
"line_number": 14,
"usage_type": "call"
}
] |
37708151811 | from numpy.core.numeric import normalize_axis_tuple
import pandas as pd
import numpy as np
import xarray as xr
from pandarallel import pandarallel
import time
import credentials
import tc_functions as fun
import plotting_functions as tcplt
storm_data = pd.read_csv('data/filtered_storm_list_keep-leading-5.csv')
storm_data["DATETIME"] = pd.to_datetime(storm_data["DATETIME"])
def int_circulation_storm(id, storm_data, r, normalize, plt_folder, data_folder, upper = False, plot = False):
"""Compute (and save) the integrated-circulation field for every track
point of storm *id*.

id          : storm identifier matched against storm_data['ID']
storm_data  : DataFrame of track points with DATETIME/LAT/LON columns
r           : radius passed to fun.integrated_circulation
normalize   : normalization option passed to fun.integrated_circulation
plt_folder  : output directory for figures (when plot=True)
data_folder : output directory for the saved .npy fields
upper       : if True use 800-200 hPa winds, else 800 hPa shear
plot        : if True also render a two-shade map per track point
"""
storm = storm_data[storm_data['ID'].str.match(id)]
storm = storm.reset_index(drop = True)
int_circ = []
for index, datapoint in storm.iterrows():
year = datapoint["DATETIME"].year
month = datapoint["DATETIME"].month
day = datapoint["DATETIME"].day
hour = datapoint["DATETIME"].hour
# Pull the GFS data valid at this track point's time.
gfs_data = fun.gfs_access(year, month, day, hour,
credentials.RDA_USER, credentials.RDA_PASSWORD)
print("Doing #" + str(index) + "/" + str(storm.shape[0]-1))
# Use upper level winds or shear?
if upper:
vws = fun.wind_stamp(datapoint['LAT'], datapoint['LON'], 800, 200, gfs_data,
vortex_rm = False, vortex_rm_rad = 650)
else:
vws = fun.shear_stamp(datapoint['LAT'], datapoint['LON'], 800, gfs_data,
vortex_rm = True, vortex_rm_rad = 650)
ic = fun.integrated_circulation(vws, r, normalize)
int_circ.append(ic) # Use this later if you want
if plot:
tcplt.two_shade_map(vws, ic,
shading = np.arange(-2.,2.,.05),
ticks = np.arange(-2.,2.,0.5),
savefile = plt_folder + id + "_" + str(index) + ".png",
legend_title = "Integrated Circulation")
# Persist each track point's field as <ID>_<index>.npy under data_folder.
np.save(data_folder + id + "_" + str(index) + ".npy", ic)
# --- Script configuration and parallel driver ---
plt_folder = "/glade/work/galenv/int_circ_figs_kl5/"
data_folder = "/glade/work/galenv/int_circ_data_kl5/"
radius = 150
normalize_option = "log"
#plt_folder = "data/test/"
#data_folder = "data/test/"
unique_storms = pd.Series(np.unique(storm_data['ID']))
# Warm-up fetch (per the log message) before starting parallel work —
# presumably primes the remote GFS session; confirm against fun.gfs_access.
print("Getting GFS data warmup...")
gfs_data = fun.gfs_access(2016, 12, 12, 0, credentials.RDA_USER, credentials.RDA_PASSWORD)
print("GFS data has been gotten! On to the parallel stuff")
time.sleep(3)
print("Setting up parallel env.")
pandarallel.initialize()
print("Parallel env set up... starting parallel computations.")
# One worker task per unique storm ID: upper-level winds (True), no plots (False).
unique_storms.parallel_apply(int_circulation_storm,
args = (storm_data, radius, normalize_option, plt_folder, data_folder, True, False))
print("All done!")
#unique_storms.iloc[3:7].parallel_apply(int_circulation_storm,
# args = (storm_data, radius, normalize_option, plt_folder, data_folder, False, False))
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tc_functions.gfs_access",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "credential... |
12028607497 | # -*- coding: utf-8 -*-
from django.db import connections
from django.db.models.aggregates import Count
from django.utils.unittest import TestCase
from django_orm.postgresql.hstore.functions import HstoreKeys, HstoreSlice, HstorePeek
from django_orm.postgresql.hstore.expressions import HstoreExpression
from .models import DataBag, Ref, RefsBag
class TestDictionaryField(TestCase):
def setUp(self):
DataBag.objects.all().delete()
def _create_bags(self):
alpha = DataBag.objects.create(name='alpha', data={'v': '1', 'v2': '3'})
beta = DataBag.objects.create(name='beta', data={'v': '2', 'v2': '4'})
return alpha, beta
def _create_bitfield_bags(self):
# create dictionaries with bits as dictionary keys (i.e. bag5 = { 'b0':'1', 'b2':'1'})
for i in xrange(10):
DataBag.objects.create(name='bag%d' % (i,),
data=dict(('b%d' % (bit,), '1') for bit in xrange(4) if (1 << bit) & i))
def test_empty_instantiation(self):
bag = DataBag.objects.create(name='bag')
self.assertTrue(isinstance(bag.data, dict))
self.assertEqual(bag.data, {})
def test_named_querying(self):
alpha, beta = self._create_bags()
instance = DataBag.objects.get(name='alpha')
self.assertEqual(instance, alpha)
instance = DataBag.objects.filter(name='beta')[0]
self.assertEqual(instance, beta)
def test_annotations(self):
self._create_bitfield_bags()
queryset = DataBag.objects\
.annotate(num_id=Count('id'))\
.filter(num_id=1)
self.assertEqual(queryset[0].num_id, 1)
def test_unicode_processing(self):
greets = {
u'de': u'Gr\xfc\xdfe, Welt',
u'en': u'hello, world',
u'es': u'hola, ma\xf1ana',
u'he': u'\u05e9\u05dc\u05d5\u05dd, \u05e2\u05d5\u05dc\u05dd',
u'jp': u'\u3053\u3093\u306b\u3061\u306f\u3001\u4e16\u754c',
u'zh': u'\u4f60\u597d\uff0c\u4e16\u754c',
}
DataBag.objects.create(name='multilang', data=greets)
instance = DataBag.objects.get(name='multilang')
self.assertEqual(greets, instance.data)
def test_query_escaping(self):
me = self
def readwrite(s):
# try create and query with potentially illegal characters in the field and dictionary key/value
o = DataBag.objects.create(name=s, data={ s: s })
me.assertEqual(o, DataBag.objects.get(name=s, data={ s: s }))
readwrite('\' select')
readwrite('% select')
readwrite('\\\' select')
readwrite('-- select')
readwrite('\n select')
readwrite('\r select')
readwrite('* select')
def test_replace_full_dictionary(self):
DataBag.objects.create(name='foo', data={ 'change': 'old value', 'remove': 'baz'})
replacement = { 'change': 'new value', 'added': 'new'}
DataBag.objects.filter(name='foo').update(data=replacement)
instance = DataBag.objects.get(name='foo')
self.assertEqual(replacement, instance.data)
def test_equivalence_querying(self):
alpha, beta = self._create_bags()
for bag in (alpha, beta):
data = {'v': bag.data['v'], 'v2': bag.data['v2']}
instance = DataBag.objects.get(data=data)
self.assertEqual(instance, bag)
r = DataBag.objects.filter(data=data)
self.assertEqual(len(r), 1)
self.assertEqual(r[0], bag)
def test_hkeys(self):
alpha, beta = self._create_bags()
instance = DataBag.objects.filter(id=alpha.id)
self.assertEqual(instance.hkeys('data'), ['v', 'v2'])
instance = DataBag.objects.filter(id=beta.id)
self.assertEqual(instance.hkeys('data'), ['v', 'v2'])
def test_hkeys_annotation(self):
alpha, beta = self._create_bags()
queryset = DataBag.objects.annotate_functions(keys=HstoreKeys("data"))
self.assertEqual(queryset[0].keys, ['v', 'v2'])
self.assertEqual(queryset[1].keys, ['v', 'v2'])
def test_hpeek(self):
    # hpeek() returns a single value by key, or None for a missing key.
    alpha, beta = self._create_bags()
    queryset = DataBag.objects.filter(id=alpha.id)
    self.assertEqual(queryset.hpeek(attr='data', key='v'), '1')
    self.assertEqual(queryset.hpeek(attr='data', key='invalid'), None)
def test_hpeek_annotation(self):
    # HstorePeek annotation exposes a single key's value per row.
    alpha, beta = self._create_bags()
    queryset = DataBag.objects.annotate_functions(peeked=HstorePeek("data", "v"))
    self.assertEqual(queryset[0].peeked, "1")
    self.assertEqual(queryset[1].peeked, "2")
def test_hremove(self):
    # hremove() deletes keys in the DB; accepts a single key or a list.
    alpha, beta = self._create_bags()
    instance = DataBag.objects.get(name='alpha')
    self.assertEqual(instance.data, alpha.data)
    DataBag.objects.filter(name='alpha').hremove('data', 'v2')
    instance = DataBag.objects.get(name='alpha')
    self.assertEqual(instance.data, {'v': '1'})
    instance = DataBag.objects.get(name='beta')
    self.assertEqual(instance.data, beta.data)
    DataBag.objects.filter(name='beta').hremove('data', ['v', 'v2'])
    instance = DataBag.objects.get(name='beta')
    self.assertEqual(instance.data, {})
def test_hslice(self):
    # hslice() returns a sub-dict limited to the requested keys.
    alpha, beta = self._create_bags()
    queryset = DataBag.objects.filter(id=alpha.id)
    self.assertEqual(queryset.hslice(attr='data', keys=['v']), {'v': '1'})
    self.assertEqual(queryset.hslice(attr='data', keys=['invalid']), {})
def test_hslice_annotation(self):
    # HstoreSlice annotation attaches the sliced sub-dict to each row.
    alpha, beta = self._create_bags()
    queryset = DataBag.objects.annotate_functions(sliced=HstoreSlice("data", ['v']))
    self.assertEqual(queryset.count(), 2)
    self.assertEqual(queryset[0].sliced, {'v': '1'})
def test_hupdate(self):
    # hupdate() merges new keys into the stored dict without replacing it.
    alpha, beta = self._create_bags()
    self.assertEqual(DataBag.objects.get(name='alpha').data, alpha.data)
    DataBag.objects.filter(name='alpha').hupdate('data', {'v2': '10', 'v3': '20'})
    self.assertEqual(DataBag.objects.get(name='alpha').data, {'v': '1', 'v2': '10', 'v3': '20'})
def test_key_value_subset_querying(self):
    # contains({k: v}) matches rows whose hstore includes that key/value subset.
    alpha, beta = self._create_bags()
    for bag in (alpha, beta):
        qs = DataBag.objects.where(
            HstoreExpression("data").contains({'v': bag.data['v']})
        )
        self.assertEqual(len(qs), 1)
        self.assertEqual(qs[0], bag)
        qs = DataBag.objects.where(
            HstoreExpression("data").contains({'v': bag.data['v'], 'v2': bag.data['v2']})
        )
        self.assertEqual(len(qs), 1)
        self.assertEqual(qs[0], bag)
def test_multiple_key_subset_querying(self):
    # contains([keys]) matches only rows carrying every listed key.
    alpha, beta = self._create_bags()
    for keys in (['v'], ['v', 'v2']):
        qs = DataBag.objects.where(
            HstoreExpression("data").contains(keys)
        )
        self.assertEqual(qs.count(), 2)
    for keys in (['v', 'nv'], ['n1', 'n2']):
        qs = DataBag.objects.where(
            HstoreExpression("data").contains(keys)
        )
        self.assertEqual(qs.count(), 0)
def test_single_key_querying(self):
    # contains(key) matches rows that carry the given single key.
    alpha, beta = self._create_bags()
    for key in ('v', 'v2'):
        qs = DataBag.objects.where(HstoreExpression("data").contains(key))
        self.assertEqual(qs.count(), 2)
    for key in ('n1', 'n2'):
        qs = DataBag.objects.where(HstoreExpression("data").contains(key))
        self.assertEqual(qs.count(), 0)
def test_nested_filtering(self):
    # Successive where()/filter() calls must narrow the queryset cumulatively.
    self._create_bitfield_bags()
    # Test cumulative successive filters for both dictionaries and other fields
    qs = DataBag.objects.all()
    self.assertEqual(10, qs.count())
    qs = qs.where(HstoreExpression("data").contains({'b0':'1'}))
    self.assertEqual(5, qs.count())
    qs = qs.where(HstoreExpression("data").contains({'b1':'1'}))
    self.assertEqual(2, qs.count())
    qs = qs.filter(name='bag3')
    self.assertEqual(1, qs.count())
def test_aggregates(self):
    # Aggregation must work on top of an hstore where() filter.
    self._create_bitfield_bags()
    res = DataBag.objects.where(HstoreExpression("data").contains({'b0':'1'}))\
        .aggregate(Count('id'))
    self.assertEqual(res['id__count'], 5)
def test_empty_querying(self):
    """An empty hstore dict must be matchable via get/filter/where."""
    # The created row only needs to exist; the unused local binding was removed.
    DataBag.objects.create(name='bag')
    self.assertTrue(DataBag.objects.get(data={}))
    self.assertTrue(DataBag.objects.filter(data={}))
    self.assertTrue(DataBag.objects.where(HstoreExpression("data").contains({})))
class TestReferencesField(TestCase):
    """Tests for an hstore-backed dict field whose values are model references (Ref rows)."""
    def setUp(self):
        # Start each test from an empty table.
        Ref.objects.all().delete()
        RefsBag.objects.all().delete()
    def _create_bags(self):
        # Fixture: four Ref rows split across two RefsBag instances.
        refs = [Ref.objects.create(name=str(i)) for i in range(4)]
        alpha = RefsBag.objects.create(name='alpha', refs={'0': refs[0], '1': refs[1]})
        beta = RefsBag.objects.create(name='beta', refs={'0': refs[2], '1': refs[3]})
        return alpha, beta, refs
    def test_empty_instantiation(self):
        # A bag created without refs gets an empty dict, not None.
        bag = RefsBag.objects.create(name='bag')
        self.assertTrue(isinstance(bag.refs, dict))
        self.assertEqual(bag.refs, {})
    def test_equivalence_querying(self):
        # Exact-dict equality lookups match only the owning bag.
        alpha, beta, refs = self._create_bags()
        for bag in (alpha, beta):
            refs = {'0': bag.refs['0'], '1': bag.refs['1']}
            self.assertEqual(RefsBag.objects.get(refs=refs), bag)
            r = RefsBag.objects.filter(refs=refs)
            self.assertEqual(len(r), 1)
            self.assertEqual(r[0], bag)
    def test_hkeys(self):
        alpha, beta, refs = self._create_bags()
        self.assertEqual(RefsBag.objects.filter(id=alpha.id).hkeys(attr='refs'), ['0', '1'])
    def test_hpeek(self):
        # hpeek() resolves the stored reference back to the Ref instance.
        alpha, beta, refs = self._create_bags()
        self.assertEqual(RefsBag.objects.filter(id=alpha.id).hpeek(attr='refs', key='0'), refs[0])
        self.assertEqual(RefsBag.objects.filter(id=alpha.id).hpeek(attr='refs', key='invalid'), None)
    def test_hslice(self):
        alpha, beta, refs = self._create_bags()
        self.assertEqual(RefsBag.objects.filter(id=alpha.id).hslice(attr='refs', keys=['0']), {'0': refs[0]})
        self.assertEqual(RefsBag.objects.filter(id=alpha.id).hslice(attr='refs', keys=['invalid']), {})
    def test_empty_querying(self):
        bag = RefsBag.objects.create(name='bag')
        self.assertTrue(RefsBag.objects.get(refs={}))
        self.assertTrue(RefsBag.objects.filter(refs={}))
    # TODO: fix this test
    #def test_key_value_subset_querying(self):
    #    alpha, beta, refs = self._create_bags()
    #    for bag in (alpha, beta):
    #        qs = RefsBag.objects.where(
    #            HstoreExpression("refs").contains({'0': bag.refs['0']})
    #        )
    #        self.assertEqual(len(qs), 1)
    #        self.assertEqual(qs[0], bag)
    #        qs = RefsBag.objects.where(
    #            HstoreExpression("refs").contains({'0': bag.refs['0'], '1': bag.refs['1']})
    #        )
    #        self.assertEqual(len(qs), 1)
    #        self.assertEqual(qs[0], bag)
    def test_multiple_key_subset_querying(self):
        alpha, beta, refs = self._create_bags()
        for keys in (['0'], ['0', '1']):
            qs = RefsBag.objects.where(HstoreExpression("refs").contains(keys))
            self.assertEqual(qs.count(), 2)
        for keys in (['0', 'nv'], ['n1', 'n2']):
            qs = RefsBag.objects.where(HstoreExpression("refs").contains(keys))
            self.assertEqual(qs.count(), 0)
    def test_single_key_querying(self):
        alpha, beta, refs = self._create_bags()
        for key in ('0', '1'):
            qs = RefsBag.objects.where(HstoreExpression("refs").contains(key))
            self.assertEqual(qs.count(), 2)
        for key in ('n1', 'n2'):
            qs = RefsBag.objects.where(HstoreExpression("refs").contains(key))
            self.assertEqual(qs.count(), 0)
| cr8ivecodesmith/django-orm-extensions-save22 | tests/modeltests/pg_hstore/tests.py | tests.py | py | 12,065 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.utils.unittest.TestCase",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.DataBag.objects.all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.DataBag.objects",
"line_number": 14,
"usage_type": "attribute"
},
{
... |
10770360159 | #! /usr/bin/python3
import sys
import logging
import cliff.app
import cliff.commandmanager
from commands.workflows import Workflows
from commands.sysconfig import SysConfig
from commands.daemons import Coordinator
from commands.daemons import Provisioner
from commands.generator import Generator
from commands.reports import Reports
from commands.status import Status
class PancancerApp(cliff.app.App):
    """Cliff-based CLI application that registers all pancancer sub-commands."""
    log = logging.getLogger(__name__)
    def __init__(self):
        # Register every sub-command class under its CLI name on a shared manager.
        commandMgr = cliff.commandmanager.CommandManager('pancancer.app')
        super(PancancerApp, self).__init__(
            description='Pancancer CLI',
            version='1.0',
            command_manager=commandMgr,
        )
        commands = {
            'workflows': Workflows,
            'generator': Generator,
            'reports': Reports,
            'provisioner': Provisioner,
            'coordinator': Coordinator,
            'status': Status,
            'sysconfig': SysConfig
        }
        for k, v in commands.items():
            commandMgr.add_command(k, v)
    def initialize_app(self, argv):
        # cliff hook: called once before command dispatch.
        self.log.debug('initialize_app')
    def prepare_to_run_command(self, cmd):
        # cliff hook: called just before a sub-command runs.
        self.log.debug('prepare_to_run_command %s', cmd.__class__.__name__)
    def clean_up(self, cmd, result, err):
        # cliff hook: called after a sub-command finishes, err is any exception.
        self.log.debug('clean_up %s', cmd.__class__.__name__)
        if err:
            self.log.debug('got an error: %s', err)
def main(argv=None):
    """Run the pancancer CLI; return its exit status.

    BUG FIX: the default was ``argv=sys.argv[1:]``, which is evaluated once at
    import time, so later mutations of sys.argv were ignored. Resolve the
    default at call time instead (backward compatible for all callers).
    """
    if argv is None:
        argv = sys.argv[1:]
    app = PancancerApp()
    return app.run(argv)
if __name__ == '__main__':
    # Propagate the CLI's return code as the process exit status.
    sys.exit(main(sys.argv[1:]))
| ICGC-TCGA-PanCancer/cli | scripts/pancancer.py | pancancer.py | py | 1,578 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "cliff.app.app",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cliff.app",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cliff.app.commandmanag... |
74779834984 | from unittest import TestCase
from random import randint
from Common.common import rand_permutate
from collections import namedtuple
from .selection_in_linear_time import select, rand_select
from .problem_9_3 import select_variant
class TestSelection(TestCase):
def test_selection(self):
case_class = namedtuple('case_class', 'array i key expected_res')
for select_method in (rand_select, select, select_variant,):
cases = (
case_class(array=[1], i=0, key=None, expected_res=1),
case_class(array=[3, 2, 1], i=0, key=None, expected_res=1),
case_class(array=[1, 3, 5, 4, 2, 7, 6], i=4, key=None, expected_res=5),
case_class(array=[1, 3, 5, 4, 2, 7, 6], i=2, key=None, expected_res=3),
case_class(array=[1, 3, 5, 4, 2, 7, 6], i=6, key=lambda x: -x, expected_res=1),
case_class(array=[8, 3, 2, 4, 6, 9, 7, 5, 1], i=0, key=None, expected_res=1),
case_class(array=[16, 196, 64, 121, 144, 9, 36, 0, 49, 100, 4, 81, 169, 1, 25], i=4, key=None,
expected_res=16),
case_class(array=[1, 16, 4, 9, 49, 100, 25, 36, 81, 64, 0], i=0, key=None, expected_res=0),
)
for case in cases:
# print(case.array, case.i)
self.assertEqual(case.expected_res, select_method(case.array, case.i, case.key))
for length in range(1, 100):
i = randint(0, length - 1)
array = [x * x for x in range(0, length)]
rand_permutate(array)
case = case_class(array=array, i=i, key=None, expected_res=i * i)
# print(case.array, case.i)
self.assertEqual(case.expected_res, select_method(case.array, case.i, case.key)) | GarfieldJiang/CLRS | P2_Sorting/OrderStatistics/test_selection.py | test_selection.py | py | 1,825 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "collections.namedtuple",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selection_in_linear_time.rand_select",
"line_number": 12,
"usage_type": "name"
},
{
"api_... |
7704126003 | # -*- encoding: utf-8 -*-
import networkx as nx
from tools import const
# 如果没有label_type,标注为“default”
def load_data(graph_name: str, label_type: str) -> (nx.Graph, dict):
    """Load an edgelist graph and its node-label dict for ``graph_name``.

    A ``label_type`` of "default" selects the plain ``<name>.label`` file;
    anything else selects ``<name>_<type>.label``.
    """
    if const.System == "Windows":
        base = const.WindowsRootPath
        edge_path = base + "\\data\\graph\\{}.edgelist".format(graph_name)
        if label_type == "default":
            label_path = base + "\\data\\label\\{}.label".format(graph_name)
        else:
            label_path = base + "\\data\\label\\{}_{}.label".format(graph_name, label_type)
    elif const.System == "Linux":
        base = const.LinuxRootPath
        edge_path = base + "/data/graph/{}.edgelist".format(graph_name)
        if label_type == "default":
            label_path = base + "/data/label/{}.label".format(graph_name)
        else:
            label_path = base + "/data/label/{}_{}.label".format(graph_name, label_type)
    else:
        raise EnvironmentError("only support Windows and Linux")
    label_dict = read_label(label_path)
    graph = nx.read_edgelist(path=edge_path, create_using=nx.Graph, nodetype=str,
                             edgetype=float, data=[('weight', float)])
    return graph, label_dict
def load_data_from_distance(graph_name, label_name, metric, hop, scale, multi="no", directed=False):
    """
    Load a HSD distance graph and node labels by dataset name.
    :param graph_name: graph name, e.g. mkarate
    :param label_name: label name, e.g. mkarate_origin (currently unused; label
        path is derived from graph_name)
    :param directed: bool, if True, return directed graph.
    :return: graph, node labels dict, number of node classes.
    """
    if multi == "yes":
        edge_path = "../distance/{}/HSD_multi_{}_hop{}.edgelist".format(
            graph_name, metric, hop)
    else:
        edge_path = "../distance/{}/HSD_{}_scale{}_hop{}.edgelist".format(
            graph_name, metric, scale, hop)
    label_path = f"../data/label/{graph_name}.label"
    # BUG FIX: read_label returns a single dict, but the original unpacked it
    # into (label_dict, n_class), raising ValueError. Derive the class count
    # from the distinct label values instead.
    label_dict = read_label(label_path)
    n_class = len(set(label_dict.values()))
    create_using = nx.DiGraph if directed else nx.Graph
    graph = nx.read_edgelist(path=edge_path, create_using=create_using,
                             edgetype=float, data=[('weight', float)])
    return graph, label_dict, n_class
def read_label(path) -> dict:
    """
    Read graph node labels from a whitespace-separated "node label" file.

    Returns a dict mapping node id (str) -> label (int); empty when the
    file does not exist.
    """
    label_dict = dict()
    try:
        # BUG FIX: the original used ``finally: return label_dict`` which
        # silently swallowed *every* exception (e.g. ValueError on malformed
        # lines), returning a partial dict. Only FileNotFoundError is expected.
        with open(path, mode="r", encoding="utf-8") as fin:
            for line in fin:
                line = line.strip()
                if not line:
                    continue
                node, label = line.split(" ")
                label_dict[node] = int(label)
    except FileNotFoundError:
        print(f"Lable file not exist, path:{path}")
    return label_dict
| Sngunfei/HSD | tools/dataloader.py | dataloader.py | py | 2,898 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "tools.const.System",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tools.const",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tools.const.WindowsRootPath",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "too... |
40281118137 | import os
import time
import math
import numpy as np
import torch
import copy
from skimage import img_as_float32
import im_utils
from unet3d import UNet3D
from file_utils import ls
from torch.nn.functional import softmax
import torch.nn.functional as F
# Module-level cache of the most recently loaded model (see load_model).
cached_model = None
cached_model_path = None
# When True, inference helpers substitute fake_cnn output for debugging.
use_fake_cnn = False
def fake_cnn(tiles_for_gpu):
    """ Useful debug function for checking tile layout etc """
    # Threshold each tile's central crop at its own mean to fake a binary
    # segmentation of the same spatial layout as the real CNN output.
    preds = []
    for tile in tiles_for_gpu:
        center = tile[0, 17:-17, 17:-17, 17:-17].data.cpu().numpy()
        preds.append((center > np.mean(center)).astype(np.int8))
    return np.array(preds)
def get_latest_model_paths(model_dir, k):
    """Return full paths of the k newest model files.

    File names start with a zero-padded model number, so a lexicographic
    sort is also chronological.
    """
    newest = sorted(ls(model_dir))[-k:]
    return [os.path.join(model_dir, name) for name in newest]
def load_model(model_path, classes):
    """Load a UNet3D checkpoint, handling both bare and DataParallel state dicts.

    Loads are memoised by path: a repeat load returns a deep copy of the
    cached model instead of re-reading the checkpoint.
    """
    global cached_model
    global cached_model_path
    # using cache can save up to half a second per segmentation with network drives
    if model_path == cached_model_path:
        return copy.deepcopy(cached_model)
    # two channels as one is input image and another is some of the fg and bg annotation
    # each non-empty channel in the annotation is included with 50% chance.
    # Option1 - fg and bg will go in as seprate channels
    # so channels are [image, fg_annot, bg_annot]
    # Option2 -
    # when included both fg a bg go into the model bg is -1 and fg is +1. undefined is 0
    # Option 1 will be evaluated first (possibilty easier to implement)
    model = UNet3D(classes, im_channels=3)
    try:
        # Checkpoint saved from a bare model: load first, then wrap.
        model.load_state_dict(torch.load(model_path))
        model = torch.nn.DataParallel(model)
    # pylint: disable=broad-except, bare-except
    except:
        # Checkpoint saved from a DataParallel model: wrap first, then load.
        model = torch.nn.DataParallel(model)
        model.load_state_dict(torch.load(model_path))
    if not use_fake_cnn:
        model.cuda()
    # store in cache as most frequest model is laoded often
    cached_model_path = model_path
    cached_model = model
    return copy.deepcopy(model)
def random_model(classes):
    """Create a freshly initialised (untrained) UNet3D wrapped in DataParallel."""
    # num out channels is twice number of channels
    # as we have a positive and negative output for each structure.
    model = UNet3D(classes, im_channels=3)
    model = torch.nn.DataParallel(model)
    if not use_fake_cnn:
        model.cuda()
    return model
def create_first_model_with_random_weights(model_dir, classes):
    """Create, save and return model 000001 with random weights.

    Used when no model was specified on project creation. The file name is
    <number>_<unix-timestamp>.pkl so later models sort chronologically.
    """
    model = random_model(classes)
    fname = str(1).zfill(6) + '_' + str(int(round(time.time()))) + '.pkl'
    torch.save(model.state_dict(), os.path.join(model_dir, fname))
    if not use_fake_cnn:
        model.cuda()
    return model
def get_prev_model(model_dir, classes):
    """Load the most recent model in model_dir; return (model, its path)."""
    [prev_path] = get_latest_model_paths(model_dir, k=1)
    return load_model(prev_path, classes), prev_path
def save_if_better(model_dir, cur_model, prev_model_path, cur_dice, prev_dice):
    """Save cur_model only if its dice beats the previous; return True if saved."""
    # NaN dice scores break comparisons, so coerce them to zero first.
    cur = 0 if math.isnan(cur_dice) else cur_dice
    prev = 0 if math.isnan(prev_dice) else prev_dice
    print('Validation: prev dice', str(round(prev, 5)).ljust(7, '0'),
          'cur dice', str(round(cur, 5)).ljust(7, '0'))
    if cur <= prev:
        return False
    save_model(model_dir, cur_model, prev_model_path)
    return True
def save_model(model_dir, cur_model, prev_model_path):
    """Persist cur_model's state dict, bumping the previous model number by one."""
    prev_num = int(os.path.basename(prev_model_path).split('_')[0])
    now = int(round(time.time()))
    model_name = str(prev_num + 1).zfill(6) + '_' + str(now) + '.pkl'
    model_path = os.path.join(model_dir, model_name)
    print('saving', model_path, time.strftime('%H:%M:%S', time.localtime(now)))
    torch.save(cur_model.state_dict(), model_path)
def ensemble_segment_3d(model_paths, image, fname, batch_size, in_w, out_w, in_d,
                        out_d, classes):
    """ Average predictions from each model specified in model_paths """
    # NOTE(review): despite the docstring, only model_paths[0] is loaded and
    # used below — confirm whether true ensembling was intended.
    t = time.time()
    input_image_shape = image.shape
    cnn = load_model(model_paths[0], classes)
    in_patch_shape = (in_d, in_w, in_w)
    out_patch_shape = (out_d, out_w, out_w)
    depth_diff = in_patch_shape[0] - out_patch_shape[0]
    height_diff = in_patch_shape[1] - out_patch_shape[1]
    width_diff = in_patch_shape[2] - out_patch_shape[2]
    # pad so seg will be size of input image
    image = im_utils.pad_3d(image, width_diff//2, depth_diff//2,
                            mode='reflect', constant_values=0)
    # segment returns a series of prediction maps. one for each class.
    pred_maps = segment_3d(cnn, image, batch_size, in_patch_shape, out_patch_shape)
    assert pred_maps[0].shape == input_image_shape
    print('time to segment image', time.time() - t)
    return pred_maps
def segment_3d(cnn, image, batch_size, in_tile_shape, out_tile_shape):
    """
    in_tile_shape and out_tile_shape are (depth, height, width)

    Tiles the padded volume, runs batches through the CNN and reconstructs
    one binary prediction map per class.
    """
    # Return prediction for each pixel in the image
    # The cnn will give a the output as channels where
    # each channel corresponds to a specific class 'probability'
    # don't need channel dimension
    # make sure the width, height and depth is at least as big as the tile.
    assert len(image.shape) == 3, str(image.shape)
    assert image.shape[0] >= in_tile_shape[0], f"{image.shape[0]},{in_tile_shape[0]}"
    assert image.shape[1] >= in_tile_shape[1], f"{image.shape[1]},{in_tile_shape[1]}"
    assert image.shape[2] >= in_tile_shape[2], f"{image.shape[2]},{in_tile_shape[2]}"
    depth_diff = in_tile_shape[0] - out_tile_shape[0]
    # NOTE(review): width_diff (axis 1) is applied to both height and width
    # below — assumes square in-plane tiles; confirm callers always pass w == h.
    width_diff = in_tile_shape[1] - out_tile_shape[1]
    out_im_shape = (image.shape[0] - depth_diff,
                    image.shape[1] - width_diff,
                    image.shape[2] - width_diff)
    coords = im_utils.get_coords_3d(out_im_shape, out_tile_shape)
    coord_idx = 0
    class_output_tiles = None # list of tiles for each class
    while coord_idx < len(coords):
        # Build one batch of normalised input tiles.
        tiles_to_process = []
        coords_to_process = []
        for _ in range(batch_size):
            if coord_idx < len(coords):
                coord = coords[coord_idx]
                x_coord, y_coord, z_coord = coord
                tile = image[z_coord:z_coord+in_tile_shape[0],
                             y_coord:y_coord+in_tile_shape[1],
                             x_coord:x_coord+in_tile_shape[2]]
                # need to add channel dimension for GPU processing.
                tile = np.expand_dims(tile, axis=0)
                assert tile.shape[1] == in_tile_shape[0], str(tile.shape)
                assert tile.shape[2] == in_tile_shape[1], str(tile.shape)
                assert tile.shape[3] == in_tile_shape[2], str(tile.shape)
                tile = img_as_float32(tile)
                tile = im_utils.normalize_tile(tile)
                coord_idx += 1
                tiles_to_process.append(tile) # need channel dimension
                coords_to_process.append(coord)
        tiles_to_process = np.array(tiles_to_process)
        tiles_for_gpu = torch.from_numpy(tiles_to_process)
        tiles_for_gpu = tiles_for_gpu.cuda()
        # TODO: consider use of detach.
        # I might want to move to cpu later to speed up the next few operations.
        # I added .detach().cpu() to prevent a memory error.
        # pad with zeros for the annotation input channels
        # l,r, l,r, but from end to start w w h h d d, c, c, b, b
        tiles_for_gpu = F.pad(tiles_for_gpu, (0, 0, 0, 0, 0, 0, 0, 2), 'constant', 0)
        # tiles shape after padding torch.Size([4, 3, 52, 228, 228])
        outputs = cnn(tiles_for_gpu).detach().cpu()
        # bg channel index for each class in network output.
        class_idxs = [x * 2 for x in range(outputs.shape[1] // 2)]
        if class_output_tiles is None:
            class_output_tiles = [[] for _ in class_idxs]
        for i, class_idx in enumerate(class_idxs):
            # Each class owns a (bg, fg) channel pair; softmax over that pair.
            class_output = outputs[:, class_idx:class_idx+2]
            # class_output : (batch_size, bg/fg, depth, height, width)
            softmaxed = softmax(class_output, 1)
            foreground_probs = softmaxed[:, 1] # just the foreground probability.
            predicted = foreground_probs > 0.5
            predicted = predicted.int()
            pred_np = predicted.data.cpu().numpy()
            for out_tile in pred_np:
                class_output_tiles[i].append(out_tile)
    class_pred_maps = []
    for i, output_tiles in enumerate(class_output_tiles):
        # reconstruct for each class
        reconstructed = im_utils.reconstruct_from_tiles(output_tiles,
                                                        coords, out_im_shape)
        class_pred_maps.append(reconstructed)
    return class_pred_maps
| YZST/RootPainter3D | trainer/model_utils.py | model_utils.py | py | 8,983 | python | en | code | null | github-code | 36 | [
{
"api_name": "numpy.mean",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.int8",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "file_utils.ls",
"line_numb... |
25867245413 | #test
import os
import yaml
from ..grid import Grid
import numpy as np
def load_test_data(yml_file_path):
    """Given a file path of the yaml file, return the data in the file."""
    with open(yml_file_path, 'r') as f:
        # SECURITY/IDIOM FIX: yaml.load without an explicit Loader can build
        # arbitrary Python objects and is deprecated; safe_load only builds
        # plain data types, which is all the test fixtures contain.
        return yaml.safe_load(f)
def test_vertices():
    """Grid vertices must match the expected fixture data."""
    yml_file_path = os.path.join(os.path.dirname(__file__), 'fixture', 'grid_1.yml')
    test_data = load_test_data(yml_file_path)
    ans = test_data.pop('ans')
    ans_vertice = ans['vertice']
    grid = Grid.from_vtk_file('example.vtk')
    v = grid.vertices[1]
    # BUG FIX: the original evaluated `(v == ans_vertice).all` — the bound
    # method was never called and nothing was asserted, so the test could
    # never fail.
    assert (v == ans_vertice).all()
def test_elements():
    """Grid elements must match the expected fixture data."""
    yml_file_path = os.path.join(os.path.dirname(__file__), 'fixture', 'grid_1.yml')
    test_data = load_test_data(yml_file_path)
    ans = test_data.pop('ans')
    ans_element = ans['element']
    grid = Grid.from_vtk_file('example.vtk')
    e = grid.elements[1]
    # BUG FIX: `(e == ans_element).all` was never called nor asserted.
    assert (e == ans_element).all()
def test_corners():
    """Element corner coordinates must match the expected fixture data."""
    yml_file_path = os.path.join(os.path.dirname(__file__), 'fixture', 'grid_1.yml')
    test_data = load_test_data(yml_file_path)
    ans = test_data.pop('ans')
    ans_corner = ans['corner']
    grid = Grid.from_vtk_file('example.vtk')
    c = grid.get_corners(1)
    # BUG FIX: `(c == ans_corner).all` was never called nor asserted.
    assert (c == ans_corner).all()
# if __name__ == '__main__':
# test_vertices()
# # test_wrong()
| uceclz0/mesh_generation | mesh_generation/test/grid_test.py | grid_test.py | py | 1,354 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "yaml.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_numbe... |
25404280105 | # -*- coding: utf-8 -*-
import torch
from model import Model
from utils import load_img,mkdir
import os
import argparse
import cv2
import time
from glob import glob
def parse_args():
    """Build and parse the fusion CLI arguments; return the parsed namespace."""
    # NOTE(review): type=bool on CLI flags means any non-empty string parses
    # as True (e.g. "--load_pt False" -> True); kept as-is for compatibility.
    ap = argparse.ArgumentParser()
    ap.add_argument('--data', default=r'/data/Disk_B/MSCOCO2014/train2014', type=str, help='')
    ap.add_argument('--load_pt', default=True, type=bool, help='')
    ap.add_argument('--weights_path', default='./weights/epoch323_fusion.pt', type=str, help='')
    ap.add_argument('--lr', default=1e-3, type=float, help='')
    ap.add_argument('--devices', default="0", type=str, help='')
    ap.add_argument('--device', default="cuda", type=str, help='')
    ap.add_argument('--batch_size', default=32, type=int, help='')
    ap.add_argument('--epochs', default=1000, type=int, help='')
    ap.add_argument('--multiGPU', default=False, type=bool, help='')
    ap.add_argument('--GPUs', default=[0, 1], type=list, help='')
    return ap.parse_args()
def getimg(imgir_path):
    """Run one image through the module-global fusion model and return its outputs.

    Relies on the global ``model`` created in the __main__ block; prints the
    forward-pass wall time as a side effect.
    """
    img = load_img(imgir_path)
    with torch.no_grad():
        model.setdata(img)
        s_time = time.time()
        model.forward(isTest=True)
        e_time = time.time() - s_time
        print(e_time)
        # model.saveimgfuse(imgir_path)
        return model.getimg()
def sm(x, y):
    """Softmax-weighted blend of two tensors: (x*e^x + y*e^y) / (e^x + e^y)."""
    wx = torch.exp(x)
    wy = torch.exp(y)
    denom = wx + wy
    return x * wx / denom + y * wy / denom
if __name__ == "__main__":
    # Fuse paired IR/visible test images with four blending strategies and
    # write each result set to its own sub-directory under `result`.
    save_path = "result"
    test_ir = './Test_ir/'
    test_vi = './Test_vi/'
    # test_ir = './road/ir/'
    # test_vi = './road/vi/'
    img_list_ir = glob(test_ir + '*')
    img_num = len(img_list_ir)
    imgtype = '.bmp'
    args = parse_args()
    os.chdir(r'./')
    os.environ["CUDA_VISIBLE_DEVICES"] = args.devices
    model = Model(args).to(args.device)
    model.eval()
    # Images are expected to be named 1.bmp, 2.bmp, ... in both folders.
    for i in range(1, img_num+1):
        imgir_path = test_ir+str(i)+imgtype
        imgvi_path = test_vi+str(i)+imgtype
        # NOTE(review): getimg(imgir_path) feeds the IR image but the result is
        # named vi_* (and vice versa) — confirm the intended pairing.
        vi_g1, vi_g2, vi_g3, vi_s = getimg(imgir_path)
        ir_g1, ir_g2, ir_g3, ir_s = getimg(imgvi_path)
        # Four fusion variants: max vs sum of detail layers, mean vs softmax of bases.
        fused_1 = torch.max(vi_g1, ir_g1) + torch.max(vi_g2, ir_g2) + torch.max(vi_g3, ir_g3) + (vi_s + ir_s) / 2
        fused_2 = torch.max(vi_g1, ir_g1) + torch.max(vi_g2, ir_g2) + torch.max(vi_g3, ir_g3) + sm(vi_s, ir_s)
        fused_3 = vi_g1 + ir_g1 + vi_g2 + ir_g2 + vi_g3 + ir_g3 + (vi_s + ir_s) / 2
        fused_4 = vi_g1 + ir_g1 + vi_g2 + ir_g2 + vi_g3 + ir_g3 + sm(vi_s, ir_s)
        # Drop batch/channel dims and rescale [0,1] -> [0,255] for writing.
        fused_1 = fused_1.squeeze(0).squeeze(0).detach().cpu().numpy() * 255
        fused_2 = fused_2.squeeze(0).squeeze(0).detach().cpu().numpy() * 255
        fused_3 = fused_3.squeeze(0).squeeze(0).detach().cpu().numpy() * 255
        fused_4 = fused_4.squeeze(0).squeeze(0).detach().cpu().numpy() * 255
        save_path_1 = os.path.join(save_path, 'fuse1')
        mkdir(save_path_1)
        save_name_1 = os.path.join(save_path_1, '{}.bmp'.format(i))
        cv2.imwrite(save_name_1, fused_1)
        save_path_2 = os.path.join(save_path, 'fuse2')
        mkdir(save_path_2)
        save_name_2 = os.path.join(save_path_2, '{}.bmp'.format(i))
        cv2.imwrite(save_name_2, fused_2)
        save_path_3 = os.path.join(save_path, 'fuse3')
        mkdir(save_path_3)
        save_name_3 = os.path.join(save_path_3, '{}.bmp'.format(i))
        cv2.imwrite(save_name_3, fused_3)
        save_path_4 = os.path.join(save_path, 'fuse4')
        mkdir(save_path_4)
        save_name_4 = os.path.join(save_path_4, '{}.bmp'.format(i))
        cv2.imwrite(save_name_4, fused_4)
        print("pic:[%d] %s" % (i, save_name_1))
| thfylsty/ImageFusion_DeepDecFusion | fuseimg.py | fuseimg.py | py | 3,582 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.load_img",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "model.setdata",
... |
19524023566 | from django.contrib import messages
from django.shortcuts import render,redirect
from students.models import student_homework,StudentFee,Attendance
from Accounts.models import UserAuthentication,Teacher
from datetime import datetime as dt
from pytz import timezone
# Create your views here.
def parent_home(request):
    """Render the parent landing page listing all tutions (GET only)."""
    if request.method != "GET":
        # Anything but GET is rejected back to the login page.
        messages.warning(request, 'Bad Request on parent home...please try again')
        return redirect('/accounts/login/')
    tutions = Teacher.objects.all()
    return render(request, 'parent_home.html', {'tutions': tutions})
def show_students_data(request):
    """Look up a student by the parent's mobile number and render their data.

    GET param ``parent_contact`` is the lookup key; shows homework, attendance,
    fees, teacher details and whether the teacher's notice is still active.
    """
    if request.method == "GET":
        student_key = request.GET.get('parent_contact','')
        if student_key == '':
            messages.error(request,'Parent Mobile Number is required...got empty')
            return redirect('/parents/home/')
        else:
            # NOTE(review): student_homework.username appears to hold the
            # parent's contact number — confirm against the model definition.
            if student_homework.objects.filter(username = student_key).exists():
                student_data = student_homework.objects.get(username = student_key)
                att = []
                fee = []
                teacher = Teacher.objects.get(teacher = student_data.teacher)
                if Attendance.objects.filter(email = student_data).exists():
                    att = Attendance.objects.filter(email = student_data)
                if StudentFee.objects.filter(email = student_key).exists():
                    fee = StudentFee.objects.filter(email = student_key)
                # A notice is shown only while today is before its expiry date.
                td = dt.now(timezone('Asia/Kolkata')).date()
                if teacher.notice_expiry == None:
                    notice = False
                elif td >= teacher.notice_expiry:
                    notice = False
                else:
                    notice = True
                return render(request,'parent_student_data.html',{
                    'student':student_data,
                    'att':att,
                    'fee':fee,
                    'teacher' : teacher,
                    'notice' : notice
                })
            else:
                messages.error(request,'Your Mobile Number not associated with any tution..please check with your tution teacher')
                return redirect('/parents/home/')
    else:
        messages.error(request,'Only POST requests are accepted..please try again')
        return redirect('/parents/home/')
| Asif-Ali1234/tution_management | parents/views.py | views.py | py | 2,388 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Accounts.models.Teacher.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Accounts.models.Teacher.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "Accounts.models.Teacher",
"line_number": 11,
"usage_type": "name... |
25022706363 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, argparse, json
from common.utils import use_progressbar, count_file
from smali_opcode import HCG_FILE_NAME
def merge_hash_dict(d1, d2):
    '''Merge d1 into d2 in place, keeping the larger count per hash; return d2.'''
    for key, count in d1.items():
        d2[key] = max(count, d2[key]) if key in d2 else count
    return d2
def get_hash(hcgpath):
    '''Count occurrences of each node hash in a hcg.json file.

    Returns a dict mapping nhash value -> number of nodes carrying it.
    '''
    # IDIOM FIX: use a context manager so the file is closed even if
    # json.load raises (the original's explicit close was skipped on error).
    with open(hcgpath, 'r') as f:
        hcg = json.load(f)
    hash_dict = dict()
    for node in hcg:
        nhash = hcg[node]['nhash']
        hash_dict[nhash] = hash_dict.get(nhash, 0) + 1
    return hash_dict
def count(directory):
    '''count all the indivisual hash values'''
    # Walks the tree, merges per-file hash histograms into one dict and shows
    # progress while doing so.
    # 1. iterate through all the hcg.json files
    # 2. get hash values from a hcg.json file
    # 3. merge the hash values into one file
    # progressbar
    file_count = count_file(directory, HCG_FILE_NAME)
    pbar = use_progressbar('Calculating maximum occurrence', file_count)
    pbar.start()
    progress = 0
    hash_dict = dict()
    for parent, dirnames, filenames in os.walk(directory):
        for filename in filenames:
            if filename == HCG_FILE_NAME:
            # if filename == 'hcg.json':
                # merge_hash_dict keeps the max count per hash across files.
                hash_dict = merge_hash_dict(get_hash(os.path.join(parent, filename)), hash_dict)
                # progressbar
                progress += 1
                pbar.update(progress)
    # progressbar
    pbar.finish()
    return hash_dict
def has_hash_and_occurrence(hash_dict, hash_value, occurrence):
    """True iff hash_value is present with exactly the given occurrence count."""
    if hash_value not in hash_dict:
        return False
    return hash_dict[hash_value] == occurrence
def find(directory, hash_value='0100000000000000000', occurrence=2302):
    '''Print paths of directed_hcg.json files containing ``hash_value``
    exactly ``occurrence`` times.

    GENERALIZATION: the hash value and occurrence were hard-coded locals;
    they are now keyword parameters whose defaults preserve the old
    behavior, so existing ``find(directory)`` callers are unaffected.
    '''
    for parent, dirnames, filenames in os.walk(directory):
        for filename in filenames:
            if filename == 'directed_hcg.json':
                hash_dict = get_hash(os.path.join(parent, filename))
                if has_hash_and_occurrence(hash_dict, hash_value, occurrence):
                    print(os.path.join(parent, filename))
def save_to_file(hash_dict, directory):
    '''Dump hash_dict as JSON into <directory>/directed_hash_occurrence.json.'''
    out_path = os.path.join(directory, 'directed_hash_occurrence.json')
    # IDIOM FIX: context manager instead of manual open/close.
    with open(out_path, 'w') as f:
        json.dump(hash_dict, f)
    # BUG FIX: the message previously claimed the data was stored in
    # hash_occurrence.json while the file actually written was
    # directed_hash_occurrence.json; report the real path.
    print('[SC]All hash values stored in %s' % out_path)
def main():
    """CLI entry point: count hashes (-m 0) or find a hash (-m 1) under -d."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--directory', help='directory of the apk')
    parser.add_argument('-m', '--mode', help='0 = count hash; 1 = find hash')
    args = parser.parse_args()
    # Both options are required; fall back to the help text otherwise.
    if not (args.directory and args.mode):
        parser.print_help()
        return
    if args.mode == '0':
        save_to_file(count(args.directory), args.directory)
    elif args.mode == '1':
        find(args.directory)
    else:
        parser.print_help()
if __name__ == '__main__':
main() | chushu10/StructuralClassification | count_hash.py | count_hash.py | py | 3,759 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "common.utils.count_file",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "smali_opcode.HCG_FILE_NAME",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "comm... |
17498679617 | import logging
import numpy as np
import sys
import warnings
import affine6p
import geopandas
from typing import List, Optional
from shapely.geometry import Polygon
import geoCosiCorr3D.georoutines.geo_utils as geoRT
import geoCosiCorr3D.geoErrorsWarning.geoErrors as geoErrors
from geoCosiCorr3D.geoCore.core_RFM import RawRFM
class ReadRFM(RawRFM):
    """Parse Rational Function Model (RPC) parameters from several file formats.

    Supported inputs: rasters with embedded RPC tags (TIF/NTF/JP2), plain-text
    RPC files, and (not yet implemented) XML / RPB sidecar files.  Parsing is
    triggered from the constructor and fills the offset/scale/coefficient
    attributes inherited from RawRFM.
    """

    def __init__(self, rfm_file):
        super().__init__()
        self.rfm_file = rfm_file
        self._ingest()

    def _ingest(self):
        """Dispatch to the parser matching the file extension."""
        if self.rfm_file.endswith('xml') or self.rfm_file.endswith('XML'):
            logging.info("RFM file format: xml")
            self.RFM_Read_fromXML(self.rfm_file)
        # BUG FIX: the original compared the lower-cased name against 'RPB',
        # which can never match, so the RPB branch was unreachable.
        elif self.rfm_file.lower().endswith('rpb'):
            logging.info("RFM file format: RPB")
            self.RFM_Read_fromRPB(self.rfm_file)
        elif self.rfm_file.lower().endswith(tuple(("txt", "rpc"))):
            self.RFM_Read_fromTXT(self.rfm_file)
        elif self.rfm_file.endswith(tuple(('TIF', 'NTF', "tif", "ntf", "JP2"))):
            logging.info("RFM file format: Raster")
            self.RFM_Read_fromRaster(self.rfm_file)
        else:
            # Unknown extension: fall back to the text parser before giving up.
            try:
                self.RFM_Read_fromTXT(self.rfm_file)
            except Exception:
                raise IOError(f'RFM file:{self.rfm_file} is not valid')

    def parse_file(self, param, lines):
        """Return the float following ``param:`` in *lines*; exit if absent.

        The last matching line wins when *param* appears more than once.
        """
        from re import search
        val = None
        for line_ in lines:
            if search(param, line_):
                val = float(line_.split(":")[1].split()[0])
        if val is None:
            msg = "ERROR in reading " + param + " from RFM txt file!"
            sys.exit(msg)
        return val

    def RFM_Read_fromTXT(self, rfm_txt_file):
        """Parse offsets, scales and inverse-model coefficients from a text RPC file."""
        with open(rfm_txt_file) as f:
            fileContent = f.read()
        lines = fileContent.split('\n')
        self.linOff = self.parse_file(param="LINE_OFF", lines=lines)
        self.colOff = self.parse_file(param="SAMP_OFF", lines=lines)
        self.latOff = self.parse_file(param="LAT_OFF", lines=lines)
        self.lonOff = self.parse_file(param="LONG_OFF", lines=lines)
        # BUG FIX: altOff previously parsed HEIGHT_SCALE, silently duplicating
        # the altitude scale (cf. RFM_Read_fromRaster, which reads HEIGHT_OFF).
        self.altOff = self.parse_file(param="HEIGHT_OFF", lines=lines)
        self.linScale = self.parse_file(param="LINE_SCALE", lines=lines)
        self.colScale = self.parse_file(param="SAMP_SCALE", lines=lines)
        self.latScale = self.parse_file(param="LAT_SCALE", lines=lines)
        self.lonScale = self.parse_file(param="LONG_SCALE", lines=lines)
        self.altScale = self.parse_file(param="HEIGHT_SCALE", lines=lines)
        # Inverse model: 20 numerator/denominator coefficients per image axis.
        for i in range(20):
            self.linNum[i] = self.parse_file(param="LINE_NUM_COEFF_" + str(i + 1) + ":", lines=lines)
            self.linDen[i] = self.parse_file(param="LINE_DEN_COEFF_" + str(i + 1) + ":", lines=lines)
            self.colNum[i] = self.parse_file(param="SAMP_NUM_COEFF_" + str(i + 1) + ":", lines=lines)
            self.colDen[i] = self.parse_file(param="SAMP_DEN_COEFF_" + str(i + 1) + ":", lines=lines)
        # TODO: direct-model (LON/LAT_*) coefficients are not parsed from TXT.
        return

    def RFM_Read_fromXML(self, rfm_xml_file):
        """Not implemented yet: parse RFM from an XML sidecar file."""
        logging.info("--- Read RFM form xML ---")
        logging.info("--- Future work ---")
        geoErrors.erNotImplemented(routineName="Read RFM from XML")
        return

    def RFM_Read_fromRPB(self, rpb_file):
        """Not implemented yet: parse RFM from an RPB sidecar file."""
        logging.info("--- Read RFM form RPB ---")
        logging.info("--- Future work ---")
        geoErrors.erNotImplemented(routineName="Read RFM from RPB")
        return

    def RFM_Read_fromRaster(self, raster_file):
        """Read RPC coefficients from the raster's metadata tags (via GDAL/georoutines)."""
        rasterInfo = geoRT.cRasterInfo(raster_file)
        if rasterInfo.rpcs:
            rfmInfo = rasterInfo.rpcs
            # Scale and offset
            self.altOff = float(rfmInfo["HEIGHT_OFF"])
            self.altScale = float(rfmInfo["HEIGHT_SCALE"])
            self.latOff = float(rfmInfo["LAT_OFF"])
            self.latScale = float(rfmInfo["LAT_SCALE"])
            self.lonOff = float(rfmInfo["LONG_OFF"])
            self.lonScale = float(rfmInfo["LONG_SCALE"])
            self.linOff = float(rfmInfo["LINE_OFF"])
            self.linScale = float(rfmInfo["LINE_SCALE"])
            self.colOff = float(rfmInfo["SAMP_OFF"])
            self.colScale = float(rfmInfo["SAMP_SCALE"])
            # Inverse model (always present in RPC tags)
            self.linNum = list(map(float, rfmInfo['LINE_NUM_COEFF'].split()))
            self.linDen = list(map(float, rfmInfo['LINE_DEN_COEFF'].split()))
            self.colNum = list(map(float, rfmInfo['SAMP_NUM_COEFF'].split()))
            self.colDen = list(map(float, rfmInfo['SAMP_DEN_COEFF'].split()))
            # Direct model (optional vendor extension)
            if 'LON_NUM_COEFF' in rfmInfo:
                self.lonNum = list(map(float, rfmInfo['LON_NUM_COEFF'].split()))
                self.lonDen = list(map(float, rfmInfo['LON_DEN_COEFF'].split()))
                self.latNum = list(map(float, rfmInfo['LAT_NUM_COEFF'].split()))
                self.latDen = list(map(float, rfmInfo['LAT_DEN_COEFF'].split()))
        else:
            sys.exit(f'RPCs not found in the raster {raster_file} metadata')
        return
class RFM(ReadRFM):
    """Rational Function Model sensor: forward (ground->image) and inverse
    (image->ground) mappings plus geometry helpers (footprint, GSD, geotransform).
    """

    def __init__(self, rfm_file: Optional[str] = None, debug: bool = False):
        self.init_RFM()
        if rfm_file is not None:
            super().__init__(rfm_file)
        self.debug = debug
        if self.debug:
            logging.info(self.__repr__())

    def Ground2Img_RFM(self, lon, lat, alt: List = None, normalized=False, demInfo=None, corrModel=np.zeros((3, 3))):
        """
        Apply inverse RFM model to convert Ground coordinates to image coordinates
        Args:
            lon: longitude(s) of the input 3D point(s) : float or list
            lat: latitude(s) of the input 3D point(s) : float or list
            alt: altitude(s) of the input 3D point(s) : float or list
            normalized: if True, return normalized image coordinates
            demInfo: DEM used to fill missing altitudes (not implemented yet)
            corrModel: optional 3x3 bias-correction matrix applied in normalized space
        Returns:
            float or list: horizontal image coordinate(s) (column index, ie x)
            float or list: vertical image coordinate(s) (row index, ie y)
        """
        if alt is None:
            alt = []
        lon = np.asarray(lon)
        lat = np.asarray(lat)
        # NOTE(review): .any() is False for an empty AND for an all-zero altitude
        # array, so genuine zero altitudes get replaced by altOff -- confirm intended.
        if np.array(alt).any():
            alt = np.asarray(alt)
        else:
            if demInfo is not None:
                warnings.warn("INTERPOLATE FROM DEM --> TODO")
                logging.warning("INTERPOLATE FROM DEM --> TODO")
            else:
                warnings.warn("NO alt values and no DEM: alt will be set to:{}".format(self.altOff))
                logging.warning("NO alt values and no DEM: alt will be set to:{}".format(self.altOff))
            alt = np.ones(lon.shape) * self.altOff
        # Normalize ground coordinates before evaluating the polynomials.
        lonN = (lon - self.lonOff) / self.lonScale
        latN = (lat - self.latOff) / self.latScale
        altN = (alt - self.altOff) / self.altScale
        colN = self.build_RFM(num=self.colNum, den=self.colDen, x=latN, y=lonN, z=altN)
        linN = self.build_RFM(num=self.linNum, den=self.linDen, x=latN, y=lonN, z=altN)
        if not np.all((corrModel == 0)):
            colN, linN = self.apply_correction(corrModel=corrModel, colN=colN, linN=linN)
        if normalized:
            return colN, linN
        # Denormalize back to pixel coordinates.
        col = colN * self.colScale + self.colOff
        row = linN * self.linScale + self.linOff
        return col, row

    def Img2Ground_RFM(self, col, lin,
                       altIni: Optional[List] = None,
                       demInfo: Optional[geoRT.cRasterInfo] = None,
                       corrModel=np.zeros((3, 3)),
                       normalized=False):
        """
        Apply direct RFM model to convert image coordinates to ground coordinates
        Args:
            col: x-image coordinate(s) of the input point(s) : float or list
            lin: y-image coordinate(s) of the input point(s) : float or list
            altIni: initial altitude(s) of the input point(s) : float or list
            demInfo: optional DEM used to refine the altitude after a first pass
            corrModel: optional 3x3 bias-correction matrix
            normalized: if True, return normalized ground coordinates
        Returns: (lon, lat, alt) as floats or arrays; alt is None when normalized
        Raises:
            ValueError: when *altIni* has a different length than col/lin.
        """
        if altIni is None:
            altIni = []
        if isinstance(altIni, list):
            if len(altIni) == 0:
                # No altitudes supplied: default everything to the RPC offset.
                if isinstance(col, list) and isinstance(lin, list):
                    altIni = len(col) * [self.altOff]
                else:
                    altIni = self.altOff
            elif len(altIni) != len(col) or len(altIni) != len(lin):
                # BUG FIX: the original built this ValueError without raising it,
                # so mismatched inputs slipped through to a numpy broadcast error.
                raise ValueError("Invalid Initial Altitude values !")
        col = np.asarray(col)
        lin = np.asarray(lin)
        altIni_ = np.asarray(altIni)
        # Normalize input image coordinates
        colN = (col - self.colOff) / self.colScale
        linN = (lin - self.linOff) / self.linScale
        altIniN = (altIni_ - self.altOff) / self.altScale
        # NOTE: this equality relies on CPython's identity shortcut for the
        # repeated np.nan objects (nan != nan element-wise); it detects an
        # uninitialized direct model as set up by init_RFM().
        if self.lonNum == [np.nan] * 20:
            if self.debug:
                logging.warning("Computing Direct model ....")
            lonN, latN = self.ComputeDirectModel(colN=colN, linN=linN, altN=altIniN, corrModel=corrModel)
        else:
            # Direct model provided in the RFM file will be used
            lonN = self.build_RFM(num=self.lonNum, den=self.lonDen, x=linN, y=colN, z=altIniN)
            latN = self.build_RFM(num=self.latNum, den=self.latDen, x=linN, y=colN, z=altIniN)
        if not normalized:
            lon = lonN * self.lonScale + self.lonOff
            lat = latN * self.latScale + self.latOff
            # (the original had an if/else here whose two branches were identical)
            alt = altIni
            # Use the first-pass lon/lat to interpolate altitude from the DEM, then
            # redo the mapping with the refined altitude.
            if demInfo is not None:
                alt = []
                # TODO: loop until convergence or no change in coordinates
                if isinstance(lon, np.ndarray) and isinstance(lat, np.ndarray):
                    for lonVal, latVal, altValIni in zip(lon, lat, altIni):
                        altVal = self.ExtractAlt(lonVal, latVal, demInfo)
                        if altVal == 0:
                            # 0 is treated as "no DEM data": keep the initial guess.
                            altVal = altValIni
                        alt.append(altVal)
                else:
                    altVal = self.ExtractAlt(lon, lat, demInfo)
                    if altVal == 0:
                        altVal = altIni
                    alt = altVal
                alt = np.asarray(alt)
                # Second pass with the DEM-refined altitudes.
                colN = (col - self.colOff) / self.colScale
                linN = (lin - self.linOff) / self.linScale
                altN = (alt - self.altOff) / self.altScale
                if self.lonNum == [np.nan] * 20:
                    lonN, latN = self.ComputeDirectModel(colN=colN, linN=linN, altN=altN, corrModel=corrModel)
                else:
                    lonN = self.build_RFM(num=self.lonNum, den=self.lonDen, x=linN, y=colN, z=altN)
                    latN = self.build_RFM(num=self.latNum, den=self.latDen, x=linN, y=colN, z=altN)
                lon = lonN * self.lonScale + self.lonOff
                lat = latN * self.latScale + self.latOff
            return lon, lat, alt
        else:
            return lonN, latN, None

    def get_geoTransform(self):
        """Estimate a GDAL-style affine geotransform from the footprint at mean altitude."""
        h = int(self.linOff * 2)
        w = int(self.colOff * 2)
        # Image-space corner ring (closed).
        BBoxPix = [[0, 0],
                   [0, h],
                   [w, h],
                   [w, 0],
                   [0, 0]]
        z = self.altOff
        lons, lats, _ = self.Img2Ground_RFM(col=[0, 0, w, w, 0],
                                            lin=[0, h, h, 0, 0],
                                            altIni=[z, z, z, z, z],
                                            normalized=False)
        BBoxMap = []
        for lon_, lat_ in zip(lons, lats):
            BBoxMap.append([lon_, lat_])
        # Least-squares affine fit between pixel corners and map corners.
        trans = affine6p.estimate(origin=BBoxPix, convrt=BBoxMap)
        mat = trans.get_matrix()  # homogeneous representation of the affine transform
        geo_transform = [mat[0][-1], mat[0][0], mat[0][1], mat[1][-1], mat[1][0], mat[1][1]]
        return geo_transform

    def compute_footprint(self, corr_model: Optional[np.ndarray] = None,
                          dem_info: Optional[geoRT.cRasterInfo] = None) -> [Polygon, geopandas.GeoDataFrame]:
        """Project the image corners to the ground and return (Polygon, GeoDataFrame in EPSG:4326)."""
        h = int(self.linOff * 2)
        w = int(self.colOff * 2)
        z = self.altOff
        if corr_model is None:
            corr_model = np.zeros((3, 3))
        lons, lats, _ = self.Img2Ground_RFM(col=[0, 0, w, w, 0],
                                            lin=[0, h, h, 0, 0],
                                            altIni=[z, z, z, z, z],
                                            normalized=False,
                                            corrModel=corr_model,
                                            demInfo=dem_info)
        fp_poly_geom = Polygon(zip(lons, lats))
        gpd_polygon = geopandas.GeoDataFrame(index=[0], crs='epsg:4326', geometry=[fp_poly_geom])
        return fp_poly_geom, gpd_polygon

    def get_GSD(self):
        """Estimate the (x, y) ground sample distance at the image center, in map units."""
        h = self.linOff * 2
        w = self.colOff * 2
        # Project the center pixel and its diagonal neighbor to the ground.
        center = (int(h / 2), int(w / 2))
        center_plus = (center[0] + 1, center[1] + 1)
        prjCenter = self.Img2Ground_RFM(col=center[1], lin=center[0])
        prjCenter_plus = self.Img2Ground_RFM(col=center_plus[1], lin=center_plus[0])
        # Convert both points to the local UTM zone so the spacing is metric.
        epsgCode = geoRT.ComputeEpsg(lon=prjCenter[0], lat=prjCenter[1])
        centerCoords = geoRT.ConvCoordMap1ToMap2_Batch(X=[prjCenter[1], prjCenter_plus[1]],
                                                       Y=[prjCenter[0], prjCenter_plus[0]],
                                                       targetEPSG=epsgCode)
        xGSD = np.abs(centerCoords[0][0] - centerCoords[0][1])
        yGSD = np.abs(centerCoords[1][0] - centerCoords[1][1])
        return (xGSD, yGSD)

    def get_altitude_range(self, scaleFactor=1):
        """Return [min, max] plausible altitudes derived from the RPC offset and scale.

        Args:
            scaleFactor: multiplier applied to altScale to widen or narrow the range.
        """
        minAlt = self.altOff - scaleFactor * self.altScale
        maxAlt = self.altOff + scaleFactor * self.altScale
        return [minAlt, maxAlt]
if __name__ == '__main__':
    # TODO add to unit/functional tests
    # NOTE(review): hard-coded absolute path -- this smoke test only runs on the
    # original development machine.
    img = '/home/cosicorr/0-WorkSpace/3-PycharmProjects/geoCosiCorr3D/geoCosiCorr3D/Tests/3-geoOrtho_Test/Sample/Sample1/SPOT2.TIF'
    rfm = RFM(img, debug=True)
    print(f'attitude range:{rfm.get_altitude_range()}')
    print(f'GSD:{rfm.get_GSD()}')
    print(f'geoTransform:{rfm.get_geoTransform()}')
| SaifAati/Geospatial-COSICorr3D | geoCosiCorr3D/geoRFM/RFM.py | RFM.py | py | 15,569 | python | en | code | 37 | github-code | 36 | [
{
"api_name": "geoCosiCorr3D.geoCore.core_RFM.RawRFM",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "loggi... |
650884033 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
class YymanhuaPipeline(object):
    """Scrapy item pipeline storing scraped items in MongoDB with sequential ids."""

    def process_item(self, item, spider):
        """Assign a sequential ``_id``, insert the item into MongoDB, return it.

        Returning the item keeps it flowing to any later pipelines, per the
        Scrapy pipeline contract.
        """
        item["_id"] = self.count
        # Collection.insert() was deprecated in PyMongo 3.0 and removed in 4.0;
        # insert_one() is the supported single-document API.
        self.collection.insert_one(item)
        self.count += 1
        return item

    def open_spider(self, spider):
        """Open the MongoDB connection and reset the id counter when the spider starts."""
        self.client = MongoClient()
        self.collection = self.client["pySpider"]["yymh_2"]
        self.count = 1
        print("数据库以连接...")

    def close_spider(self, spider):
        """Release the MongoDB connection when the spider closes."""
        self.client.close()
        print("数据库连接关闭")
{
"api_name": "pymongo.MongoClient",
"line_number": 19,
"usage_type": "call"
}
] |
74506416103 | from django.urls import include, path, re_path
from django.urls import reverse
from rest_framework.routers import DefaultRouter
from .views import SubscriptionViewSet, UserSubscriptionView, SubscriptionCreateView, SubscriptionCancelView, SubscriptionUpdateView, SubscriptionReactiveView
# URL namespace used for reverse() lookups (e.g. 'subscription:...').
app_name = 'subscription'
# DRF router configured without trailing slashes; the viewset provides the
# standard list/retrieve/... routes under /subscription.
router = DefaultRouter(trailing_slash=False)
router.register(r'subscription', SubscriptionViewSet)
urlpatterns = [
path('', include(router.urls)),
path('subscription/user/', UserSubscriptionView.as_view()),
path('subscription/create/', SubscriptionCreateView.as_view()),
path('subscription/cancel/', SubscriptionCancelView.as_view()),
path('subscription/reactive/', SubscriptionReactiveView.as_view()),
path('subscription/update/', SubscriptionUpdateView.as_view()),
] | jubelltols/React_DRF_MySql | DRF/src/onbici/subscription/urls.py | urls.py | py | 814 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "views.SubscriptionViewSet",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
... |
8524473683 | # coding=gbk
import numpy as np
import pandas as pd
import re
from jieba import lcut
# Ordered (pattern, replacement) pairs applied by clean_str; the order matters
# (e.g. contractions are expanded before apostrophes are stripped).
_CLEANING_RULES = [
    (r"[^A-Za-z0-9^,!.\/'+-=]", " "),
    (r"what's", "what is "),
    (r"that's", "that is "),
    (r"there's", "there is "),
    (r"it's", "it is "),
    (r"\'s", " "),
    (r"\'ve", " have "),
    (r"can't", "can not "),
    (r"n't", " not "),
    (r"i'm", "i am "),
    (r"\'re", " are "),
    (r"\'d", " would "),
    (r"\'ll", " will "),
    (r",", " "),
    (r"\.", " "),
    (r"!", " ! "),
    (r"\/", " "),
    (r"\^", " ^ "),
    (r"\+", " + "),
    (r"\-", " - "),
    (r"\=", " = "),
    (r"'", " "),
    (r"(\d+)(k)", r"\g<1>000"),
    (r":", " : "),
    (r" e g ", " eg "),
    (r" b g ", " bg "),
    (r" u s ", " american "),
    (r"\0s", "0"),
    (r" 9 11 ", "911"),
    (r"e - mail", "email"),
    (r"j k", "jk"),
    (r"\s{2,}", " "),
]


def clean_str(text):
    """Lower-case *text* and normalize punctuation, contractions and spacing."""
    text = text.lower()
    for pattern, replacement in _CLEANING_RULES:
        text = re.sub(pattern, replacement, text)
    return text.strip()
#data_training: text is at column index 4, tfidf value at column index 7
#my_data: text is at column index 4, click count at column index 3
def load_data_and_labels(path):# loader used by train.py; may later be replaced by a MySQL-backed reader
    """Load texts and tfidf targets from the Excel workbook at *path*.

    Returns:
        x_text: one entry per document -- a list of sentences whose jieba
            tokens are space-joined.
        data_y: target values read from column index 7.
    """
    # data_x: raw texts wrapped in one-element lists (kept for output);
    # data_x_list: the same texts as plain strings (segmented below).
    data_x, data_x_list, data_y = [], [], []
    f = pd.ExcelFile(path)
    io = pd.io.excel.ExcelFile(path)
    for i in f.sheet_names:  # iterate over every sheet of the workbook
        dx = pd.read_excel(io, sheet_name=i, usecols=[4]) # reads the fifth column; change this index to read another column
        dy = pd.read_excel(io, sheet_name=i, usecols=[7])
        datax = dx.values.tolist()
        datay = dy.values.tolist()
        for j in datax:
            # Normalize ideographic (U+3000) and non-breaking (U+00A0) spaces.
            l = str(j[0]).strip().replace(u'\u3000', u' ').replace(u'\xa0', u' ')
            k = [str(j[0]).strip().replace(u'\u3000', u' ').replace(u'\xa0', u' ')] # TODO: punctuation still needs to be normalized here
            data_x.append(k)
            data_x_list.append(l)
        for m in datay:
            data_y.append(m[0])
    data = []
    max_sentence_length = 0
    max_paragraph_length = 0
    for id in range(len(data_x_list)):
        paragraphs = data_x_list[id]
        # Split on CJK/ASCII sentence terminators; the capturing group keeps the
        # delimiters as separate items, which are re-attached in the loop below.
        sentences_split = re.split('(。|!|\!|\.|?|\?)',paragraphs)
        sentences = []
        for i in range(int(len(sentences_split) / 2)):
            sent = sentences_split[2 * i] + sentences_split[2 * i + 1]
            sentences.append(sent)
        if max_paragraph_length < len(sentences):
            max_paragraph_length = len(sentences)
        for n, sentence in enumerate(sentences):
            # jieba word segmentation; tokens are rejoined with spaces.
            tokens = lcut(sentence)
            if max_sentence_length < len(tokens):
                max_sentence_length = len(tokens)
            sentence = " ".join(tokens)
            sentences[n] = sentence
        data.append([id, sentences])
    print(path)
    # print("max sentence length = {}\n".format(max_sentence_length))
    # print("max_paragraph_length = {}\n".format(max_paragraph_length))
    df = pd.DataFrame(data=data, columns=["id", "sentences"]) # two-column table: id / sentences
    x_text = df['sentences'].tolist() # back to a plain list
    return x_text, data_y
def batch_iter(data, batch_size, num_epochs, shuffle=False):
    """Yield successive mini-batches (numpy arrays) over one pass of *data*.

    Args:
        data: indexable dataset; converted to a numpy array up front.
        batch_size: number of items per yielded batch (last batch may be smaller).
        num_epochs: kept for interface compatibility; a single pass is produced.
        shuffle: when True, the data order is randomly permuted first.
    """
    data = np.array(data)
    data_size = len(data)
    # Ceiling division: number of batches needed to cover the whole dataset.
    num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
    if shuffle:
        permutation = np.random.permutation(np.arange(data_size))
        source = data[permutation]
    else:
        source = data
    for batch_num in range(num_batches_per_epoch):
        start_index = batch_num * batch_size
        end_index = min(start_index + batch_size, data_size)
        yield source[start_index:end_index]
if __name__ == "__main__":
trainFile = 'data.xlsx'
testFile = 'SemEval2010_task8_all_data/SemEval2010_task8_testing_keys/TEST_FILE_FULL.TXT'
a, b = load_data_and_labels(trainFile)
print(len(a))
print(len(b))
def eval_load_data_and_labels(path):# loader used by eval.py; may later be replaced by a MySQL-backed reader
    """Load texts and targets from the Excel workbook at *path* for evaluation.

    Unlike load_data_and_labels(), this reads columns 5 and 8 and also returns
    the raw (un-segmented) texts for output.
    """
    data_x, data_x_list, data_y = [], [], []
    f = pd.ExcelFile(path)
    io = pd.io.excel.ExcelFile(path)
    for i in f.sheet_names:  # iterate over every sheet of the workbook
        dx = pd.read_excel(io, sheet_name=i, usecols=[5])  # reads the sixth column; change this index to read another column
        dy = pd.read_excel(io, sheet_name=i, usecols=[8])
        datax = dx.values.tolist()
        datay = dy.values.tolist()
        for j in datax:
            # Normalize ideographic (U+3000) and non-breaking (U+00A0) spaces.
            l = str(j[0]).strip().replace(u'\u3000', u' ').replace(u'\xa0', u' ')
            k = [str(j[0]).strip().replace(u'\u3000', u' ').replace(u'\xa0', u' ')] # TODO: punctuation still needs to be normalized here
            data_x.append(k)
            data_x_list.append(l)
        for m in datay:
            data_y.append(m[0])
    data = []
    # lines = [line.strip() for line in open(path)]
    max_sentence_length = 0
    max_paragraph_length = 0
    for id in range(len(data_x_list)):  # main goal: word segmentation; the y values are already prepared
        paragraphs = data_x_list[id]  # one document
        # Split on CJK/ASCII sentence terminators; delimiters are kept and
        # re-attached in the loop below.
        sentences_split = re.split('(。|!|\!|\.|?|\?)', paragraphs)
        sentences = []
        for i in range(int(len(sentences_split) / 2)):
            sent = sentences_split[2 * i] + sentences_split[2 * i + 1]
            sentences.append(sent)
        # sentences = nltk.sent_tokenize(paragraphs)  # regex-based sentence split instead
        if max_paragraph_length < len(sentences):
            max_paragraph_length = len(sentences)
        for n, sentence in enumerate(sentences):
            # sentence = clean_str(sentence)
            tokens = lcut(sentence)
            # tokens = nltk.word_tokenize(sentence)  # jieba segmentation used instead
            if max_sentence_length < len(tokens):
                max_sentence_length = len(tokens)
            # if len(tokens) > FLAGS.max_sentence_length:
            #     print(tokens)
            sentence = " ".join(tokens)
            sentences[n] = sentence
        data.append([id, sentences])
    print(path)
    print("max sentence length = {}\n".format(max_sentence_length))
    print("max_paragraph_length = {}\n".format(max_paragraph_length))
    df = pd.DataFrame(data=data, columns=["id", "sentences"])  # two-column table: id / sentences
    x_text = df['sentences'].tolist()  # back to a plain list
    return x_text, data_x, data_y  # x_text: processed texts (for the model); data_x: raw texts (for output)
| mrgulugulu/text_regression | data_helpers.py | data_helpers.py | py | 7,290 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.sub",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 14,
"usage_type"... |
12486698422 | import contextlib
import os
import subprocess
import tempfile
from pathlib import Path
from pprint import pprint
from shutil import copyfile
from time import monotonic, sleep
from typing import Dict
from unittest import mock
import requests
from docker_tests.command_utils import run_command
from docker_tests.constants import SOURCE_ROOT
from docker_tests.docker_tests_utils import docker_image
# Credentials for the Airflow webserver API; default to airflow/airflow when the
# docker-compose env vars are not set.
AIRFLOW_WWW_USER_USERNAME = os.environ.get("_AIRFLOW_WWW_USER_USERNAME", "airflow")
AIRFLOW_WWW_USER_PASSWORD = os.environ.get("_AIRFLOW_WWW_USER_PASSWORD", "airflow")
# Example DAG and run id exercised by the quick-start smoke test below.
DAG_ID = "example_bash_operator"
DAG_RUN_ID = "test_dag_run_id"
def api_request(method: str, path: str, base_url: str = "http://localhost:8080/api/v1", **kwargs) -> Dict:
    """Issue an authenticated JSON request to the Airflow REST API and return the parsed body.

    Raises requests.HTTPError on any non-2xx status.
    """
    credentials = (AIRFLOW_WWW_USER_USERNAME, AIRFLOW_WWW_USER_PASSWORD)
    resp = requests.request(
        method=method,
        url=f"{base_url}/{path}",
        auth=credentials,
        headers={"Content-Type": "application/json"},
        **kwargs,
    )
    resp.raise_for_status()
    return resp.json()
@contextlib.contextmanager
def tmp_chdir(path):
    """Temporarily chdir into *path*; yields the previous cwd and always restores it."""
    original_dir = os.getcwd()
    try:
        os.chdir(path)
        yield original_dir
    finally:
        os.chdir(original_dir)
def wait_for_container(container_id: str, timeout: int = 300):
    """Block until the docker container is healthy/running or terminally stopped.

    Polls ``docker inspect`` once per second.  Waiting ends when:
    - the container is running/restarting AND reports a 'healthy' health check
      (or defines no health check at all), or
    - the container is in any other state (exited, dead, ...), or
    - *timeout* seconds elapse (0 disables the timeout), which raises.
    """
    container_name = (
        subprocess.check_output(["docker", "inspect", container_id, "--format", '{{ .Name }}'])
        .decode()
        .strip()
    )
    print(f"Waiting for container: {container_name} [{container_id}]")
    waiting_done = False
    start_time = monotonic()
    while not waiting_done:
        container_state = (
            subprocess.check_output(["docker", "inspect", container_id, "--format", '{{ .State.Status }}'])
            .decode()
            .strip()
        )
        if container_state in ("running", 'restarting'):
            # Go template: report the health status, or 'no-check' when the
            # image defines no HEALTHCHECK.
            health_status = (
                subprocess.check_output(
                    [
                        "docker",
                        "inspect",
                        container_id,
                        "--format",
                        "{{ if .State.Health }}{{ .State.Health.Status }}{{ else }}no-check{{ end }}",
                    ]
                )
                .decode()
                .strip()
            )
            print(f"{container_name}: container_state={container_state}, health_status={health_status}")
            if health_status == "healthy" or health_status == "no-check":
                waiting_done = True
        else:
            # Any non-running state (exited, dead, created...) ends the wait.
            print(f"{container_name}: container_state={container_state}")
            waiting_done = True
        if timeout != 0 and monotonic() - start_time > timeout:
            raise Exception(f"Timeout. The operation takes longer than the maximum waiting time ({timeout}s)")
        sleep(1)
def wait_for_terminal_dag_state(dag_id, dag_run_id):
    """Poll the DAG run state for up to ~30 seconds until it is success or failed."""
    attempts = 0
    while attempts < 30:
        dag_state = api_request("GET", f"dags/{dag_id}/dagRuns/{dag_run_id}").get("state")
        print(f"Waiting for DAG Run: dag_state={dag_state}")
        sleep(1)
        if dag_state in ("success", "failed"):
            break
        attempts += 1
def test_trigger_dag_and_wait_for_result():
    """End-to-end docker-compose smoke test: bring up the quick-start stack,
    unpause and trigger the example DAG, and assert the run finishes 'success'.

    The stack is always torn down ('docker-compose down --volumes') on exit.
    """
    compose_file_path = SOURCE_ROOT / "docs" / "apache-airflow" / "start" / "docker-compose.yaml"
    # Work inside a throwaway directory with AIRFLOW_IMAGE_NAME pointing at the
    # image under test.
    with tempfile.TemporaryDirectory() as tmp_dir, tmp_chdir(tmp_dir), mock.patch.dict(
        'os.environ', AIRFLOW_IMAGE_NAME=docker_image
    ):
        copyfile(str(compose_file_path), f"{tmp_dir}/docker-compose.yaml")
        # Directory layout expected by the quick-start compose file.
        os.mkdir(f"{tmp_dir}/dags")
        os.mkdir(f"{tmp_dir}/logs")
        os.mkdir(f"{tmp_dir}/plugins")
        (Path(tmp_dir) / ".env").write_text(f"AIRFLOW_UID={subprocess.check_output(['id', '-u']).decode()}\n")
        print(".emv=", (Path(tmp_dir) / ".env").read_text())
        copyfile(
            str(SOURCE_ROOT / "airflow" / "example_dags" / "example_bash_operator.py"),
            f"{tmp_dir}/dags/example_bash_operator.py",
        )
        run_command(["docker-compose", "config"])
        run_command(["docker-compose", "down", "--volumes", "--remove-orphans"])
        try:
            run_command(["docker-compose", "up", "-d"])
            # The --wait condition was released in docker-compose v2.1.1, but we want to support
            # docker-compose v1 yet.
            # See:
            # https://github.com/docker/compose/releases/tag/v2.1.1
            # https://github.com/docker/compose/pull/8777
            for container_id in (
                subprocess.check_output(["docker-compose", 'ps', '-q']).decode().strip().splitlines()
            ):
                wait_for_container(container_id)
            # Unpause and trigger the example DAG through the REST API.
            api_request("PATCH", path=f"dags/{DAG_ID}", json={"is_paused": False})
            api_request("POST", path=f"dags/{DAG_ID}/dagRuns", json={"dag_run_id": DAG_RUN_ID})
            try:
                wait_for_terminal_dag_state(dag_id=DAG_ID, dag_run_id=DAG_RUN_ID)
                dag_state = api_request("GET", f"dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}").get("state")
                assert dag_state == "success"
            except Exception:
                # Dump run + task-instance details before propagating the failure.
                print(f"HTTP: GET dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}")
                pprint(api_request("GET", f"dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}"))
                print(f"HTTP: GET dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}/taskInstances")
                pprint(api_request("GET", f"dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}/taskInstances"))
                raise
        except Exception:
            # Dump container state and logs on any startup/trigger failure.
            run_command(["docker", "ps"])
            run_command(["docker-compose", "logs"])
            raise
        finally:
            run_command(["docker-compose", "down", "--volumes"])
| drivendataorg/snowcast-showdown | 4th Place/images/base/docker_tests/test_docker_compose_quick_start.py | test_docker_compose_quick_start.py | py | 5,681 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_... |
18657364741 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import csv
# Plot the number of orders per weekday from the pre-aggregated CSV.
days = []
order_counts = []
with open('orderByDay.csv', 'r') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    for row in reader:
        days.append(row[0])
        order_counts.append(int(row[1]))
plt.plot(days, order_counts)
plt.xlabel('Weekdays')
plt.ylabel('Number of orders')
plt.title('Order By Weekdays')
plt.show()
| gjtqiyue/Comp421-Database-Project | Deliverable_3/PlotCsv.py | PlotCsv.py | py | 360 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.... |
1955434501 | import logging
import sqlite3.dbapi2 as sqlite3
import os
from ..config import CREATE_QUERY, TEST_QUERY
def load_database(path):
    """Sanity-check that the SQLite database at *path* answers TEST_QUERY.

    Exits the process on sqlite3.OperationalError; logs success otherwise.
    """
    connection = sqlite3.connect(path)
    cursor = connection.cursor()
    try:
        cursor.execute(TEST_QUERY)
    except sqlite3.OperationalError as e:
        logging.error(f"Something went wrong while dealing with the database! More info: {e}")
        cursor.close()
        # BUG FIX: the connection itself was previously leaked on both paths.
        connection.close()
        exit(e)
    logging.info("Done! Database was loaded succesfully!")
    cursor.close()
    connection.close()
def create_database(path: str):
    """Create the SQLite database at *path* if it does not exist, then verify it loads.

    sqlite3.connect() creates the file when missing; CREATE_QUERY is only run
    for a freshly created database.  Exits the process on OperationalError.
    """
    if not os.path.isfile(path):
        new = True
        logging.info(f"Creating new database at {path}")
    else:
        new = False
        logging.info(f"Loading database from {path}")
    DB = sqlite3.connect(path)
    if new:
        cursor = DB.cursor()
        try:
            cursor.execute(CREATE_QUERY)
            DB.commit()
            cursor.close()
        except sqlite3.OperationalError as e:
            logging.error(f"Something went wrong while creating the database! More info: {e}")
            cursor.close()
            DB.close()
            exit(e)
        logging.debug(f"Succesfully ran CREATE_QUERY on database at {path}, preparing to load")
    # BUG FIX: close this connection (previously leaked) before load_database()
    # opens its own.
    DB.close()
    load_database(path)
| nocturn9x/AmazonOffers-Manager | AmazonBot/database/dbcreator.py | dbcreator.py | py | 1,215 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "sqlite3.dbapi2.connect",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlite3.dbapi2",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "config.TEST_QUERY",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "sqlite3.dba... |
25775618531 | import pydriller
from pydriller.metrics.process.code_churn import CodeChurn
from pydriller.metrics.process.contributors_count import ContributorsCount
hash1 = "f858260790250880fc74ab7108073435f534d7f1"
hash2 = "319f616e572a03b984013d04d1b3a18ffd5b1190"
repo_path = "~/workfolder/dayjs"
churn_metric = CodeChurn(path_to_repo=repo_path,
from_commit=hash1,
to_commit=hash2)
files_count = churn_metric.count()
files_max = churn_metric.max()
files_avg = churn_metric.avg()
print('Code churn total para cada arquivo: {}'.format(files_count))
print('Maximo code churn para cada arquivo: {}'.format(files_max))
print('Code churn médio para cada arquivo: {}'.format(files_avg))
count_metric = ContributorsCount(path_to_repo=repo_path,
from_commit=hash1,
to_commit=hash2)
count = count_metric.count()
minor = count_metric.count_minor()
print('Numero de contribuidores por arquivo: {}'.format(count))
print('Numero de contribuidores "menores" por arquivo: {}'.format(minor)) | vrjuliao/BCC | engenharia-de-software-2/ex-8-1/3.py | 3.py | py | 1,059 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "pydriller.metrics.process.code_churn.CodeChurn",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pydriller.metrics.process.contributors_count.ContributorsCount",
"line_number": 19,
"usage_type": "call"
}
] |
72192933225 | import redis, json, uuid, time, calendar, csv, io, os, yaml, xmltodict
from bottle import response
from dicttoxml import dicttoxml
from datetime import datetime
# Content types the ESB endpoints accept (TSV support is currently disabled).
allowed_types = {
    "application/json",
    "application/xml",
    "application/x-yaml",
    # "text/tab-separated-values",
}
# expire after five minutes
records_expiration = 300
# Redis connection settings from the environment.
# NOTE(review): os.environ.get returns a string (or None); redis-py appears to
# accept a numeric string for port, but confirm the env vars are always set.
redis_port = os.environ.get("redis_port")
redis_pass = os.environ.get("redis_pass")
# Module-wide Redis client; decode_responses=True makes all replies str.
r = redis.Redis(
    host="localhost",
    port=redis_port,
    db=0,
    password=redis_pass,
    charset="utf-8",
    decode_responses=True,
)
def delete_message(message_id, provider_id):
    """Delete the single Redis message matching *message_id* for *provider_id*.

    Sets the HTTP status to 404 when nothing matches and 500 when the pattern
    is ambiguous (more than one key).
    """
    matches = r.keys(f"provider:{provider_id}*/uid:{message_id}/timestamp*")
    if len(matches) == 1:
        r.delete(matches[0])
        return "deleted"
    if not matches:
        response.status = 404
        return "No keys found"
    response.status = 500
    return "error: more than one keys found"
def get_messages(topic, limit, type):
    """Return the *limit* most recent messages whose key matches *topic*,
    serialized according to *type* (dict for JSON, bytes for XML, str for YAML).

    Unsupported types fall through to the trailing bare ``return`` (None).
    """
    # Get keys related to partial search of 'Topic'
    keys = r.keys("*/topic:" + topic + "/uid:*")
    # Validation
    # NOTE(review): redis-py's keys() returns a (possibly empty) list, never
    # None, so this guard looks unreachable -- consider testing for [] instead.
    if keys == None:
        raise Exception(f"No keys found with {topic}")
    timestamps = {}
    messages = {}
    n = 0
    # Collect each message plus the epoch timestamp parsed from the last
    # 10 characters of its key (see save_message's key layout).
    for key in keys:
        message = r.hgetall(key)
        messages[n] = message
        timestamps[n] = datetime.fromtimestamp(int(key[-10:]))
        n += 1
    # Sort by timestamps & retrieve their keys
    sorted_timestamps = list(
        dict(sorted(timestamps.items(), key=lambda x: x[1])).keys()
    )
    # index by custom limit (latest timestamps)
    index_out = sorted_timestamps[-limit:]
    # Dict comprehension -> return messages matching indexed keys
    sorted_messages = {str(k): messages[k] for k in index_out}
    # Transform the messages
    if type == "application/json":
        return sorted_messages
    elif type == "application/xml":
        return dicttoxml(sorted_messages)
    elif type == "application/x-yaml":
        return yaml.dump(sorted_messages)
    # elif type == "text/tab-separated-values":
    #     body_dict = ""
    #     byte_str = body.read()
    #     text_obj = byte_str.decode("UTF-8")
    #     rd = csv.reader(io.StringIO(text_obj), delimiter="\t", quotechar='"')
    #     print(rd[1])
    #     for row in rd:
    #         print(row)
    return
def save_message(body, type, topic, author):
    """Parse `body` per `type`, store its "message" field in Redis, and return
    the new message id (HTTP 201).

    NOTE(review): `type` shadows the builtin; kept for interface stability.
    """
    if type == "application/json":
        body_dict = json.load(body)
    elif type == "application/xml":
        body_dict = xmltodict.parse(body)
    elif type == "application/x-yaml":
        body_dict = yaml.safe_load(body)
    elif type == "text/tab-separated-values":
        # NOTE(review): TSV support is unfinished — body_dict is a plain string
        # here, so body_dict["message"] below raises TypeError.
        body_dict = ""
    # NOTE(review): any other content type leaves body_dict undefined
    # (NameError); presumably callers pre-filter via `allowed_types` — confirm.
    message = body_dict["message"]
    message_id = uuid.uuid1()
    current_GMT = time.gmtime()
    time_stamp = calendar.timegm(current_GMT)
    # The key encodes every searchable attribute; other helpers pattern-match it.
    key = f"provider:{author}/topic:{topic}/uid:{message_id}/timestamp:{time_stamp}"
    r.hset(key, "m", message)
    r.hset(key, "a", author)
    r.hset(key, "id", str(message_id))
    # Records auto-expire after `records_expiration` seconds.
    r.expire(key, records_expiration)
    response.status = 201
    return str(message_id)
| Zamanien/SI_mandatory | esb_transform.py | esb_transform.py | py | 3,154 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_... |
18775996369 | from django.urls import path
from . import views
# URL route table; the `name=` values are referenced from templates via {% url %}.
urlpatterns = [
    path('', views.home, name='home'),
    path('forms', views.form, name='form'),
    # <todo_id> is captured as a string path segment and passed to the view.
    path('remove/<todo_id>', views.delete_todo, name='remove'),
    path('add_todo', views.add_todo, name='new_todo'),
    path('update_todo/<todo_id>', views.edit_todo, name='edit_todo'),
    path('chart', views.chart_view, name='chart-view'),
    path('products', views.add_product, name='add-product'),
    path('management', views.add_managers, name='managers'),
    path('depart', views.add_department, name='department'),
]
| Torgbui-Hiram/django_crm | website/urls.py | urls.py | py | 580 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
33364078946 | from flask import Flask, request, jsonify, url_for, session, redirect, render_template
from flaskext.mysql import MySQL
from flask_oauth import OAuth
import logging
import time
from logging.handlers import RotatingFileHandler
# from urllib.requests import urlparse
app = Flask(__name__)
# NOTE(review): hard-coded session secret and DB credentials below should come
# from environment/config, not source control.
app.secret_key = 'secretkey'
# database configuration
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'root'
app.config['MYSQL_DATABASE_DB'] = 'dbtst'
app.config['MYSQL_DATABASE_HOST'] = '172.24.0.2'
app.config['MYSQL_DATABASE_PORT'] = 3306
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
mysql.init_app(app)
# Google OAuth2 remote app used for login (authorization-code flow).
# NOTE(review): the consumer secret is committed in source — rotate it.
oauth = OAuth()
google = oauth.remote_app('google',
                          base_url='https://www.google.com/accounts/',
                          authorize_url='https://accounts.google.com/o/oauth2/auth',
                          request_token_url=None,
                          request_token_params={'scope': 'https://www.googleapis.com/auth/userinfo.email',
                                                'response_type': 'code'},
                          access_token_url='https://accounts.google.com/o/oauth2/token',
                          access_token_method='POST',
                          access_token_params={'grant_type': 'authorization_code'},
                          consumer_key='482758007120-vm7lob4gqkmr8eeeq21uo5odpnt6736g.apps.googleusercontent.com',
                          consumer_secret='cpNDbgd5rFiw-98asLncKZUd')
@app.route('/')
def landing():
    """Public landing page (no login required)."""
    return render_template('index.html')

@app.route('/home')
def home():
    """Main page; redirects to Google login when no session token exists.

    NOTE(review): app.logger.error is used for informational audit lines
    throughout — presumably to pass the INFO-level handler; confirm intent.
    """
    app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' masuk halaman utama')
    access_token = session.get('access_token')
    if access_token is None:
        return redirect(url_for('login'))
    return render_template('home.html')

@app.route('/search')
def search():
    """Search page (login required)."""
    app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' masuk halaman pencarian')
    access_token = session.get('access_token')
    if access_token is None:
        return redirect(url_for('login'))
    return render_template('search.html')

@app.route('/register')
def register():
    """Data-entry page (login required)."""
    app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' masuk halaman penambahan data')
    access_token = session.get('access_token')
    if access_token is None:
        return redirect(url_for('login'))
    return render_template('register.html')

@app.route('/hapus')
def hapus():
    """Deletion page (login required)."""
    app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' masuk halaman penghapusan data')
    access_token = session.get('access_token')
    if access_token is None:
        return redirect(url_for('login'))
    return render_template('hapus.html')

@app.route('/baru')
def baru():
    """Data-update page (login required)."""
    app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' masuk halaman pembaruan data')
    access_token = session.get('access_token')
    if access_token is None:
        return redirect(url_for('login'))
    return render_template('baru.html')
@app.route('/login')
def login():
    """Start the Google OAuth2 flow; Google redirects back to /authorized."""
    callback=url_for('authorized', _external=True)
    return google.authorize(callback=callback)

@app.route('/authorized')
@google.authorized_handler
def authorized(resp):
    """OAuth2 callback: stash the access token in the session and go home."""
    access_token = resp['access_token']
    # NOTE(review): stores a (token, '') tuple; callers only check presence
    # via session.get, so the second element is never read here.
    session['access_token'] = access_token, ''
    return redirect(url_for('home'))
@app.route('/user', methods=['GET'])
def get_user():
    """Search students by exact name (query arg `carinama`).

    Returns:
        dict: {'hasil': [{'nim', 'nama', 'jurusan', 'angkatan'}, ...]}
    """
    app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' mencari user')
    conn = mysql.connect()  # connect database
    try:
        cursor = conn.cursor()
        carinama = request.args.get('carinama')
        query = 'SELECT * FROM mahasiswa WHERE nama=%s'
        # BUG FIX: `(carinama)` was a parenthesised string, not a tuple; bind
        # exactly one parameter with a 1-tuple.
        cursor.execute(query, (carinama,))
        result = cursor.fetchall()
    finally:
        # BUG FIX: this handler leaked its connection — every sibling handler
        # closes the connection; now this one does too.
        conn.close()
    # Re-key each row tuple into a labelled dict for the JSON response.
    result_baru = [
        {
            'nim': user[0],
            'nama': user[1],
            'jurusan': user[2],
            'angkatan': user[3],
        }
        for user in result
    ]
    return {'hasil': result_baru}
@app.route('/create', methods=['POST'])
def insert_user():
    """Insert a new student row from form fields nim/nama/jurusan/angkatan."""
    app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' menambahkan data')
    conn = mysql.connect()
    cursor = conn.cursor()
    nim = request.form['nim']
    nama = request.form['nama']
    jurusan = request.form['jurusan']
    angkatan = request.form['angkatan']
    # Parameterized insert — values are bound by the driver, not interpolated.
    query = 'INSERT INTO mahasiswa (nim, nama, jurusan, angkatan) VALUES (%s,%s,%s,%s)'
    data = (nim, nama, jurusan, angkatan)
    cursor.execute(query, data)
    conn.commit()
    conn.close()
    return 'data berhasil ditambahkan'
@app.route('/update', methods=['PUT'])
def update_user():
    """Update an existing student row, keyed by form field `nim`."""
    app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' memperbarui data')
    conn = mysql.connect()
    cursor = conn.cursor()
    nim = request.form['nim']
    nama = request.form['nama']
    jurusan = request.form['jurusan']
    angkatan = request.form['angkatan']
    # Parameter order must match the placeholders: SET values first, key last.
    query = 'UPDATE mahasiswa SET nama=%s, jurusan=%s, angkatan=%s WHERE nim=%s'
    data = (nama, jurusan, angkatan, nim)
    cursor.execute(query, data)
    conn.commit()
    conn.close()
    return 'data berhasil diupdate'
@app.route('/delete', methods=['DELETE'])
def delete_user():
    """Delete the student row matching form field `carinim`."""
    app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' menghapus data')
    conn = mysql.connect()
    cursor = conn.cursor()
    query = 'DELETE FROM mahasiswa WHERE nim=%s'
    # NOTE(review): `data` is a bare string, not a 1-tuple; PyMySQL escapes a
    # non-sequence as a single value, but `(data,)` would be clearer.
    data = request.form['carinim']
    cursor.execute(query, data)
    conn.commit()
    conn.close()
    return 'data berhasil didelete'
# For updates only the query changes; still need execute + commit.
if __name__ == "__main__":
    # Rotating audit log: ~10 kB per file, one backup kept.
    handler = RotatingFileHandler('tst.log', maxBytes=10000, backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
    # Listen on all interfaces (runs inside a container).
    app.run(host='0.0.0.0')
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flaskext.mysql.MySQL",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_oauth.OAuth",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.render_templa... |
23618738970 | def test(path):
    """Run a VASP relaxation of bulk Si via pylada and sanity-check the
    converged stress, forces and total energy.

    Args:
        path: directory containing a `pseudos/Si` pseudopotential folder.
    """
    from glob import glob
    from os.path import join
    from shutil import rmtree
    from tempfile import mkdtemp
    from numpy import all, abs
    from quantities import kbar, eV, angstrom
    from pylada.crystal import Structure
    from pylada.vasp import Vasp
    from pylada.vasp.relax import Relax
    from pylada import default_comm

    # Two-atom diamond-cubic Si primitive cell (scale 5.43).
    structure = Structure([[0, 0.5, 0.5],[0.5, 0, 0.5], [0.5, 0.5, 0]], scale=5.43, name='has a name')\
                         .add_atom(0,0,0, "Si")\
                         .add_atom(0.25,0.25,0.25, "Si")

    # Deliberately cheap VASP settings (encut=1, coarse k-mesh) — this is a
    # smoke test, not a production calculation.
    vasp = Vasp()
    vasp.kpoints = "Automatic generation\n0\nMonkhorst\n2 2 2\n0 0 0"
    vasp.prec = "accurate"
    vasp.ediff = 1e-5
    vasp.encut = 1
    vasp.ismear = "fermi"
    vasp.sigma = 0.01
    vasp.relaxation = "volume"
    vasp.add_specie = "Si", "{0}/pseudos/Si".format(path)
    directory = mkdtemp()
    try:
        functional = Relax(copy=vasp)
        # Relax(copy=...) must inherit the copied functional's settings.
        assert abs(functional.ediff - 1e-5) < 1e-8
        assert functional.prec == 'Accurate'
        result = functional(structure, outdir=directory, comm=default_comm,
                            relaxation="volume ionic cellshape")
        assert result.success

        # Relaxation step directories are named by integer step index.
        def sortme(a): return int(a.split('/')[-1])
        dirs = sorted(glob(join(join(directory, '*'), '[0-9]')), key=sortme)
        # for previous, current in zip(dirs, dirs[1:]):
        #   assert len(check_output(['diff', join(previous, 'CONTCAR'), join(current, 'POSCAR')])) == 0
        # assert len(check_output(['diff', join(current, 'CONTCAR'), join(directory, 'POSCAR')])) == 0
        # Converged cell: near-zero stress/forces, energy near -10.668652 eV.
        assert result.stress.units == kbar and all(abs(result.stress) < 1e0)
        assert result.forces.units == eV/angstrom and all(abs(result.forces) < 1e-1)
        assert result.total_energy.units == eV and all(abs(result.total_energy + 10.668652*eV) < 1e-2)
    finally:
        # Keep the output only when pointed at the fixed debug path.
        if directory != '/tmp/test/relax': rmtree(directory)
        pass
if __name__ == "__main__":
    from sys import argv
    # Usage: runrelax.py <path-to-pseudopotential-root>
    test(argv[1])
| mdavezac/LaDa | vasp/tests/runrelax.py | runrelax.py | py | 1,932 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "pylada.crystal.Structure",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pylada.vasp.Vasp",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pylada.vas... |
22703498747 | import spacy
import numpy
import os
from numpy import dot
from numpy.linalg import norm
nlp = spacy.load('en_core_web_lg')
def compare_wordlists_by_spacy_vectors(model, wordlist1, wordlist2):
    """Cross-compare two whitespace-separated word lists via vector similarity.

    Args:
        model: spaCy pipeline (or compatible callable) applied to each list.
        wordlist1, wordlist2: whitespace-separated word strings.

    Returns:
        list of [token_a, token_b, similarity] triples, one per pair in
        (wordlist1 x wordlist2) row-major order.
    """
    left_tokens = model(wordlist1)
    right_tokens = model(wordlist2)
    return [
        [left, right, left.similarity(right)]
        for left in left_tokens
        for right in right_tokens
    ]
#wordlist1 = (u'illegals refugees immigrants humans')
#wordlist2 = (u'love hate anger disgust')
#compare_wordlists_by_spacy_vectors(nlp, wordlist1, wordlist2)
def compare_within_wordlist_by_spacy_vectors(model, wordlist1):
    """Pairwise similarity of every token in `wordlist1` against every token
    of the same list (self-pairs included).

    Args:
        model: spaCy pipeline (or compatible callable).
        wordlist1: whitespace-separated word string.

    Returns:
        list of [token_a, token_b, similarity] triples in row-major order.
    """
    # FIX: the original ran the model twice over the same text; one pass is
    # enough since the list is compared with itself.
    tokens = model(wordlist1)
    my_data = []
    for left in tokens:
        for right in tokens:
            my_data.append([left, right, left.similarity(right)])
    return my_data
#wordlist1 = (u'illegals refugees immigrants humans') #list of strings = input at present
#wordlist2 = (u'love hate anger disgust')
#compare_within_wordlist_by_spacy_vectors(nlp, wordlist1)
#compare_within_wordlist_by_spacy_vectors(nlp, wordlist2)
def similar_strings(model, word):
    '''
    Return the 20 vocabulary entries most similar to `word` (cosine over
    spaCy vectors), as a list of their orthographic strings.
    '''
    my_data = []
    # cosine similarity
    cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
    # gather all known lowercase words that have a vector, excluding `word`
    # BUG FIX: the original filter was `w.lower_ != word`, comparing a string
    # to a lexeme object — always True, so the query word was never excluded.
    allWords = list({w for w in model.vocab
                     if w.has_vector and w.orth_.islower() and w.lower_ != word.lower_})
    # sort by similarity to word, most similar first
    allWords.sort(key=lambda w: cosine(w.vector, word.vector))
    allWords.reverse()
    print("Top 20 most similar words to", word.orth_, ":")
    # Use a distinct loop variable; the original shadowed the `word` argument.
    for candidate in allWords[:20]:
        my_data.append(candidate.orth_)
    return my_data
#hit = nlp.vocab[u'hit']
#similar_strings(nlp, hit)
| kariemoorman/iat-weat-wefat | scripts/comparison_functions/spacy_word_comparison_functions.py | spacy_word_comparison_functions.py | py | 2,179 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "spacy.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 57,
"usage_type": "call"
}
] |
12262454581 | import sys
import contentful_management
from env import *
# Management-API client scoped to the configured space's master environment.
client = contentful_management.Client(MANAGEMENT_API_TOKEN)
space = client.spaces().find(SPACE_ID)
environment = space.environments().find('master')
content_types = environment.content_types().all()
# assumes the first content type is the formation model — TODO confirm
content_type = content_types[0]
# Dry-run mode: report configuration and fetch entries, then exit.
if '--test' in sys.argv:
    # Test by getting entries and showing local image locations
    print('Testing...')
    print('Your formation index file is set to: ' + FORMATION_INDEX)
    print('Your formation image file directory is set to: ' + FORMATION_IMAGE_DIR)
    entries = content_type.entries().all()
    sys.exit()
# Purge mode: remove every entry and asset, then exit. Published items must be
# unpublished before they can be deleted.
if '--delete-all' in sys.argv:
    # Delete all entries
    entries = content_type.entries().all()
    for entry in entries:
        if entry.is_published:
            entry.unpublish()
        environment.entries().delete(entry.id)
    # Delete all assets
    assets = environment.assets().all()
    for asset in assets:
        if asset.is_published:
            asset.unpublish()
        environment.assets().delete(asset.id)
    sys.exit()
with open(FORMATION_INDEX, 'r') as formation_file:
    for line in formation_file:
        # Each index line is "<size>-<code-suffix> <formation name>"; the
        # numeric prefix of the code is the formation size.
        formation_code, formation_name = line.strip().split(None, 1)
        formation_size = int(formation_code.split('-')[0])
        formation_filename = formation_code + '.png'
        print(formation_code, formation_name)
        # Create a new upload to get the image into Contentful
        upload = space.uploads().create(FORMATION_IMAGE_DIR + '/' + formation_filename)
        # Associate an asset with the uploaded image
        new_asset = environment.assets().create(
            None,
            {
                'fields': {
                    'title': {
                        'en-US': formation_code + ': ' + formation_name
                    },
                    'file': {
                        'en-US': {
                            'fileName': formation_filename,
                            'contentType': 'image/png',
                            'uploadFrom': upload.to_link().to_json()
                        }
                    }
                }
            }
        )
        # Process the asset
        new_asset.process()
        # Wait for the asset processing to be complete: the file gains a
        # 'url' entry once processing finishes.
        # NOTE(review): busy-wait with a bare except — any persistent error
        # loops forever; consider a timeout.
        while True:
            processed_asset = environment.assets().find(new_asset.id)
            try:
                if 'url' in processed_asset.file:
                    break
            except:
                continue
        # And then publish
        processed_asset.publish()
        # Add an entry that references the asset via a Link field.
        entry_attributes = {
            'content_type_id': content_type.id,
            'fields': {
                'name': {
                    'en-US': formation_name
                },
                'code': {
                    'en-US': formation_code
                },
                'size': {
                    'en-US': formation_size
                },
                'diagram': {
                    'en-US': {
                        'sys': {
                            'type': 'Link',
                            'linkType': 'Asset',
                            'id': processed_asset.id
                        }
                    }
                }
            }
        }
        new_entry = environment.entries().create(
            None,
            entry_attributes
        )
        new_entry.publish()
| wildlava/skydiving-formations-react | tools/ingest_formations.py | ingest_formations.py | py | 3,444 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "contentful_management.Client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"li... |
10755048516 | #date:21-9-17
import pafy
# Video to inspect / download audio from.
url="https://www.youtube.com/watch?v=mkKXS0FI_L4"
video=pafy.new(url)
# List every available audio-only stream: bitrate, container, size in bytes.
audiostreams = video.audiostreams
for a in audiostreams:
    print(a.bitrate, a.extension, a.get_filesize())
########to download audio directly
#audiostreams[1].download()
# Highest-bitrate audio stream.
bestaudio = video.getbestaudio()
# NOTE(review): bare attribute access — no effect; presumably left over from
# an interactive session.
bestaudio.bitrate
########To download best audio
#bestaudio.download()
##to see all the streams available (audio+video alike)
allstreams = video.allstreams
for s in allstreams:
    print(s.mediatype, s.extension, s.quality)
| pemagrg1/YoutubeDownloader | download_audio.py | download_audio.py | py | 513 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pafy.new",
"line_number": 4,
"usage_type": "call"
}
] |
37204976551 | #!/usr/bin/python3
from __future__ import print_function
import os
import sys
import torch
import torch.backends.cudnn as cudnn
import argparse
import cv2
import numpy as np
from collections import OrderedDict
sys.path.append(os.getcwd() + '/../../src')
from config import cfg
from prior_box import PriorBox
from detection import Detect
from nms import nms
from utils import decode
from timer import Timer
from yufacedetectnet import YuFaceDetectNet
# CLI for single-image face/mask detection.
parser = argparse.ArgumentParser(description='Face and Mask Detection')
parser.add_argument('-m', '--trained_model', default='weights/yunet_final.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--image_file', default='t1.jpg', type=str, help='the image file to be detected')
parser.add_argument('--conf_thresh', default=0.5, type=float, help='conf_thresh')
parser.add_argument('--top_k', default=20, type=int, help='top_k')
parser.add_argument('--nms_thresh', default=0.5, type=float, help='nms_thresh')
# keep_top_k is parsed but not referenced in this script.
parser.add_argument('--keep_top_k', default=20, type=int, help='keep_top_k')
parser.add_argument('--device', default='cuda:0', help='which device the program will run on. cuda:0, cuda:1, ...')
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
    """Report key overlap between a checkpoint and a model's state dict.

    Prints how many model keys are missing from the checkpoint, how many
    checkpoint keys go unused, and how many are shared; asserts the shared
    set is non-empty.
    """
    checkpoint_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    shared_keys = model_keys & checkpoint_keys
    print('Missing keys:{}'.format(len(model_keys - checkpoint_keys)))
    print('Unused checkpoint keys:{}'.format(len(checkpoint_keys - model_keys)))
    print('Used keys:{}'.format(len(shared_keys)))
    assert len(shared_keys) > 0, 'load NONE from pretrained checkpoint'
    return True
def remove_prefix(state_dict, prefix):
    ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
    print('remove prefix \'{}\''.format(prefix))
    stripped = {}
    for key, value in state_dict.items():
        # Strip only a leading occurrence of the prefix; other keys pass through.
        new_key = key.split(prefix, 1)[-1] if key.startswith(prefix) else key
        stripped[new_key] = value
    return stripped
def load_model(model, pretrained_path, load_to_cpu):
    """Load a checkpoint into `model`, stripping any DataParallel 'module.' prefix.

    Args:
        model: network instance to populate.
        pretrained_path: path to the saved weights (either a raw state dict or
            a dict wrapping one under 'state_dict').
        load_to_cpu: when True, map all tensors to CPU; otherwise map them to
            the current CUDA device.

    Returns:
        The same model with weights loaded (strict=False, so keys outside the
        checked overlap are tolerated).
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
    else:
        device = torch.cuda.current_device()
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
    # Some checkpoints wrap the weights under a 'state_dict' key.
    if "state_dict" in pretrained_dict.keys():
        pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
    else:
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    check_keys(model, pretrained_dict)
    model.load_state_dict(pretrained_dict, strict=False)
    return model
# Class index 0 is background; 1 = face, 2 = mask.
labels = ('_background_', 'face', 'mask')
num_classes = 3
# Detection post-processor configured from the CLI thresholds.
detect = Detect(num_classes, 0, args.top_k, args.conf_thresh, args.nms_thresh)
if __name__ == '__main__':
    # img_dim = 320
    device = torch.device(args.device)
    # Inference only — disable autograd bookkeeping globally.
    torch.set_grad_enabled(False)
    # net and model
    net = YuFaceDetectNet(phase='test', size=None )  # initialize detector
    net = load_model(net, args.trained_model, True)
    # net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    ## Print model's state_dict
    #print("Model's state_dict:")
    #for param_tensor in net.state_dict():
    #    print(param_tensor, "\t", net.state_dict()[param_tensor].size())
    cudnn.benchmark = True
    net = net.to(device)
    _t = {'forward_pass': Timer(), 'misc': Timer()}
    # testing begin
    img_raw = cv2.imread(args.image_file, cv2.IMREAD_COLOR)
    img = np.float32(img_raw)
    im_height, im_width, _ = img.shape
    #img -= (104, 117, 123)
    # HWC -> CHW and add a batch dimension before the forward pass.
    img = img.transpose(2, 0, 1)
    img = torch.from_numpy(img).unsqueeze(0)
    img = img.to(device)
    _t['forward_pass'].tic()
    loc, conf = net(img)  # forward pass
    _t['forward_pass'].toc()
    _t['misc'].tic()
    # Build prior (anchor) boxes for this image size and decode detections.
    priorbox = PriorBox(cfg, image_size=(im_height, im_width))
    priors = priorbox.forward()
    priors = priors.to(device)
    detections = detect(loc, conf, priors)
    # detections = out.data
    print(detections.size())
    # Scale factors to map normalized box coords back to pixel space.
    scale = torch.Tensor([im_width, im_height, im_width, im_height])
    # scale = scale.to(device)
    for i in range(detections.size(1)):
        j = 0
        # Draw every detection of class i whose score clears 0.6; detections
        # are consumed in order until the score drops below the threshold.
        while detections[0,i,j,0] >= 0.6:
            score = detections[0,i,j,0]
            label_name = labels[i]
            display_txt = '%s: %.2f'%(label_name, score)
            pt = (detections[0,i,j,1:]*scale).cpu().numpy()
            j+=1
            pts = (int(pt[0]), int(pt[1]))
            pte = (int(pt[2]), int(pt[3]))
            cx = int(pt[0])
            cy = int(pt[1]) + 12
            cv2.rectangle(img_raw, pts, pte, (0, 255, 0), 2)
            cv2.putText(img_raw, label_name, (cx, cy), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
    # Show the annotated image until a key is pressed.
    cv2.imshow('facemask', img_raw)
    cv2.waitKey(0)
| tienhoangvan/libfacemaskdet | train/tasks/task1/demo.py | demo.py | py | 5,078 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
... |
16076363640 | import argparse
import os
import statistics
from typing import Callable
import dotenv
from tqdm import tqdm
from headhunter import get_vacancies_from_hh, fetch_areas_ids, predict_rub_salary_hh
from salary_helpers import create_table
from superjob import get_vacancies_from_sj, predict_rub_salary_sj, fetch_town_ids
# Languages to collect vacancy salary statistics for.
PROGRAMMING_LANGUAGES = [
    "TypeScript",
    "Swift",
    "Scala",
    "Objective-C",
    "Shell",
    "Go",
    "C",
    "C#",
    "C++",
    "PHP",
    "Ruby",
    "Python",
    "Java",
    "JavaScript",
]
def get_vacancies_statistics(
vacancies: dict, predict_rub_salary_method: Callable
) -> dict | None:
salaries = []
for vacancy in vacancies.get("items"):
salary = predict_rub_salary_method(vacancy)
if not salary:
continue
salaries.append(salary)
average_salary = round(statistics.mean(salaries), 0) if salaries else 0
return {
"vacancies_found": vacancies.get("found"),
"vacancies_processed": len(salaries),
"average_salary": int(average_salary),
}
def main():
    """CLI entry point: collect per-language salary statistics from
    HeadHunter and SuperJob for one location and print a table per platform."""
    parser = argparse.ArgumentParser(
        description="Collects statistics on salaries of programming languages."
    )
    parser.add_argument(
        "-l",
        "--location",
        type=str,
        help="Search area. (default: Москва)",
        default="Москва",
    )
    args = parser.parse_args()
    # Load SJ_API_KEY (and any other secrets) from .env.
    dotenv.load_dotenv()
    salary_statistics = {}
    # Resolve the human-readable location to platform-specific ids once.
    area_ids = fetch_areas_ids(args.location)
    town_ids = fetch_town_ids(os.getenv("SJ_API_KEY"), args.location)
    for language in tqdm(PROGRAMMING_LANGUAGES):
        vacancy_name = "Программист {}".format(language)
        vacancies = get_vacancies_from_hh(
            text=vacancy_name,
            area_ids=area_ids,
        )
        head_hunter = salary_statistics.setdefault("Head Hunter", {})
        head_hunter[language] = (
            get_vacancies_statistics(vacancies, predict_rub_salary_hh) or {}
        )
        vacancies = get_vacancies_from_sj(
            os.getenv("SJ_API_KEY"), text=vacancy_name, town_ids=town_ids
        )
        super_job = salary_statistics.setdefault("Super Job", {})
        super_job[language] = (
            get_vacancies_statistics(vacancies, predict_rub_salary_sj) or {}
        )
    # One table per platform, titled "<platform> <location>".
    for platform_title, stat in salary_statistics.items():
        print(create_table(stat, f"{platform_title} {args.location}"))
# Script entry point.
if __name__ == "__main__":
    main()
| shadowsking/salary-statistics | main.py | main.py | py | 2,481 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Callable",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "statistics.mean",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "dotenv.load_d... |
27895854947 | from django.db import models
from django.conf import settings
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
import uuid
channel_layer = get_channel_layer()
class MessageManager(models.Manager):
    """Manager with bulk delivery/read bookkeeping helpers for Message."""

    def get_pending_messages(self, user):
        """Return the user's undelivered messages (oldest first), marking each
        one as delivered to that user along the way."""
        pending = user.pending_messages.order_by('timestamp')
        for message in pending:
            message.remove_user_from_pending(user)
        return pending

    def mark_room_as_read(self, user, room):
        """Mark every unread message of `user` in `room` as read; return them."""
        unread = user.unread_messages.filter(room=room)
        for message in unread:
            message.mark_as_read(user)
        return unread
class Message(models.Model):
    """A chat message with per-recipient delivery and read tracking.

    `pending_reception` / `pending_read` hold the users who have not yet
    received / read the message; when either set empties, every room
    participant is notified over the channels layer.
    """
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE)
    room = models.ForeignKey(
        'Room',
        related_name='messages',
        on_delete=models.CASCADE)
    body = models.TextField(max_length=500, default='')
    timestamp = models.DateTimeField(auto_now_add=True)
    # Users who have not yet received this message.
    pending_reception = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='pending_messages')
    # Users who have not yet read this message.
    pending_read = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='unread_messages')
    # Client-generated id — presumably lets the frontend reconcile
    # optimistically rendered messages; confirm against the client code.
    front_key = models.UUIDField(
        verbose_name="frontend key",
        default=uuid.uuid4,
        unique=True)
    objects = MessageManager()

    class Meta:
        verbose_name = "Mensaje"
        verbose_name_plural = "Mensajes"

    def __str__(self):
        return self.body

    def signal_to_room(self, message, data={}):
        """Broadcast `message`/`data` to each participant's personal group.

        NOTE(review): mutable default `data={}` is shared across calls —
        harmless while only read, but worth replacing with None.
        """
        for participant in self.room.participants.all():
            async_to_sync(channel_layer.group_send)(
                f"group_general_user_{participant.id}", {
                    "type": "chat_message",
                    "message": message,
                    'data': data
                })

    def remove_user_from_pending(self, user):
        """Mark the message as delivered to `user`; emit 'all_received' once
        no recipient is pending."""
        if self.pending_reception.filter(id=user.id).exists():
            self.pending_reception.remove(user)
        # If there are no more pending then signal
        if not self.pending_reception.exists():
            self.signal_to_room('update_message', {
                'message_id': self.id,
                'kind': 'all_received'
            })

    def mark_as_read(self, user):
        """Mark the message as read by `user`; emit 'all_read' once no
        recipient is pending."""
        if self.pending_read.filter(id=user.id).exists():
            self.pending_read.remove(user)
        # If there are no more pending then signal
        if not self.pending_read.exists():
            self.signal_to_room('update_message', {
                'message_id': self.id,
                'kind': 'all_read'
            })
class Room(models.Model):
    """A conversation: either a 1:1 private chat or a group chat."""

    class RoomKind(models.IntegerChoices):
        # Discriminates private chats from group chats.
        PRIVATE = 1
        GROUP = 2

    # Presumably only set for GROUP rooms (blank/null allowed) — confirm.
    group_name = models.CharField(max_length=255, blank=True, null=True)
    kind = models.IntegerField(choices=RoomKind.choices)
    participants = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='rooms')
    created_at = models.DateTimeField(
        verbose_name='Creation Date',
        auto_now_add=True)
    # auto_now=True: bumped on every save of the room row.
    last_activity = models.DateTimeField(
        verbose_name='Last activity date',
        auto_now=True)

    class Meta:
        verbose_name = "Sala"
        verbose_name_plural = "Salas"

    def signal_to_room(self, message, data={}):
        """Broadcast `message`/`data` to each participant's personal group.

        NOTE(review): mutable default `data={}`; same pattern as
        Message.signal_to_room — harmless while only read.
        """
        for participant in self.participants.all():
            async_to_sync(channel_layer.group_send)(
                f"group_general_user_{participant.id}", {
                    "type": "chat_message",
                    "message": message,
                    'data': data
                })
| stgoddv/whatsapp-clone-django-vuejs | djchat/chat/models.py | models.py | py | 3,772 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "channels.layers.get_channel_layer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.db.models.Manager",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
{
"a... |
8444672238 | import functools
import warnings
import numpy
import cupy
import cupyx.scipy.fft
def _wraps_polyroutine(func):
    """Wrap a polynomial routine so it accepts poly1d, scalar, or 1-d ndarray
    arguments and mirrors poly1d inputs onto the output type."""
    def _coeffs_of(obj):
        # Accept poly1d, scalars and one-dimensional arrays only.
        if isinstance(obj, cupy.poly1d):
            return obj._coeffs
        if cupy.isscalar(obj):
            return cupy.atleast_1d(obj)
        if isinstance(obj, cupy.ndarray):
            arr = cupy.atleast_1d(obj)
            if arr.ndim != 1:
                raise ValueError('Multidimensional inputs are not supported')
            return arr
        raise TypeError('Unsupported type')

    def wrapper(*args):
        result = func(*[_coeffs_of(a) for a in args])
        # Plain-array inputs get the raw result back.
        if not any(isinstance(a, cupy.poly1d) for a in args):
            return result
        # Any poly1d input means the result is wrapped back into poly1d.
        if isinstance(result, cupy.ndarray):
            return cupy.poly1d(result)
        if isinstance(result, tuple):
            return tuple([cupy.poly1d(r) for r in result])
        assert False  # Never reach

    return functools.update_wrapper(wrapper, func)
def poly(seq_of_zeros):
    """Computes the coefficients of a polynomial with the given roots sequence.

    Args:
        seq_of_zeros (cupy.ndarray): a sequence of polynomial roots.

    Returns:
        cupy.ndarray: polynomial coefficients from highest to lowest degree.

    .. warning::
        This function doesn't support general 2d square arrays currently.
        Only complex Hermitian and real symmetric 2d arrays are allowed.

    .. seealso:: :func:`numpy.poly`
    """
    x = seq_of_zeros
    if x.ndim == 2 and x.shape[0] == x.shape[1] and x.shape[0] != 0:
        # Square-matrix input: the roots are its eigenvalues; only the
        # Hermitian/symmetric case (x equals its conjugate transpose) is handled.
        if cupy.array_equal(x, x.conj().T):
            x = cupy.linalg.eigvalsh(x)
        else:
            raise NotImplementedError('Only complex Hermitian and real '
                                      'symmetric 2d arrays are supported '
                                      'currently')
    elif x.ndim == 1:
        x = x.astype(cupy.mintypecode(x.dtype.char), copy=False)
    else:
        raise ValueError('Input must be 1d or non-empty square 2d array.')
    if x.size == 0:
        # No roots -> the constant polynomial 1.
        return 1.0
    # Round the factor count up to a power of two; store one linear factor
    # (1, -root) per row, with surplus rows set to (1, 0) (the factor x).
    size = 2 ** (x.size - 1).bit_length()
    a = cupy.zeros((size, 2), x.dtype)
    a[:, 0].fill(1)
    cupy.negative(x, out=a[:x.size, 1])
    # Balanced product tree: pairwise-convolve the two halves until a single
    # product polynomial remains (log2(size) batched convolutions). The extra
    # x factors only shift trailing coefficients, stripped by the final slice.
    while size > 1:
        size = size // 2
        a = cupy._math.misc._fft_convolve(a[:size], a[size:], 'full')
    return a[0, :x.size + 1]
@_wraps_polyroutine
def polyadd(a1, a2):
    """Computes the sum of two polynomials.

    Args:
        a1 (scalar, cupy.ndarray or cupy.poly1d): first input polynomial.
        a2 (scalar, cupy.ndarray or cupy.poly1d): second input polynomial.

    Returns:
        cupy.ndarray or cupy.poly1d: The sum of the inputs.

    .. seealso:: :func:`numpy.polyadd`
    """
    # Left-pad the shorter coefficient array to the longer one's length
    # (coefficients are stored highest degree first), then add in place.
    longer, shorter = (a1, a2) if a1.size >= a2.size else (a2, a1)
    result = cupy.pad(shorter, (longer.size - shorter.size, 0))
    result = result.astype(cupy.result_type(a1, a2), copy=False)
    result += longer
    return result
@_wraps_polyroutine
def polysub(a1, a2):
    """Computes the difference of two polynomials.

    Args:
        a1 (scalar, cupy.ndarray or cupy.poly1d): first input polynomial.
        a2 (scalar, cupy.ndarray or cupy.poly1d): second input polynomial.

    Returns:
        cupy.ndarray or cupy.poly1d: The difference of the inputs.

    .. seealso:: :func:`numpy.polysub`
    """
    if a1.shape[0] <= a2.shape[0]:
        # a2 is at least as long: left-pad a1 to a2's length and subtract
        # a2 in place.
        out = cupy.pad(a1, (a2.shape[0] - a1.shape[0], 0))
        out = out.astype(cupy.result_type(a1, a2), copy=False)
        out -= a2
    else:
        # a1 is longer: left-pad a2, then compute a1 - out in place via
        # out -= 2*out - a1   (i.e. out = out - 2*out + a1 = a1 - out).
        out = cupy.pad(a2, (a1.shape[0] - a2.shape[0], 0))
        out = out.astype(cupy.result_type(a1, a2), copy=False)
        out -= 2 * out - a1
    return out
@_wraps_polyroutine
def polymul(a1, a2):
    """Computes the product of two polynomials.

    Args:
        a1 (scalar, cupy.ndarray or cupy.poly1d): first input polynomial.
        a2 (scalar, cupy.ndarray or cupy.poly1d): second input polynomial.

    Returns:
        cupy.ndarray or cupy.poly1d: The product of the inputs.

    .. seealso:: :func:`numpy.polymul`
    """
    # Drop leading zero coefficients before convolving.
    p1 = cupy.trim_zeros(a1, trim='f')
    p2 = cupy.trim_zeros(a2, trim='f')
    # An all-zero operand trims to empty; restore it as the zero polynomial.
    if p1.size == 0:
        p1 = cupy.array([0.], p1.dtype)
    if p2.size == 0:
        p2 = cupy.array([0.], p2.dtype)
    # Polynomial multiplication == convolution of coefficient arrays.
    return cupy.convolve(p1, p2)
def _polypow_direct(x, n):
    """Raise polynomial `x` to integer power `n` via repeated convolution
    (exponentiation by squaring)."""
    if n == 0:
        return 1
    if n == 1:
        return x
    squared = cupy.convolve(x, x)
    if n % 2 == 0:
        return _polypow(squared, n // 2)
    return cupy.convolve(x, _polypow(squared, (n - 1) // 2))
def _polypow(x, n):
    """Raise polynomial `x` (1-d coefficient array) to non-negative integer
    power `n`, dispatching to direct or FFT convolution heuristically."""
    if n == 0:
        return 1
    if n == 1:
        return x
    # Reuse cupy's convolution-method heuristic to pick the cheaper path.
    method = cupy._math.misc._choose_conv_method(x, x, 'full')
    if method == 'direct':
        return _polypow_direct(x, n)
    elif method == 'fft':
        # FFT path: pointwise power in the frequency domain — one forward
        # transform, one exponentiation, one inverse transform.
        if x.dtype.kind == 'c':
            fft, ifft = cupy.fft.fft, cupy.fft.ifft
        else:
            # Real input: use the cheaper real-to-complex transforms.
            fft, ifft = cupy.fft.rfft, cupy.fft.irfft
        # Degree of x**n is (deg x) * n, so this many coefficients.
        out_size = (x.size - 1) * n + 1
        # Pad to a fast FFT length to avoid slow prime-sized transforms.
        size = cupyx.scipy.fft.next_fast_len(out_size)
        fx = fft(x, size)
        # In-place power (out=fx) avoids an extra allocation.
        fy = cupy.power(fx, n, fx)
        y = ifft(fy, size)
        return y[:out_size]
    else:
        assert False
def _polyfit_typecast(x):
if x.dtype.kind == 'c':
return x.astype(numpy.complex128, copy=False)
return x.astype(numpy.float64, copy=False)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """Returns the least squares fit of polynomial of degree deg
    to the data y sampled at x.

    Args:
        x (cupy.ndarray): x-coordinates of the sample points of shape (M,).
        y (cupy.ndarray): y-coordinates of the sample points of shape
            (M,) or (M, K).
        deg (int): degree of the fitting polynomial.
        rcond (float, optional): relative condition number of the fit.
            The default value is ``len(x) * eps``.
        full (bool, optional): indicator of the return value nature.
            When False (default), only the coefficients are returned.
            When True, diagnostic information is also returned.
        w (cupy.ndarray, optional): weights applied to the y-coordinates
            of the sample points of shape (M,).
        cov (bool or str, optional): if given, returns the coefficients
            along with the covariance matrix.

    Returns:
        cupy.ndarray or tuple:
            p (cupy.ndarray of shape (deg + 1,) or (deg + 1, K)):
                Polynomial coefficients from highest to lowest degree
            residuals, rank, singular_values, rcond \
            (cupy.ndarray, int, cupy.ndarray, float):
                Present only if ``full=True``.
                Sum of squared residuals of the least-squares fit,
                rank of the scaled Vandermonde coefficient matrix,
                its singular values, and the specified value of ``rcond``.
            V (cupy.ndarray of shape (M, M) or (M, M, K)):
                Present only if ``full=False`` and ``cov=True``.
                The covariance matrix of the polynomial coefficient estimates.

    .. warning::
        numpy.RankWarning: The rank of the coefficient matrix in the
        least-squares fit is deficient. It is raised if ``full=False``.

    .. seealso:: :func:`numpy.polyfit`
    """
    if x.dtype.char == 'e' and y.dtype.kind == 'b':
        raise NotImplementedError('float16 x and bool y are not'
                                  ' currently supported')
    if y.dtype == numpy.float16:
        raise TypeError('float16 y are not supported')
    # Promote inputs to float64/complex128 for a numerically stable solve.
    x = _polyfit_typecast(x)
    y = _polyfit_typecast(y)
    deg = int(deg)
    if deg < 0:
        raise ValueError('expected deg >= 0')
    if x.ndim != 1:
        raise TypeError('expected 1D vector for x')
    if x.size == 0:
        raise TypeError('expected non-empty vector for x')
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError('expected 1D or 2D array for y')
    if x.size != y.shape[0]:
        raise TypeError('expected x and y to have same length')
    # Vandermonde matrix with columns reversed so the highest power comes
    # first, matching the returned coefficient order.
    lhs = cupy.polynomial.polynomial.polyvander(x, deg)[:, ::-1]
    rhs = y
    if w is not None:
        w = _polyfit_typecast(w)
        if w.ndim != 1:
            raise TypeError('expected a 1-d array for weights')
        if w.size != x.size:
            raise TypeError('expected w and y to have the same length')
        # Apply the weights to both sides of the least-squares system.
        lhs *= w[:, None]
        if rhs.ndim == 2:
            w = w[:, None]
        rhs *= w
    if rcond is None:
        rcond = x.size * cupy.finfo(x.dtype).eps
    # Scale the columns of lhs to improve the conditioning of the solve;
    # the scaling is undone on the solution below.
    scale = cupy.sqrt((cupy.square(lhs)).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = cupy.linalg.lstsq(lhs, rhs, rcond)
    if y.ndim > 1:
        scale = scale.reshape(-1, 1)
    c /= scale
    order = deg + 1
    if rank != order and not full:
        msg = 'Polyfit may be poorly conditioned'
        warnings.warn(msg, numpy.RankWarning, stacklevel=4)
    if full:
        if resids.dtype.kind == 'c':
            resids = cupy.absolute(resids)
        return c, resids, rank, s, rcond
    if cov:
        # Covariance of the coefficients, rescaled back to original columns.
        base = cupy.linalg.inv(cupy.dot(lhs.T, lhs))
        base /= cupy.outer(scale, scale)
        if cov == 'unscaled':
            factor = 1
        elif x.size > order:
            factor = resids / (x.size - order)
        else:
            raise ValueError('the number of data points must exceed order'
                             ' to scale the covariance matrix')
        if y.ndim != 1:
            base = base[..., None]
        return c, base * factor
    return c
def polyval(p, x):
    """Evaluates a polynomial at specific values.

    Args:
        p (cupy.ndarray or cupy.poly1d): input polynomial.
        x (scalar, cupy.ndarray): values at which the polynomial
            is evaluated.

    Returns:
        cupy.ndarray or cupy.poly1d: polynomial evaluated at x.

    .. warning::
        This function doesn't currently support poly1d values to evaluate.

    .. seealso:: :func:`numpy.polyval`
    """
    if isinstance(p, cupy.poly1d):
        p = p.coeffs
    if not isinstance(p, cupy.ndarray) or p.ndim == 0:
        raise TypeError('p must be 1d ndarray or poly1d object')
    if p.ndim > 1:
        raise ValueError('p must be 1d array')
    if isinstance(x, cupy.poly1d):
        # TODO(asi1024): Needs performance improvement.
        dtype = numpy.result_type(x.coeffs, 1)
        res = cupy.poly1d(cupy.array([0], dtype=dtype))
        prod = cupy.poly1d(cupy.array([1], dtype=dtype))
        # Accumulate c * x**k term by term, lowest degree first.
        for c in p[::-1]:
            res = res + prod * c
            prod = prod * x
        return res
    dtype = numpy.result_type(p.dtype.type(0), x)
    p = p.astype(dtype, copy=False)
    if p.size == 0:
        return cupy.zeros(x.shape, dtype)
    if dtype == numpy.bool_:
        # Special-cased bool path (numpy-compatible result type).
        return p.any() * x + p[-1]
    if not cupy.isscalar(x):
        # Trailing axis broadcasts against the power table built below.
        x = cupy.asarray(x, dtype=dtype)[..., None]
    # Evaluate via an explicit power table: sum_k p[k] * x**k.
    x = x ** cupy.arange(p.size, dtype=dtype)
    return (p[::-1] * x).sum(axis=-1, dtype=dtype)
def roots(p):
    """Computes the roots of a polynomial with given coefficients.

    Args:
        p (cupy.ndarray or cupy.poly1d): polynomial coefficients.

    Returns:
        cupy.ndarray: polynomial roots.

    .. warning::
        This function doesn't support currently polynomial coefficients
        whose companion matrices are general 2d square arrays. Only those
        with complex Hermitian or real symmetric 2d arrays are allowed.
        The current `cupy.roots` doesn't guarantee the order of results.

    .. seealso:: :func:`numpy.roots`
    """
    if isinstance(p, cupy.poly1d):
        p = p.coeffs
    if p.dtype.kind == 'b':
        raise NotImplementedError('boolean inputs are not supported')
    if p.ndim == 0:
        raise TypeError('0-dimensional input is not allowed')
    if p.size < 2:
        return cupy.array([])
    # as_series trims zeros; coefficients are reversed because the
    # cupy.polynomial helpers expect lowest-degree-first order.
    [p] = cupy.polynomial.polyutils.as_series([p[::-1]])
    if p.size < 2:
        return cupy.array([])
    if p.size == 2:
        # Linear polynomial: single root -p0/p1.
        out = (-p[0] / p[1])[None]
        if p[0] == 0:
            out = out.real.astype(numpy.float64)
        return out
    cmatrix = cupy.polynomial.polynomial.polycompanion(p)
    # TODO(Dahlia-Chehata): Support after cupy.linalg.eigvals is supported
    if cupy.array_equal(cmatrix, cmatrix.conj().T):
        # Hermitian/symmetric companion matrix: eigvalsh is available.
        out = cupy.linalg.eigvalsh(cmatrix)
    else:
        raise NotImplementedError('Only complex Hermitian and real '
                                  'symmetric 2d arrays are supported '
                                  'currently')
    return out.astype(p.dtype)
| cupy/cupy | cupy/lib/_routines_poly.py | _routines_poly.py | py | 12,381 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "cupy.poly1d",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cupy.isscalar",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cupy.atleast_1d",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cupy.ndarray",
"li... |
22215949702 | import pytest
from hamcrest import assert_that, equal_to
from gairl.memory.prioritized_replay_buffer import _SumTree
def test_init_valid():
    # When: a fresh tree with capacity 8 is created.
    tree = _SumTree(8)
    # Then: it is empty, the priority range defaults to (1, 1), and both
    # the data slots and the binary-tree array (2*8 - 1 nodes) are zeroed.
    assert_that(tree.total_priority, equal_to(0))
    assert_that(tree.priorities_range, equal_to((1, 1)))
    assert_that(tree._data, equal_to([0] * 8))
    assert_that(tree._tree, equal_to([0] * 15))
def test_init_capacity_not_power_2():
    # Given: a capacity that is not a power of two.
    bad_capacity = 10
    # When / Then: construction must be rejected.
    with pytest.raises(AssertionError):
        _SumTree(bad_capacity)
def test_add_not_full():
    # Given: a tree with more capacity (16) than the number of additions.
    tree = _SumTree(16)
    # When: seven entries with assorted payloads and priorities are added.
    entries = [
        ((1, 'a', 1.), 1),
        (('b', 2, 2.), 0.1),
        (195, 3),
        ((3, 3., 'c'), 1),
        ('d', 5),
        (19287412.214121, 0.1),
        (0, 9),
    ]
    for payload, priority in entries:
        tree.add(payload, priority)
    # Then: min/max bookkeeping and both internal arrays match expectations.
    assert_that(tree.priorities_range, equal_to((0.1, 9)))
    assert_that(tree._max_priorities_num, equal_to(1))
    assert_that(tree._min_priorities_num, equal_to(2))
    assert_that(tree._data, equal_to([
        (1, 'a', 1.), ('b', 2, 2.), 195,
        (3, 3., 'c'), 'd', 19287412.214121, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0
    ]))
    assert_that(tree._tree, equal_to([
        19.2,
        19.2, 0,
        5.1, 14.1, 0, 0,
        1.1, 4, 5.1, 9, 0, 0, 0, 0,
        1, 0.1, 3, 1, 5, 0.1, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]))
def test_add_overflow():
    # Given: a tree that can only hold 4 entries.
    tree = _SumTree(4)
    # When: seven entries are added, so the oldest three are overwritten.
    for payload, priority in [
        ((1, 'a', 1.), 1),
        (('b', 2, 2.), 0.1),
        (195, 3),
        ((3, 3., 'c'), 1),
        ('d', 5),
        (19287412.214121, 0.1),
        (0, 9),
    ]:
        tree.add(payload, priority)
    # Then: only the four most recent entries survive (wrapped around).
    assert_that(tree.priorities_range, equal_to((0.1, 9)))
    assert_that(tree._max_priorities_num, equal_to(1))
    assert_that(tree._min_priorities_num, equal_to(1))
    assert_that(tree._data, equal_to(['d', 19287412.214121, 0, (3, 3., 'c')]))
    assert_that(tree._tree, equal_to([15.1, 5.1, 10, 5, 0.1, 9, 1]))
def test_get_not_full():
    # Given: a partially filled tree with hand-crafted internal state.
    tree = _SumTree(16)
    tree._data = [
        (1, 'a', 1.), ('b', 2, 2.), 195,
        (3, 3., 'c'), 'd', 19287412.214121, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0
    ]
    tree._tree = [
        22,
        22, 0,
        7, 15, 0, 0,
        4, 3, 6, 9, 0, 0, 0, 0,
        1, 3, 1, 2, 1, 5, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]
    # When: sampling values across the whole cumulative-priority range.
    samples = [9.3, 10.7, 5.1, 0, 22, 13.001, 1.9]
    results = [tree.get_data(sample) for sample in samples]
    # Then: each sample lands on the leaf whose cumulative interval covers it.
    expected = [
        (19287412.214121, 5, 5),
        (19287412.214121, 5, 5),
        ((3, 3., 'c'), 3, 2),
        ((1, 'a', 1.), 0, 1),
        (0, 6, 9),
        (0, 6, 9),
        (('b', 2, 2.), 1, 3),
    ]
    for result, expectation in zip(results, expected):
        assert_that(result, equal_to(expectation))
def test_get_overflow():
    # Given: a full (previously wrapped-around) tree of capacity 4.
    tree = _SumTree(4)
    tree._data = ['d', 19287412.214121, 0, (3, 3., 'c')]
    tree._tree = [
        15,
        5, 10,
        4, 1, 7, 3
    ]
    # When: sampling across the cumulative range, including the total itself.
    samples = [0.31, 4.7, 11.9999, 12.1, 15]
    results = [tree.get_data(sample) for sample in samples]
    # Then: samples map onto the matching leaves.
    expected = [
        ('d', 0, 4),
        (19287412.214121, 1, 1),
        (0, 2, 7),
        ((3, 3., 'c'), 3, 3),
        ((3, 3., 'c'), 3, 3),
    ]
    for result, expectation in zip(results, expected):
        assert_that(result, equal_to(expectation))
def test_get_higher_than_total():
    # Given: a tree whose priorities sum to 15.
    tree = _SumTree(4)
    tree._data = ['d', 19287412.214121, 0, (3, 3., 'c')]
    tree._tree = [15, 5, 10, 4, 1, 7, 3]
    # When / Then: sampling beyond the total priority is rejected.
    with pytest.raises(AssertionError):
        tree.get_data(15.001)
def test_update_priority_no_maxmin_change():
    # Given: a full tree with min priority 1 (x4) and max priority 4 (x1).
    tree = _SumTree(8)
    tree._data = ['d', 19287412.214121, 0, (3, 3., 'c')]
    tree._tree = [
        16,
        6, 10,
        4, 2, 7, 3,
        1, 3, 1, 1, 4, 3, 1, 2
    ]
    tree._min_priority = 1
    tree._min_priorities_num = 4
    tree._max_priority = 4
    tree._max_priorities_num = 1
    # When: several leaves move to a priority strictly inside (min, max).
    for leaf_index in (0, 2, 5, 6):
        tree.update_priority(leaf_index, 2)
    # Then: the (min, max) range is unchanged, counts drop to 1 each, and
    # every internal sum is recomputed; the payloads are untouched.
    assert_that(tree.priorities_range, equal_to((1, 4)))
    assert_that(tree._max_priorities_num, equal_to(1))
    assert_that(tree._min_priorities_num, equal_to(1))
    assert_that(tree._data, equal_to(['d', 19287412.214121, 0, (3, 3., 'c')]))
    assert_that(tree._tree, equal_to([
        18,
        8, 10,
        5, 3, 6, 4,
        2, 3, 2, 1, 4, 2, 2, 2
    ]))
def test_update_priority_maxmin_run_out():
    # Given: a full tree with min priority 1 (x2) and max priority 4 (x1).
    tree = _SumTree(8)
    tree._data = ['d', 19287412.214121, 0, (3, 3., 'c')]
    tree._tree = [
        18,
        8, 10,
        5, 3, 7, 3,
        2, 3, 1, 2, 4, 3, 1, 2
    ]
    tree._min_priority = 1
    tree._min_priorities_num = 2
    tree._max_priority = 4
    tree._max_priorities_num = 1
    # When: updates eliminate every leaf holding the old min and max.
    for leaf_index, new_priority in [
        (4, 3), (2, 2), (6, 3), (1, 3), (3, 2), (5, 2)
    ]:
        tree.update_priority(leaf_index, new_priority)
    # Then: the range tightens to the surviving extremes (2, 3) and the
    # counters track how many leaves hold each of them.
    assert_that(tree.priorities_range, equal_to((2, 3)))
    assert_that(tree._max_priorities_num, equal_to(3))
    assert_that(tree._min_priorities_num, equal_to(5))
    assert_that(tree._data, equal_to(['d', 19287412.214121, 0, (3, 3., 'c')]))
    assert_that(tree._tree, equal_to([
        19,
        9, 10,
        5, 4, 5, 5,
        2, 3, 2, 2, 3, 2, 3, 2
    ]))
def test_update_priority_maxmin_overwrite():
    # Given: a full tree with min priority 1 (x2) and max priority 4 (x1).
    tree = _SumTree(8)
    tree._data = ['d', 19287412.214121, 0, (3, 3., 'c')]
    tree._tree = [
        18,
        8, 10,
        5, 3, 7, 3,
        2, 3, 1, 2, 4, 3, 1, 2
    ]
    tree._min_priority = 1
    tree._min_priorities_num = 2
    tree._max_priority = 4
    tree._max_priorities_num = 1
    # When: updates push priorities beyond both current extremes (5 and 0.5).
    tree.update_priority(1, 5)
    tree.update_priority(4, 0.5)
    tree.update_priority(3, 1)
    tree.update_priority(7, 5)
    # Then: the range widens to the new extremes and the counters follow.
    assert_that(tree.priorities_range, equal_to((0.5, 5)))
    assert_that(tree._max_priorities_num, equal_to(2))
    assert_that(tree._min_priorities_num, equal_to(1))
    assert_that(tree._data, equal_to(['d', 19287412.214121, 0, (3, 3., 'c')]))
    assert_that(tree._tree, equal_to([
        18.5,
        9, 9.5,
        7, 2, 3.5, 6,
        2, 5, 1, 1, 0.5, 3, 1, 5
    ]))
| K-Kielak/gairl | tests/memory/test_sum_tree.py | test_sum_tree.py | py | 6,504 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gairl.memory.prioritized_replay_buffer._SumTree",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "hamcrest.assert_that",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "hamcrest.equal_to",
"line_number": 12,
"usage_type": "call"
},
{
... |
41019665619 | import os
from pathlib import Path
import structlog
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent.parent

# Django framework apps, third-party packages, then local project apps.
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "rest_framework",
    "rest_framework_api_key",
    "drf_spectacular",
    "django_assets",
    "django_celery_beat",
    "django_celery_results",
    "django_extensions",
    "django_filters",
    "django_admin_filters",
    "crispy_forms",
    "jsonify",
    "import_export",
    "gerrit",
    "tracker",
    "hotfix",
    "panel",
    "release_dashboard",
    "build",
    "buildinfo",
    "release_changed",
    "repoapi",
]

# Middleware order matters: structlog request logging runs last.
MIDDLEWARE = (
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "django.middleware.security.SecurityMiddleware",
    "django_structlog.middlewares.RequestMiddleware",
)

# Propagate structlog request metadata into Celery task logs.
DJANGO_STRUCTLOG_CELERY_ENABLED = True

ROOT_URLCONF = "repoapi.urls"

# Authentication views provided by DRF's browsable API.
LOGIN_URL = "rest_framework:login"
LOGOUT_URL = "rest_framework:logout"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": ["repoapi/templates"],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "repoapi.wsgi.application"

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True

DEFAULT_AUTO_FIELD = "django.db.models.AutoField"

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
    "django_assets.finders.AssetsFinder",
)
STATIC_ROOT = BASE_DIR / "static_media/"

REST_FRAMEWORK = {
    "PAGE_SIZE": 10,
    # NOTE: implicit string concatenation across the next two lines.
    "DEFAULT_PAGINATION_CLASS": "rest_framework"
    ".pagination.LimitOffsetPagination",
    "DEFAULT_FILTER_BACKENDS": (
        "django_filters.rest_framework.DjangoFilterBackend",
    ),
    "DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
}

# OpenAPI schema metadata served by drf-spectacular.
SPECTACULAR_SETTINGS = {
    "TITLE": "RepoApi",
    "DESCRIPTION": "repoapi, one ring to rule them all",
    "VERSION": "1.0.0",
    "CONTACT": {
        "email": "development@sipwise.com",
        "url": "https://www.sipwise.com/",
    },
    "LICENSE": {
        "name": "GPLv3",
        "url": "https://www.gnu.org/licenses/gpl-3.0.en.html",
    },
}

# Console logging rendered through structlog's development formatter.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "plain_console": {
            "()": structlog.stdlib.ProcessorFormatter,
            "processor": structlog.dev.ConsoleRenderer(),
        },
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "plain_console",
        }
    },
    "loggers": {
        "django_structlog": {
            "handlers": ["console"],
            "level": "INFO",
            "propagate": False,
        },
        "repoapi": {
            "handlers": ["console"],
            "level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
        },
    },
}

JENKINS_TOKEN = "sipwise_jenkins_ci"
CRISPY_TEMPLATE_PACK = "bootstrap3"

# Celery: JSON everywhere, results in the database, schedules in django-celery-beat.
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
CELERY_ACCEPT_CONTENT = ["application/json"]
CELERY_RESULT_BACKEND = "django-db"
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"

# Header name clients must send for rest_framework_api_key authentication.
API_KEY_CUSTOM_HEADER = "HTTP_API_KEY"
| sipwise/repoapi | repoapi/settings/common.py | common.py | py | 4,245 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "structlog.stdlib",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "structlog.dev.ConsoleRenderer",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "str... |
31014089660 |
# Imports
import PySimpleGUI as sg
import openai
import os
from openai.error import APIConnectionError, AuthenticationError
import threading
from datetime import datetime
from peewee import SqliteDatabase, Model, CharField, TextField
# Envs
from dotenv import load_dotenv, find_dotenv
# Load the nearest .env file and read the OpenAI key stored under the
# literal variable name 'openai.api_key' (None when the variable is absent).
_ = load_dotenv(find_dotenv())
openai.api_key = os.getenv('openai.api_key')
# Definitions
# Window title and menu labels (Spanish UI strings).
APP_TITLE="GUI para Chat GPT"
FILE="Archivo"
RESET_CONTEXT="Restablecer contexto"
SAVE_CHAT="Exportar chat"
QUIT="Salir"
OPTIONS="Opciones"
LOAD_API_KEY="Cargar API Key"
HELP="Ayuda"
ABOUT="Acerca de"
DEFAULT_THEME="DarkGrey1"
# PySimpleGUI element keys.
CHAT_RESULT_KEY="-chat_result-"
PROMPT_KEY="-prompt_input-"
SUBMIT_KEY="-send_prompt-"
CHAT_LISTBOX = "-select-a-chat"
DELETE_CHAT_BUTTON = "-delete-chat-button"
REGENERATE_CHAT_BUTTON = "-regenerate-chat-button"
# Theme submenu: a label followed by the selectable theme names.
THEMES=["Temas", ["Default", "Black", "Dark15", "DarkGrey2", "DarkGrey3", "DarkBrown1"]]
ABOUT_TEXT = "ACERCA DE "
# Define database connection
db = SqliteDatabase('my_chats.db')
# Define table
class Chat(Model):
    """One saved conversation turn: title, user query and bot response."""
    title = CharField()     # display name shown in the history listbox
    query = TextField()     # the user's prompt
    response = TextField()  # the assistant's reply

    class Meta:
        # Bind the model to the SQLite database defined above.
        database = db
# Migrate
# Create the Chat table if it does not exist yet (idempotent).
db.create_tables([Chat])
# Program
class Application:
    """PySimpleGUI desktop front-end for the OpenAI chat API.

    Conversations are persisted as ``Chat`` rows in SQLite; the left panel
    lists past exchanges and the right panel shows the active conversation.
    """

    def __init__(self):
        # Default theme
        sg.theme(DEFAULT_THEME)
        # Stored chats (a peewee query over the Chat table)
        self.chats = Chat.select()
        # Default system context prepended to every conversation
        self.default_context = {"role": "system",
                                "content": "Eres un asistente muy útil."}
        # Message history sent to the API (context + user/assistant turns)
        self.messages = [self.default_context]
        # Menu bar definition
        self.menu_def = [
            [FILE, [RESET_CONTEXT, SAVE_CHAT, "---", QUIT]],
            [OPTIONS, [THEMES, LOAD_API_KEY]],
            [HELP, [ABOUT]]
        ]
        self.menu_bar = sg.Menu(self.menu_def)
        # Left frame: history listbox plus regenerate/delete buttons
        self.chat_list = sg.Listbox(values=list(map(lambda c : c.title, self.chats)), size=(25, 10), expand_y=True, enable_events=True, key=CHAT_LISTBOX)
        self.new_chat_button = sg.Button("Regenerar consulta", key=REGENERATE_CHAT_BUTTON)
        self.delete_chat_button = sg.Button("Eliminar", key=DELETE_CHAT_BUTTON)
        self.left_frame_layout = [[self.chat_list],[self.new_chat_button, self.delete_chat_button]]
        self.left_frame = sg.Frame(title="Historial de conversaciones", layout=self.left_frame_layout, expand_y=True)
        # Right panel: conversation view, prompt input and submit button
        self.chat_result = sg.Multiline(size=(100, 25), key=CHAT_RESULT_KEY)
        self.prompt_label = sg.Text("Sobre qué quieres hablar?:")
        self.prompt = sg.Multiline(key=PROMPT_KEY, expand_x=True, size=(100, 5))
        self.submit_button = sg.Button("Enviar", key=SUBMIT_KEY, enable_events=True, bind_return_key=True, expand_x=True)
        self.right_frame_layout = [
            [self.chat_result],
            [self.prompt_label],
            [self.prompt],
            [self.submit_button]
        ]
        self.right_frame = sg.Frame(title="Conversación", layout=self.right_frame_layout)
        # Assemble the window
        self.layout = [
            [self.menu_bar],
            [self.left_frame, sg.VerticalSeparator(), self.right_frame]
        ]
        self.window = sg.Window(APP_TITLE, self.layout)

    # Run a loop to handle the window's events
    def start(self):
        """Run the blocking GUI event loop until the window is closed."""
        first_load = True
        while True:
            # Read the next event
            event, values = self.window.read()
            if first_load:
                # Populate the history list on the first pass only.
                self.refresh_chat_list()
                first_load = False
            # Window closed or Quit selected
            if event == sg.WIN_CLOSED or event == QUIT:
                break
            # Submit clicked (or Return pressed in the prompt box)
            if event in (SUBMIT_KEY, 'Return:'+PROMPT_KEY):
                # If an API key still has to be provided
                if self.needs_api_key():
                    # Tell the user a key must be loaded first
                    sg.popup("No se cargó ninguna API Key", "No se cargó ninguna API Key. Para conseguir una visita https://platform.openai.com\nLuego puedes cargarla a través de Opciones>Cargar API Key")
                else:
                    # Get the user's query
                    query = values[PROMPT_KEY]
                    # Delegate to the query-processing method
                    self.send_query(query)
                    # Clear the prompt box
                    self.window[PROMPT_KEY].update(value="")
            # Load the API key
            elif event == LOAD_API_KEY:
                self.load_api_key()
            # Export the conversation to a file
            elif event == SAVE_CHAT:
                # Ask the user where to save
                filename = sg.tk.filedialog.asksaveasfilename(
                    defaultextension='txt',
                    filetypes=(("Archivos de texto", "*.txt"), ("Todos los archivos", "*.*")),
                    parent=self.window.TKroot,
                    title="Guardar como",
                    initialfile=self.chat_list.get()[0]
                )
                # If a file was chosen
                if filename != None and len(filename) > 0:
                    # Export the conversation
                    self.save_chat_to(filename)
            # About dialog
            elif event == ABOUT:
                # Show the about popup
                sg.popup(ABOUT_TEXT)
            # Reset conversation context
            elif event == RESET_CONTEXT:
                self.reset_context()
            elif event == CHAT_LISTBOX:
                # If the list has at least one element
                if self.chat_list.get():
                    # Get the selected title
                    selected_title = self.chat_list.get()[0]
                    # Load that chat into the view
                    self.load_chat(selected_title)
                    # Find its index
                    index = self.chat_list.get_list_values().index(selected_title)
                    # Mark it as selected
                    self.chat_list.update(set_to_index=index)
            elif event == DELETE_CHAT_BUTTON:
                delete = sg.popup_yes_no("Desea eliminar la conversación seleccionada?")
                if delete == "Yes":
                    self.delete_chat(self.chat_list.get()[0])
            elif event == REGENERATE_CHAT_BUTTON:
                self.regenerate_query(self.chat_list.get()[0])
        # Destroy/close the window once the loop ends
        self.window.close()

    # Process a query
    def send_query(self, query):
        """Persist the query and fetch the response on a background thread."""
        # New record; the title embeds a timestamp so repeats stay distinct.
        new_chat = Chat(title=query[:45]+str(datetime.now()), query=query, response="Esperando respuesta")
        # Save it to the database
        new_chat.save()
        # Show the pending chat immediately
        self.load_chat(new_chat.title)
        # Wait for the server's response without blocking the GUI thread
        threading.Thread(target=self.push_response, args=[new_chat.title, query]).start()

    def set_query_response(self, title, response):
        """Attach the model's response to the chat identified by ``title``."""
        # Look the chat up by title
        selected_chat = Chat.get(Chat.title == title)
        # Update the response
        selected_chat.response = response
        # Persist
        selected_chat.save()
        # Reload the view with the result
        self.load_chat(title)

    def delete_chat(self, title):
        """Delete the chat identified by ``title`` and refresh the list."""
        selected_chat = Chat.get(Chat.title == title)
        selected_chat.delete_instance()
        self.refresh_chat_list()

    def load_chat(self, title):
        """Render the stored query/response pair in the conversation view."""
        # Fetch the chat
        chat_from_db = Chat.get(Chat.title == title)
        # Build the display text
        chat_text = f"Usuario: {chat_from_db.query}\n"
        chat_text += f"ChatBot: {chat_from_db.response}\n"
        # Show the text in the chat view/result
        self.window[CHAT_RESULT_KEY].update(value=chat_text)
        self.refresh_chat_list()

    def refresh_chat_list(self):
        """Reload chats from the database into the history listbox."""
        # Reload the chat query
        self.chats = Chat.select()
        # Refresh the listbox titles, selecting the most recent entry
        self.chat_list.update(values=[chat.title for chat in self.chats], set_to_index=len(self.chats)-1)

    def regenerate_query(self, title):
        """Re-send the query of an existing chat."""
        # Find the chat in the database
        selected_chat = Chat.get(Chat.title == title)
        # And send the query again
        self.send_query(selected_chat.query)

    def load_api_key(self):
        """Prompt for an OpenAI API key and persist it to the .env file."""
        # Ask the user to paste the key via a popup
        new_api_key = sg.popup_get_text(title="Cargar API Key", message="Pega aquí tu API Key:", default_text=openai.api_key)
        # Keep the previous key when the dialog is cancelled
        openai.api_key = new_api_key if new_api_key != None else openai.api_key
        with open(".env", "w") as file:
            file.write(f"openai.api_key={openai.api_key}")

    # New chat
    def create_new_chat(self, title, content):
        """Append an in-memory chat entry and refresh the listbox.

        NOTE(review): ``self.chats`` is a peewee ModelSelect (see __init__
        and refresh_chat_list), which has no ``append``; confirm this code
        path actually works when reached from push_to_chat.
        """
        new_chat = {
            "title": title,
            "messages": content
        }
        self.chats.append(new_chat)
        self.chat_list.update(values=[chat.title for chat in self.chats])

    # Reset the chat
    def reset_context(self):
        """Drop the conversation history and clear both text areas."""
        self.messages = [self.default_context]
        self.window[CHAT_RESULT_KEY].update(value="")
        self.window[PROMPT_KEY].update(value="")

    # Append text to the chat
    def push_to_chat(self, name, text):
        """Append a ``name: text`` line to the conversation view."""
        if len(self.chats) == 0:
            self.create_new_chat(text[:20], text)
        chat = self.window[CHAT_RESULT_KEY].get()
        chat += "\n" if chat != "" else ""  # blank separator after existing text
        chat += name
        chat += ": "
        chat += text
        chat += "\n"
        self.window[CHAT_RESULT_KEY].update(value=chat)

    def push_response(self, title, query):
        """Send the query to the API and store the reply (worker thread)."""
        try:
            # Add the query to the running context
            self.messages.append({"role": "user", "content": query})
            # Send the request
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo", messages=self.messages)
            # Extract the assistant's text from the response
            response_content = response.choices[0].message.content
            # Add it to the running context
            self.messages.append({"role": "assistant", "content": response_content})
            # Associate it with the stored query
            self.set_query_response(title, response_content)
        except APIConnectionError as ace:
            self.push_to_chat("Sistema", "Ocurrió un error al conectarse con el servidor. Asegurate de que tienes accesso a internet")
        except AuthenticationError as authEx:
            self.push_to_chat("Sistema", "Error de autenticación. Asegúrese de haber proporcionado una API KEY válida")

    def needs_api_key(self):
        # NOTE(review): os.getenv returns None when the variable is unset,
        # so this equality check misses the unset (None) case — confirm.
        return openai.api_key == ""

    def save_chat_to(self, filename):
        """Write the visible conversation text to ``filename``."""
        with open(filename, "w") as file:
            file.write(self.window[CHAT_RESULT_KEY].get())
# Build the GUI application (the event loop is started right below).
app = Application()
app.start() | montexbjeliseo/gui_for_chatgpt | gui_chat_gpt_api_python.pyw | gui_chat_gpt_api_python.pyw | pyw | 11,379 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dotenv.find_dotenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "openai.api_key",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",... |
17895408730 | import tensorflow as tf
import utils # local file import from baselines.toxic_comments
class UtilsTest(tf.test.TestCase):
  """Tests for the cross-validation split helpers in utils."""

  def test_make_cv_train_and_eval_splits(self):
    # 10 folds of 10% each; folds 2 and 5 are the training folds.
    num_folds = 10
    train_fold_ids = ['2', '5']
    (train_split, eval_split, train_folds, eval_folds,
     eval_fold_ids) = utils.make_cv_train_and_eval_splits(
         num_folds, train_fold_ids, return_individual_folds=True)
    expected_train_folds = ['train[20%:30%]', 'train[50%:60%]']
    expected_eval_folds = [
        'train[0%:10%]', 'train[10%:20%]', 'train[30%:40%]', 'train[40%:50%]',
        'train[60%:70%]', 'train[70%:80%]', 'train[80%:90%]', 'train[90%:100%]'
    ]
    expected_eval_fold_ids = [0, 1, 3, 4, 6, 7, 8, 9]
    # Combined splits are the '+'-joined TFDS slicing expressions.
    self.assertEqual(train_split, 'train[20%:30%]+train[50%:60%]')
    self.assertEqual(eval_split, '+'.join(expected_eval_folds))
    self.assertListEqual(train_folds, expected_train_folds)
    self.assertListEqual(eval_folds, expected_eval_folds)
    self.assertListEqual(eval_fold_ids, expected_eval_fold_ids)
if __name__ == '__main__':
  # Run the test suite when this module is executed directly.
  tf.test.main()
| google/uncertainty-baselines | baselines/toxic_comments/utils_test.py | utils_test.py | py | 1,078 | python | en | code | 1,305 | github-code | 36 | [
{
"api_name": "tensorflow.test",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "utils.make_cv_train_and_eval_splits",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.test.main",
"line_number": 30,
"usage_type": "call"
},
{
"api_... |
15680118405 | import json
import logging
from .codebase import CodebaseAgent
from .gpt_agent import GPTAgent, Role
class ProgrammerAgent:
    """Agent that answers coding tasks using codebase context plus GPT."""

    def __init__(self, codebase_repo_path, gpt_api_key):
        # Inspects the repository on disk (directory structure, etc.).
        self.codebase_agent = CodebaseAgent(codebase_repo_path)
        # Conversational GPT client configured with the PROGRAMMER role.
        self.gpt_agent = GPTAgent(api_key=gpt_api_key, role=Role.PROGRAMMER,enable_memory=True)

    def get_code(self, task_description):
        """Get code from a task description by querying a directory structure.

        This function gathers the project info using a CodebaseAgent, then asks
        a GPTAgent for the code. The function also handles JSONDecodeError and
        general exceptions, logging the errors when they occur.

        Args:
            task_description (str): The task description to get code for.

        Returns:
            code_content (str): The requested code content, or 'No code found'
            if unavailable. Implicitly returns None when an exception is
            caught, since the handlers only log.
        """
        try:
            # Gather project info
            project_structure = self.codebase_agent.get_directory_structure()
            # Formulate query for GPTAgent
            query = f'Given the project structure {project_structure}, {task_description}.'
            response_content_str = self.gpt_agent.ask_query(query)
            logging.info(f'Raw Response: {response_content_str}')
            # Deserialize the response (expected to be a JSON object).
            response_content = json.loads(response_content_str)
            # Retrieve code from response
            code_content = response_content.get('code', 'No code found')
            return code_content
        except json.JSONDecodeError as e:
            logging.error(f'JSON Decode Error: {e}')
            logging.error(f'Failed Task Description: {task_description}')
except Exception as e:
logging.error(f'An unexpected error occurred: {e}') | csmathguy/SAGA | src/agent/programmer.py | programmer.py | py | 1,812 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "codebase.CodebaseAgent",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "gpt_agent.GPTAgent",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "gpt_agent.Role.PROGRAMMER",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": ... |
30076689122 | # general imports
from pathlib import Path
import os
import re
import argparse
from time import time
import multiprocessing as mp
from functools import partial
from collections import Counter
# processing imports
import numpy as np
import pandas as pd
from tqdm import tqdm
from collections import OrderedDict
from difflib import SequenceMatcher
import os
# pdfminer imports
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine
# local imports
import rse_watch.sententizer as sententizer
def get_list_of_pdfs_filenames(dirName):
    """Recursively collect the paths of all PDF files under ``dirName``.

    Args:
        dirName: root directory to walk (str or Path).

    Returns:
        list[Path]: paths of every file whose name ends in ``.pdf``
        (case-insensitive), in ``os.walk`` traversal order.
    """
    paths = []
    for path, subdirs, files in os.walk(dirName):
        for name in files:
            if name.lower().endswith(".pdf"):
                # Path join instead of string concatenation: portable and
                # avoids doubled separators.
                paths.append(Path(path) / name)
    return paths
def get_companies_metadata_dict(config):
    """Read companies metadata from config and turn it into dictionnary"""
    # Semicolon-separated file; utf-8-sig strips a possible BOM.
    annotations = pd.read_csv(config.annotations_file,
                              sep=";",
                              encoding='utf-8-sig')
    # One entry per project denomination, mapping to its column values.
    return annotations.set_index("project_denomination").T.to_dict()
def clean_child_str(child_str):
    """Normalise whitespace and hyphens in a text line parsed from a PDF."""
    # Collapse runs of whitespace into single spaces and trim the ends.
    normalized = ' '.join(child_str.split()).strip()
    # Dealing with hyphens:
    # 1. Turn genuine word separators ("x - y") into a longer hyphen
    #    (en dash) so they are not confused with line-break hyphenation.
    normalized = re.sub("[A-Za-z] - [A-Za-z]",
                        lambda match: match.group(0).replace(' - ', ' – '),
                        normalized)
    # 2. Reattach a dangling minus sign to the number that follows it.
    normalized = re.sub("(- )([0-9])", r"-\2", normalized)
    return normalized
class PDFPageDetailedAggregator(PDFPageAggregator):
    """
    Custom class to parse pdf and keep position of parsed text lines.
    """
    def __init__(self, rsrcmgr, pageno=1, laparams=None):
        PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)
        self.rows = []         # accumulated (page_nb, x1, y1, x2, y2, text) tuples
        self.page_number = 0   # 0-based counter of pages processed so far
        self.result = ""       # last LTPage received

    def receive_layout(self, ltpage):
        """pdfminer callback: collect every text line of the page with its bbox."""
        def render(item, page_number):
            # Recurse through the layout tree; only LTTextLine leaves emit rows.
            if isinstance(item, LTPage) or isinstance(item, LTTextBox):
                for child in item:
                    render(child, page_number)
            elif isinstance(item, LTTextLine):
                child_str = ''
                for child in item:
                    if isinstance(child, (LTChar, LTAnno)):
                        child_str += child.get_text()
                child_str = clean_child_str(child_str)
                if child_str:
                    # bbox == (pagenb, x1, y1, x2, y2, text)
                    row = (page_number, item.bbox[0], item.bbox[1], item.bbox[2], item.bbox[3], child_str)
                    self.rows.append(row)
                for child in item:
                    render(child, page_number)
            return

        render(ltpage, self.page_number)
        self.page_number += 1
        # Sort by page, then top-to-bottom (y decreasing in PDF coordinates).
        self.rows = sorted(self.rows, key=lambda x: (x[0], -x[2]))
        self.result = ltpage
def get_raw_content_from_pdf(input_file, rse_range=None):
    """
    Parse pdf file, within rse range of pages if needed, and return list of rows with all metadata
    :param input_file: PDF filename
    :param rse_range: (nb_first_page_rse:int, nb_last_page_rse:int) tuple, starting at 1
    :return: (device, first_page_nb) — device.rows holds (pagenb, x1, y1, x2, y2, text)
             tuples and page_nb starts at 0!
    """
    assert input_file.name.endswith(".pdf")
    if rse_range is not None and rse_range != "":
        # start at zero to match real index of pages
        pages_selection = range(rse_range[0] - 1, (rse_range[1] - 1) + 1)
    else:
        # no restriction: accept any page index (10000 is a generous upper bound)
        pages_selection = range(0, 10000)
    first_page_nb = pages_selection[0] + 1  # to start indexation at 1
    # BUGFIX: the file handle used to be leaked; close it deterministically.
    with open(input_file, 'rb') as fp:
        parser = PDFParser(fp)
        doc = PDFDocument(parser)
        rsrcmgr = PDFResourceManager()
        laparams = LAParams()
        device = PDFPageDetailedAggregator(rsrcmgr, laparams=laparams)
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        # Checked: only useful pages are actually parsed.
        for nb_page_parsed, page in enumerate(PDFPage.create_pages(doc)):
            if nb_page_parsed in pages_selection:
                interpreter.process_page(page)
                device.get_result()
    return device, first_page_nb
def clean_paragraph(p):
    """Curate a paragraph dict before save: re-attach words split by hyphenation."""
    # Words of >= 2 chars (avoids matching things like "A minus", "B minus"...).
    # Accents accepted thks to https://stackoverflow.com/a/24676780/8086033
    w_expr = "(?i)(?:(?![×Þß÷þø])[-'a-zÀ-ÿ]){2,}"
    # "géo - graphie" -> "géographie": real separators were converted to a longer
    # hyphen during parsing, so a plain " - " between words is hyphenation noise.
    hyphen_split = "{} - {}".format(w_expr, w_expr)
    p["paragraph"] = re.sub(hyphen_split,
                            lambda m: m.group(0).replace(' - ', ''),
                            p["paragraph"])
    # "Fort- Cros" -> "Fort-Cros": re-attach compound words split after the hyphen.
    compound_split = "{}- {}".format(w_expr, w_expr)
    p["paragraph"] = re.sub(compound_split,
                            lambda m: m.group(0).replace('- ', '-'),
                            p["paragraph"])
    return p
def get_paragraphs_from_raw_content(device, idx_first_page):
    """
    From parsed data with positional information, aggregate into paragraphs using simple rationale
    :param device: aggregator whose .rows holds (page_id, x_min, y_min, x_max, y_max, text)
    :param idx_first_page: real number of the first parsed page (rows use 0-based ids)
    :return: dataframe with one row per detected paragraph
    """
    # GROUPING BY COLUMN
    column_text_dict = OrderedDict()  # keep order of identification in the document.
    APPROXIMATION_FACTOR = 10  # to allow for slight shifts at beg of aligned text
    N_MOST_COMMON = 4  # e.g. nb max of columns of text that can be considered
    LEFT_SECURITY_SHIFT = 20  # to include way more shifted text of previous column
    counter = Counter()
    item_holder = []
    item_index = 0
    it_was_last_item = False
    changing_page = False  # BUGFIX: was unbound when the very first row is also the last
    while "There are unchecked items in device.rows":
        # add the item to the list of the page
        try:
            (page_id, x_min, _, _, _, _) = device.rows[item_index]
        except IndexError:  # BUGFIX: was "except e:", which raised NameError instead
            print("Wrong index {} for device.rows of len {}".format(item_index, len(device.rows)))
            # BUGFIX: format spec was malformed ("{]")
            print("was that last page ? : {}".format(it_was_last_item))
            raise
        item_holder.append(device.rows[item_index])
        # increment the count of x_min (bucketed to absorb small alignment shifts)
        counter[(x_min // APPROXIMATION_FACTOR) * APPROXIMATION_FACTOR] += 1
        # go to next item
        it_was_last_item = item_index == (len(device.rows) - 1)
        if not it_was_last_item:
            item_index += 1
            (next_page_id, _, _, _, _, _) = device.rows[item_index]
            changing_page = (next_page_id > page_id)
        if changing_page or it_was_last_item:  # flush the page just finished
            # Keep only x_min buckets frequent enough to be real text columns.
            top_n_x_min_approx = counter.most_common(N_MOST_COMMON)
            df = pd.DataFrame(top_n_x_min_approx, columns=["x_min_approx", "freq"])
            df = df[df["freq"] > df["freq"].sum() * (1 / (N_MOST_COMMON + 1))].sort_values(by="x_min_approx")
            x_min_approx = (df["x_min_approx"] - LEFT_SECURITY_SHIFT).values
            x_min_approx = x_min_approx * (x_min_approx > 0)
            left_x_min_suport = np.hstack([x_min_approx,
                                           [10000]])

            def x_grouper(x_min):
                # Snap an x_min to the column boundary immediately at or left of it.
                delta = left_x_min_suport - x_min
                x_group = left_x_min_suport[np.argmin(delta < 0) * 1 - 1]
                return x_group
            # iter on x_group and add items
            page_nb = idx_first_page + page_id
            column_text_dict[page_nb] = {}
            for item in item_holder:
                (page_id, x_min, y_min, x_max, y_max, text) = item
                page_nb = idx_first_page + page_id
                x_group = x_grouper(x_min)
                if x_group in column_text_dict[page_nb].keys():
                    column_text_dict[page_nb][x_group].append((y_min, y_max, text))
                else:
                    column_text_dict[page_nb][x_group] = [(y_min, y_max, text)]
            if it_was_last_item:
                break
            else:
                # restart from zero for next page
                counter = Counter()
                item_holder = []
    # CREATE THE PARAGRAPHS IN EACH COLUMN
    # define minimal conditions to define a change of paragraph:
    # Being spaced by more than the size of each line (min if different to account for titles)
    pararaphs_list = []
    paragraph_index = 0
    for page_nb, x_groups_dict in column_text_dict.items():
        for x_group_name, x_groups_data in x_groups_dict.items():
            x_groups_data = sorted(x_groups_data, key=lambda x: x[0],
                                   reverse=True)  # sort vertically, higher y = before
            x_groups_data_paragraphs = []
            p = {"y_min": x_groups_data[0][0],
                 "y_max": x_groups_data[0][1],
                 "paragraph": x_groups_data[0][2]}
            previous_height = p["y_max"] - p["y_min"]
            previous_y_min = p["y_min"]
            for y_min, y_max, paragraph in x_groups_data[1:]:
                current_height = y_max - y_min
                current_y_min = y_min
                max_height = max(previous_height, current_height)
                relative_var_in_height = (current_height - previous_height) / float(
                    current_height)  # Was min before ???
                relative_var_in_y_min = abs(current_y_min - previous_y_min) / float(current_height)
                positive_change_in_font_size = (relative_var_in_height > 0.05)
                change_in_font_size = abs(relative_var_in_height) > 0.05
                different_row = (relative_var_in_y_min > 0.7)
                large_gap = (relative_var_in_y_min > 1.2)
                artefact_to_ignore = (len(paragraph) <= 2)  # single "P" broke row parsing in auchan dpef
                if not artefact_to_ignore:
                    if (positive_change_in_font_size and different_row) or large_gap:  # always break
                        # break paragraph, start new one
                        p = clean_paragraph(p)
                        x_groups_data_paragraphs.append(p)
                        p = {"y_min": y_min,
                             "y_max": y_max,
                             "paragraph": paragraph}
                    else:
                        # paragraph continues
                        p["y_min"] = y_min
                        p["paragraph"] = p["paragraph"] + " " + paragraph
                    previous_height = current_height
                    previous_y_min = current_y_min
            # add the last paragraph of column
            p = clean_paragraph(p)
            x_groups_data_paragraphs.append(p)
            # structure the output
            for p in x_groups_data_paragraphs:
                pararaphs_list.append({"paragraph_id": paragraph_index,
                                       "page_nb": page_nb,
                                       "x_group": x_group_name,
                                       "y_min_paragraph": round(p["y_min"]),
                                       "y_max_paragraph": round(p["y_max"]),
                                       "paragraph": p["paragraph"]})
                paragraph_index += 1
    df_par = pd.DataFrame(data=pararaphs_list,
                          columns=["paragraph_id",
                                   "page_nb",
                                   "paragraph",
                                   "x_group",
                                   "y_min_paragraph",
                                   "y_max_paragraph"])
    return df_par
def parse_paragraphs_from_pdf(input_file, rse_ranges=None):
    """
    From filename, parse pdf and output structured paragraphs with filter on rse_ranges if present.
    :param input_file: filename ending with ".pdf" or ".PDF".
    :param rse_ranges: "(start, end)|(start, end)" string, or None to parse the whole document
    :return: df[[paragraph_id, page_nb, paragraph, x_group, y_min_paragraph, y_max_paragraph]]
    """
    from ast import literal_eval  # local import: only needed here
    if rse_ranges:
        # SECURITY: literal_eval only parses Python literals, unlike the original
        # eval() which would execute arbitrary code coming from the metadata CSV.
        rse_ranges_list = [literal_eval(rng) for rng in rse_ranges.split("|")]
    else:
        # No range information: parse the document once without restriction.
        rse_ranges_list = [None]
    df_paragraphs_list = []
    for rse_range in rse_ranges_list:
        device, idx_first_page = get_raw_content_from_pdf(input_file, rse_range=rse_range)
        df_par = get_paragraphs_from_raw_content(device, idx_first_page)
        df_paragraphs_list.append(df_par)
    return pd.concat(df_paragraphs_list, axis=0, ignore_index=True)
def compute_string_similarity(a, b):
    """Compares two strings and returns a similarity ratio between 0 and 1."""
    matcher = SequenceMatcher(a=a, b=b)
    return matcher.ratio()
def cut_footer(df_par, p=0.8, verbose=False):
    """
    Cut the paragraph with lowest y_min if other paragraphs are similar.
    The similarity is measured with function compute_string_similarity

    :param df_par: paragraph dataframe with 'project_denomination',
                   'y_min_paragraph', 'y_max_paragraph', 'paragraph' columns
    :param p: similarity ratio above which two paragraphs count as the same footer
    :param verbose: print the company denomination when nothing is removed
    :return: df_par without the rows identified as repeated footers
    """
    len_first = len(df_par)
    footers = []
    deno = df_par['project_denomination'].values[0]
    c = 0
    while True:
        c += 1
        len_start = len(df_par)
        # Footer candidates: all paragraphs sharing the lowest bounding box.
        y_bottom = df_par['y_min_paragraph'].min()
        y_top = df_par[df_par['y_min_paragraph'] == y_bottom]['y_max_paragraph'].min()
        DSmin = df_par[(df_par['y_max_paragraph'] == y_top) & (df_par['y_min_paragraph'] == y_bottom)].copy()
        if len(DSmin) == 1 and c == 1:
            # A unique bottom paragraph on the first pass: no repeated footer.
            if verbose:
                print('\n', deno)
            return df_par
        if len(DSmin) == 1:
            break
        for candidate in DSmin['paragraph'].values:
            # Flag every candidate similar enough to the current one.
            DSmin['is_foot'] = DSmin['paragraph'].apply(lambda x: compute_string_similarity(str(x), candidate) > p)
            count = len((DSmin[DSmin['is_foot'] == True]))
            if count > 1:
                footers.append((candidate, count))
                index_foot = DSmin[DSmin['is_foot'] == True].index
                break
            else:
                # This candidate matched nothing: drop it and try the next one.
                DSmin = DSmin.drop(DSmin.index[0])
        if len(footers) == 0:
            if verbose:
                print('\n', deno)
            return df_par
        # Remove the detected footer rows, then loop to look for more footers.
        len_end = (len(df_par[~df_par.index.isin(index_foot)]))
        df_par = df_par[~df_par.index.isin(index_foot)]
        if len_start == len_end:
            break
    # Below part is for human check that the function works properly
    # if verbose:
    #     len_last = len(df_par)
    #     S = sum([i for _,i in footers])
    #     print('\n',deno)
    #     print(f"Removed {len_first-len_last} lines. {len_first-len_last==S}")
    #     if footers!=[]:
    #         L = [foot+" x "+ str(count) for foot, count in footers]
    #         print("Footers(s) --->\n",'\n '.join(L))
    return df_par
def cut_header(df_par, p=0.8, verbose=False):
    """Same as function cut_footer() but for headers: removes paragraphs at the
    highest bounding box that repeat (similarity ratio > p) across pages."""
    len_first = len(df_par)
    headers = []
    deno = df_par['project_denomination'].values[0]
    c = 0
    while True:
        c += 1
        len_start = len(df_par)
        # Header candidates: all paragraphs sharing the highest bounding box.
        y_top = df_par['y_max_paragraph'].max()
        y_bottom = df_par[df_par['y_max_paragraph'] == y_top]['y_min_paragraph'].max()
        DSmax = df_par[(df_par['y_max_paragraph'] == y_top) & (df_par['y_min_paragraph'] == y_bottom)].copy()
        if len(DSmax) == 1 and c == 1:
            # A unique top paragraph on the first pass: no repeated header.
            if verbose:
                print('\n', deno)
            return df_par
        if len(DSmax) == 1:
            break
        for candidate in DSmax['paragraph'].values:
            # Flag every candidate similar enough to the current one.
            DSmax['is_head'] = DSmax['paragraph'].apply(lambda x: compute_string_similarity(str(x), candidate) > p)
            count = len((DSmax[DSmax['is_head'] == True]))
            if count > 1:
                headers.append((candidate, count))
                index_head = DSmax[DSmax['is_head'] == True].index
                break
            else:
                # This candidate matched nothing: drop it and try the next one.
                DSmax = DSmax.drop(DSmax.index[0])
        if len(headers) == 0:
            if verbose:
                print('\n', deno)
            return df_par
        # Remove the detected header rows, then loop to look for more headers.
        len_end = (len(df_par[~df_par.index.isin(index_head)]))
        df_par = df_par[~df_par.index.isin(index_head)]
        if len_start == len_end:
            break
    # Below part is for human check that the function works properly
    # if verbose:
    #     len_last = len(df_par)
    #     S = sum([i for _, i in headers])
    #     print('\n', deno)
    #     print(f"Removed {len_first - len_last} lines. {len_first - len_last == S}")
    #     if headers != []:
    #         L = [head + " x " + str(count) for head, count in headers]
    #         print("Header(s) --->\n", '\n '.join(L))
    return df_par
def extract_company_metadata(dpef_path, companies_metadata_dict):
    """From metadata dict and a dpef file, get the relevant info.

    Filenames follow the "<denomination>_<year>_..." convention.
    Missing sectors or RSE ranges yield None (with a console warning).
    """
    name_parts = dpef_path.name.split("_")
    project_denomination = name_parts[0]
    document_year = name_parts[1]
    company_meta = companies_metadata_dict[project_denomination]
    company_name = company_meta["denomination"]
    try:
        company_sectors = company_meta["sectors"].split(";")
    except KeyError:
        company_sectors = None
        print("Sectors not found for company {} for year {}".format(project_denomination,
                                                                    document_year))
    try:
        rse_ranges = company_meta["rse_ranges_" + document_year]
    except KeyError:
        rse_ranges = None
        print("RSE ranges not found for company {} for year {}".format(project_denomination,
                                                                       document_year))
    return company_name, project_denomination, company_sectors, document_year, rse_ranges
def get_paragraphs_dataframe_from_pdf(dpef_path, companies_metadata_dict):
    """
    Parse a pdf and return a pandas df with paragraph level parsed text.
    :param dpef_path: path of the DPEF pdf to parse
    :param companies_metadata_dict: per-company metadata (see get_companies_metadata_dict)
    :return: dataframe with one row per de-duplicated paragraph
    """
    company_name, project_denomination, company_sectors, document_year, rse_ranges \
        = extract_company_metadata(dpef_path, companies_metadata_dict)
    t = time()
    print("Start parsing - {} [{}]".format(
        project_denomination,
        dpef_path.name)
    )
    df_par = parse_paragraphs_from_pdf(dpef_path, rse_ranges=rse_ranges)
    df_par.insert(0, "project_denomination", project_denomination)
    df_par.insert(1, "company_sector", ";".join(company_sectors))
    df_par.insert(2, "document_year", document_year)
    # Remove exact duplicates (repeated boilerplate), then page furniture.
    df_par = df_par.drop_duplicates(['paragraph'])
    df_par = cut_footer(df_par, verbose=True)
    df_par = cut_header(df_par, verbose=True)
    print("End parsing - {} [{}] - took {} seconds".format(
        project_denomination,
        dpef_path.name,
        int(time() - t))  # BUGFIX: was int(t - time()), which printed negative durations
    )
    return df_par
def get_sentences_dataframe_from_pdf(config, dpef_path):
    """Parse a pdf and return a pandas df with sentence level parsed text."""
    metadata = get_companies_metadata_dict(config)
    paragraphs_df = get_paragraphs_dataframe_from_pdf(dpef_path, metadata)
    return sententizer.get_sentence_dataframe_from_paragraph_dataframe(paragraphs_df, config)
def get_sentences_from_all_pdfs(config):
    """
    Parses all dpefs into a sentence-level format and save the resulting csv according to config.

    Only PDFs whose filename prefix matches a known project_denomination are
    processed; parsing is fanned out across CPU cores.
    """
    companies_metadata_dict = get_companies_metadata_dict(config)
    all_dpef_paths = get_list_of_pdfs_filenames(config.dpef_dir)
    # Keep only files whose "<denomination>_..." prefix is in the metadata.
    all_dpef_paths = [dpef_path for dpef_path in all_dpef_paths if
                      dpef_path.name.split("_")[0] in companies_metadata_dict.keys()]
    print(all_dpef_paths)
    # PARALLELIZATION
    parallel_get_sentences_dataframe_from_pdf = partial(get_sentences_dataframe_from_pdf,
                                                        config)
    # Leave one core for the OS; "or 1" only kicks in on single-core machines.
    n_cores = mp.cpu_count() - 1 or 1
    with mp.Pool(n_cores) as pool:
        print("Multiprocessing with {} cores".format(n_cores))
        # imap + tqdm gives a live progress bar as workers finish documents.
        df_sents = list(
            tqdm(
                pool.imap(
                    parallel_get_sentences_dataframe_from_pdf,
                    all_dpef_paths
                ),
                total=len(all_dpef_paths)
            )
        )
    # concat
    df_sents = pd.concat(df_sents, axis=0, ignore_index=True)
    # create parent folder
    pickle_path = config.parsed_sent_file.parent
    pickle_path.mkdir(parents=True, exist_ok=True)
    # save to csv
    df_sents.to_csv(config.parsed_sent_file, sep=";", index=False, encoding='utf-8-sig')
    return df_sents
def run(config):
    """
    Parse the pdfs into structured csv formats.
    :param config: conf object with relative paths.
    :return: sentence-level dataframe covering every parsed DPEF.
    """
    sentences_df = get_sentences_from_all_pdfs(config)
    print(sentences_df.shape)
    return sentences_df
| dataforgoodfr/batch7_rse | polls/rse_model/rse_watch/pdf_parser.py | pdf_parser.py | py | 21,499 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 56,
... |
7749159191 | from flask import Blueprint, request
from flask import jsonify, render_template
from authlib.specs.rfc6749 import OAuth2Error
from authlib.flask.oauth2 import current_token
from ..models import OAuth2Client, OAuth2Token, User
from ..auth import current_user
from ..forms.auth import ConfirmForm, LoginConfirmForm
from ..services.oauth2 import authorization, scopes, require_oauth
from urlparse import parse_qs
from flask_cors import CORS
# Blueprint grouping every OAuth2 endpoint; CORS enabled so browser clients
# on other origins can reach the token/introspection endpoints.
bp = Blueprint('oauth2', __name__)
CORS(bp)
@bp.route('/authorize', methods=['GET', 'POST'])
def authorize():
    """Authorization endpoint (RFC 6749): show the consent page on GET,
    create the authorization response on a validated POST."""
    # NOTE(review): curr_url is never used; request.query_string is bytes on
    # Python 3, so this concatenation assumes Python 2 (see urlparse import) — confirm.
    curr_url = '/oauth2/authorize?' + request.query_string
    if current_user:
        # Already logged in: only ask for consent.
        form = ConfirmForm()
    else:
        # Anonymous visitor: combined login + consent form.
        form = LoginConfirmForm()
    if form.validate_on_submit():
        if form.confirm.data:
            # granted by current user
            grant_user = current_user
        else:
            # Denied / not confirmed: authlib emits an error response.
            grant_user = None
        ret = authorization.create_authorization_response(grant_user)
        return ret
    # GET (or invalid POST): validate the request and render the consent page.
    try:
        grant = authorization.validate_authorization_request()
    except OAuth2Error as error:
        # TODO: add an error page
        payload = dict(error.get_body())
        return jsonify(payload), error.status_code
    client = OAuth2Client.get_by_client_id(request.args['client_id'])
    return render_template(
        'account/authorize.html',
        grant=grant,
        scopes=scopes,
        client=client,
        form=form,
    )
@bp.route('/token', methods=['POST'])
def issue_token():
    """Token endpoint (RFC 6749): exchange a validated grant for tokens."""
    return authorization.create_token_response()
@bp.route('/revoke', methods=['POST'])
def revoke_token():
    """Revocation endpoint (RFC 7009): invalidate the token sent by the client."""
    return authorization.create_revocation_response()
@bp.route('/revoke_bearer', methods=['POST'])
def revoke_token_bearer():
    """Revoke the token passed as a 'token' query-string parameter.

    NOTE(review): raises KeyError if 'token' is absent from the query string,
    and jsonify() on a model instance presumably relies on a custom JSON
    encoder — confirm both against the callers.
    """
    token = OAuth2Token.query_token(parse_qs(request.query_string)['token'][0])
    if token:
        token.revoke()
        return jsonify(token)
    return jsonify({'error': 'Invalid token supplied'}), 401
@bp.route('/tokeninfo', methods=['GET'])
def get_token_info():
    """Token introspection: return the owning user's data merged with the token's."""
    if 'access_token' in request.args:
        token = OAuth2Token.query_token(request.args['access_token'])
        if token and token.user_id:
            user = User.query.get(token.user_id)
            # Merge user info with the token's own fields into one payload.
            udict = user.to_dict(request.host)
            udict.update(token.to_dict())
            return jsonify(udict)
        return jsonify({'error': 'Invalid token supplied'}), 401
    return jsonify({'error': 'Invalid parameters supplied'}), 400
@bp.route('/emailinfo', methods=['GET'])
def get_email_info():
    """Look up a user by email; a valid access_token is required for authorization."""
    if 'email' in request.args and 'access_token' in request.args:
        token = OAuth2Token.query_token(request.args['access_token'])
        email = request.args['email']
        if token and token.user_id:
            user = User.query_email(email)
            if user:
                udict = user.to_dict(request.host)
                return jsonify(udict)
            return jsonify({'error': 'Invalid email supplied'}), 404
        return jsonify({'error': 'Invalid token supplied'}), 401
    return jsonify({'error': 'Invalid parameters supplied'}), 400
| itsocietysu/EACH-OAuth2.0 | website/routes/oauth2.py | oauth2.py | py | 4,226 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request.query_string",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask... |
6243299984 | import requests
import pprint
import matplotlib.pyplot as plt
from tabulate import tabulate
import numpy as np
print('')
print('- GLOBAL INFO -')
print('')
# CONNECT WITH THE API
url = 'https://api.coingecko.com/api/v3/coins/categories'
r = requests.get(url)
response = r.json()
# pprint.pprint(response)
# exit()
# GET TOTAL MARKET CAP
url1 = 'https://api.coingecko.com/api/v3/global'
r1 = requests.get(url1)
response1 = r1.json()
total_marketcap = round(response1['data']['total_market_cap']['usd'], 3)
total_volume = round(response1['data']['total_volume']['usd'], 3)
print('Total Crypto MarketCap: {0:12,.3f}'.format(total_marketcap))
print('Total Crypto 24h Volume: {0:12,.3f}'.format(total_volume))
bb = round(total_volume / total_marketcap * 100, 2)
print('Total Volume 24h / Total MarketCap (%): ', bb ,'%')
print('')
# GET THE NAME OF THE SECTORES
sector_l = list()
id_l = list()
marketcap_l = list()
data_for_table_l = list()
market_share_perc_l = list()
volume_24h_l = list()
volume_24h_perc_l = list()
perc_vol_marketcap_l = list()
market_cap_changes_24h_l = list()
n_l = list()
positive_marketcap_changes = list()
negative_marketcap_changes = list()
for i in range(len(response)):
v = response[i]['name']
id = response[i]['id']
n = i + 1
try:
market_cap = round(float(response[i]['market_cap']), 3)
market_cap_changes_24h = round(float(response[i]['market_cap_change_24h']), 3)
except:
market_cap = 0
market_cap_changes_24h = 0
continue
perc = round(market_cap / total_marketcap * 100, 2)
try:
volume_24h = round(float(response[i]['volume_24h']), 3)
except:
volume_24h = 0
perc_volume = round(volume_24h / total_volume * 100, 2)
if market_cap != 0:
perc_vol_marketcap = round(volume_24h / market_cap * 100, 2)
else:
perc_vol_marketcap = 0
# INSERT INTO LISTS
sector_l.append(v)
marketcap_l.append(market_cap)
market_share_perc_l.append(perc)
volume_24h_l.append(volume_24h)
volume_24h_perc_l.append(perc_volume)
perc_vol_marketcap_l.append(perc_vol_marketcap)
id_l.append(id)
market_cap_changes_24h_l.append(market_cap_changes_24h)
n_l.append(n)
if market_cap_changes_24h > 0:
positive_marketcap_changes.append(market_cap_changes_24h)
elif market_cap_changes_24h <0:
negative_marketcap_changes.append(market_cap_changes_24h)
# CALCULATIONS
if market_cap / 1000000 < 1000:
marketcap2 = str(round(market_cap / 1000000, 3)) + ' M'
else:
marketcap2 = str(round(market_cap/1000000000, 3)) + ' B'
if volume_24h / 1000000 < 1000:
volume_24h2 = str(round(volume_24h / 1000000, 3)) + ' M'
else:
volume_24h2 = str(round(volume_24h / 1000000000, 3)) + ' B'
a = (n, v, id, marketcap2, perc, volume_24h2, perc_volume, perc_vol_marketcap, market_cap_changes_24h)
data_for_table_l.append(a)
# PRINT MORE VALUES
# NOTE(review): np.average on an empty list yields nan with a RuntimeWarning —
# can happen if every sector's 24h change is zero or missing.
avg_marketcap_perc = round(np.average(market_cap_changes_24h_l), 3)
print('Avg MCap 24h %: ', avg_marketcap_perc, '%')
positive_avg_marketcap_perc = round(np.average(positive_marketcap_changes), 3)
print('Positive sectors - Avg MCap 24h %: ', positive_avg_marketcap_perc, '%')
negative_avg_marketcap_perc = round(np.average(negative_marketcap_changes), 3)
print('Negative sectors - Avg MCap 24h %: ', negative_avg_marketcap_perc, '%')
print('')
# PRINT VALUES IN TABLE
# Interactive: only render the per-sector table on explicit request.
a = input('You want to see the values in table (yes/no): ')
if a == 'yes':
    head = ['n',
            'Sector',
            'id',
            'MarketCap',
            'MCap-TotMCap %',
            'Volume 24h',
            'Vol-TotVol %',
            'Vol-MCap %',
            'MCap 24h %'
            ]
    print(tabulate(data_for_table_l, headers=head, tablefmt='grid'))
# q1 = input('Do you wanna order by the MCap 24h % (yes/no): ')
# if q1 == 'yes':
# sorted_list = sorted(data_for_table_l, key=lambda x: x[8], reverse=True)
# head = ['n',
# 'Sector',
# 'id',
# 'MarketCap',
# 'MCap-TotMCap %',
# 'Volume 24h',
# 'Vol-TotVol %',
# 'Vol-MCap %',
# 'MCap 24h %'
# ]
# print(tabulate(sorted_list, headers=head, tablefmt='grid'))
#
# # CREATE GRAPH TO GET ALL INFO
#
# print('')
# b1 = input('Graphs (yes/no): ')
# if b1 == 'yes':
# b = input('Graph by market cap (yes/no): ')
# if b == 'yes':
# fig, (ax1, ax2) = plt.subplots(2, sharex=True)
# fig.suptitle ('All crypto sectores')
#
# ax1.bar(sector_l, marketcap_l)
# ax2.bar(sector_l, marketcap_l)
#
# plt.yscale('log')
# plt.xticks(fontsize=8, rotation='vertical')
# plt.ylabel('MarketCap')
#
#
# plt.tight_layout()
# plt.subplots_adjust(hspace=0.05)
# plt.show()
#
# c = input('Graph Market share (yes/no): ')
# if c =='yes':
# plt.bar(sector_l, market_share_perc_l)
# plt.title('Market share by sectores')
# plt.ylabel('%')
# plt.xticks(fontsize=8, rotation='vertical')
#
# plt.tight_layout()
# plt.show()
#
# d = input('Graph for MarketCap and Volume (yes/no): ')
# if d == 'yes':
# fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)
# fig.suptitle('Info for all sectores')
#
# size = np.arange(len(sector_l))
#
# bar1 = ax1.bar(size, marketcap_l, 0.35, label='MarketCap')
# bar2 = ax1.bar(size + 0.25, volume_24h_l, 0.35, label='Volume 24h')
#
# ax1.legend((bar1, bar2), ('MarketCap', 'Volume 24h'))
#
# bar3 = ax2.bar(size, market_share_perc_l, 0.35, label = 'Market share %')
# bar4 = ax2.bar(size + 0.5, volume_24h_perc_l, 0.35, label='Vol/Total Volume %')
# bar5 = ax2.bar(size + 0.25, perc_vol_marketcap_l, 0.35, label = 'Vol/MarketCap %')
#
# ax2.legend((bar3, bar4, bar5), ('Market share %', 'Vol/Total Volume %', 'Vol/MarketCap %'))
#
# bar6 = ax3.bar(size, market_share_perc_l, 0.35, label = 'Market share %')
# bar7 = ax3.bar(size + 0.5, volume_24h_perc_l, 0.35, label='Vol/Total Volume %')
# bar8 = ax3.bar(size + 0.25, perc_vol_marketcap_l, 0.35, label = 'Vol/MarketCap %')
#
# ax3.legend((bar6, bar7, bar8), ('Market share %', 'Vol/Total Volume %', 'Vol/MarketCap %'), prop={'size':6})
#
# plt.yscale('log')
#
# plt.xticks(size + 0.25, sector_l, fontsize=8, rotation='vertical')
# plt.subplots_adjust(hspace=0.05)
# plt.show()
print('\n-- DATABASE INFORMATION \n')
# NOTE(review): imports belong at the top of the file; kept here to preserve behavior.
import sqlite3
# Create a connection to the SQLite database and create a cursor object
conn = sqlite3.connect("crypto_data.db")
cursor = conn.cursor()
# Get the current date
import datetime
# Underscore-separated so the date can be used as a SQL column name (see below).
current_date = datetime.datetime.now().strftime("%Y_%m_%d")
# Function to check if a column exists in a table
def column_exists(cursor, table_name, column_name):
    """Return True if *column_name* is a column of *table_name*.

    Relies on SQLite's PRAGMA table_info, whose result rows carry the
    column name at index 1.
    """
    cursor.execute(f"PRAGMA table_info({table_name})")
    existing_columns = {row[1] for row in cursor.fetchall()}
    return column_name in existing_columns
# Create market_cap table if it doesn't exist
# Wide-table layout: one row per sector, one REAL column per snapshot date.
cursor.execute("""
CREATE TABLE IF NOT EXISTS market_cap (
sector_id TEXT PRIMARY KEY,
sector_name TEXT
)
""")
print("Created market_cap table if not exists")
# Create daily_volume table if it doesn't exist
cursor.execute("""
CREATE TABLE IF NOT EXISTS daily_volume (
sector_id TEXT PRIMARY KEY,
sector_name TEXT
)
""")
print("Created daily_volume table if not exists")
# Add a prefix to the current_date to avoid column names starting with a number
current_date_column = f"date_{current_date}"
# Add a column for the current date if it doesn't exist
if not column_exists(cursor, "market_cap", current_date_column):
    cursor.execute(f"ALTER TABLE market_cap ADD COLUMN {current_date_column} REAL")
    print(f"Added column {current_date_column} to market_cap table")
if not column_exists(cursor, "daily_volume", current_date_column):
    cursor.execute(f"ALTER TABLE daily_volume ADD COLUMN {current_date_column} REAL")
    print(f"Added column {current_date_column} to daily_volume table")
# Update market cap and daily volume information for each sector
for i in range(len(response)):
    sector_id = response[i]['id']
    sector_name = response[i]['name']
    # Missing/non-numeric values become None so today's cell stays NULL.
    try:
        market_cap = round(float(response[i]['market_cap']), 3)
    except:
        market_cap = None
    try:
        volume_24h = round(float(response[i]['volume_24h']), 3)
    except:
        volume_24h = None
    print(f"Processing sector: {sector_name}")
    # Insert or update the sector in the market_cap table
    cursor.execute("""
    INSERT OR IGNORE INTO market_cap (sector_id, sector_name)
    VALUES (?, ?)
    """, (sector_id, sector_name))
    if market_cap is not None:
        cursor.execute(f"""
        UPDATE market_cap
        SET {current_date_column} = ?
        WHERE sector_id = ?
        """, (market_cap, sector_id))
        print(f"Updated market cap for {sector_name} on {current_date}")
    # Insert or update the sector in the daily_volume table
    cursor.execute("""
    INSERT OR IGNORE INTO daily_volume (sector_id, sector_name)
    VALUES (?, ?)
    """, (sector_id, sector_name))
    if volume_24h is not None:
        cursor.execute(f"""
        UPDATE daily_volume
        SET {current_date_column} = ?
        WHERE sector_id = ?
        """, (volume_24h, sector_id))
        print(f"Updated daily volume for {sector_name} on {current_date}")
# Commit the changes and close the connection
conn.commit()
conn.close()
print("\n Changes committed and connection closed")
| jbbaptista/Personally | Sectores_coingecko/Add_daily_info_database.py | Add_daily_info_database.py | py | 9,661 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_nu... |
17585378222 | import numpy as np
import matplotlib.pyplot as plt
from starwhale import dataset
def show_image(image) -> None:
    """Display *image* in grayscale and block until the window is closed."""
    plt.imshow(image, cmap="gray")
    plt.show(block=True)
# Pull one sample from the latest fer2013 Starwhale dataset and display it.
ds_name = "fer2013/version/latest"
ds = dataset(ds_name)
row = ds.fetch_one()
data = row.features
# The image is stored as raw bytes: rebuild the numpy array from its recorded
# shape before display. NOTE(review): assumes uint8 pixels — confirm.
show_image(
    np.frombuffer(data["image"].to_bytes(), dtype=np.uint8).reshape(data["image"].shape)
)
print(data["label"])
| star-whale/starwhale | example/datasets/fer2013/example.py | example.py | py | 399 | python | en | code | 171 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplo... |
# D = len(data) ## number of docs...data is list of documents
# print('D: ', D)
import torch
import pickle
# Topic-word distribution saved by the ETM training run; map to CPU so the
# script also works on machines without a GPU.
beta = torch.load('betas-40.pt', map_location=torch.device('cpu'))
beta = beta.numpy()
# Vocabulary list (index -> word). NOTE(review): pickle.load only on trusted files.
with open('vocab_50K.pkl', 'rb') as f:
    vocab = pickle.load(f)
TC = []  # per-topic coherence scores, filled by the loop below
num_topics = len(beta)
for k in range(num_topics):
print('k: {}/{}'.format(k, num_topics))
top_10 = list(beta[k].argsort()[-11:][::-1])
top_words = [vocab[a] for a in top_10]
TC_k = 0
counter = 0 | gretatuckute/neuralNLP | ETM_evaluation/testBetaOrder.py | testBetaOrder.py | py | 488 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 11,
"usage_type": "call"
}
] |
34683343714 | #!/home/kelsi/.virtualenvs/mtg/bin/python
# -*- coding: utf-8 -*-
from bottle import route, run, template, view
from datetime import date
import sqlite3
# Module-level database handle shared by every route handler below.
con = sqlite3.connect('mtg.db')
con.row_factory = sqlite3.Row  # rows support access by column name
cursor = con.cursor()
@route('/hello/<name>')
@view('sample')
def hello(name):
    """Render the 'sample' template greeting *name*."""
    return {"name": name}
@route('/index')
@view('index')
def index():
    """Render the index template with today's date."""
    return {"date": date.today()}
@route('/tournaments/<date>')
@view('tournament_index')
def tournament_index(date):
    """List tournaments held on the given date (a Unix timestamp string)."""
    # Local import: the route parameter 'date' shadows the module-level
    # datetime.date import, so the full module is imported here instead.
    import datetime
    pretty_date = datetime.date.fromtimestamp(float(date))
    #pretty_date = pretty_date.strftime('%B %d, %Y')
    cursor.execute('''select * from tourneys where date=?''', (date,))
    return dict(date=pretty_date.strftime('%B %d, %Y'), cursor=cursor)
@route('/events/<eventid>')
@view('event_index')
def event_index(eventid):
    """Show one tournament: its metadata plus every participant ranked."""
    import datetime  # local import, consistent with tournament_index
    # Get generic tournament info
    cursor.execute('''select * from tourneys where id=?''', (eventid,))
    tourney_info = cursor.fetchone()
    date = datetime.date.fromtimestamp(float(tourney_info['date']))
    # Eventually want to show results, bracket
    # For now, participants and their basic win/losses and deck
    cursor.execute('''select participants.*, users.name, users.id as userid from participants left join users on participants.user_id = users.id where participants.tourney_id=? order by participants.rank''', (eventid,))
    return dict(eventid=eventid, cursor=cursor, date=date.strftime('%d/%m/%y'), info=tourney_info)
@route('/users/<userid>')
@view('user_index')
def user_index(userid):
    """Render the page for a single user.

    BUGFIX: the handler previously took `eventid` (mismatching the route's
    `<userid>` placeholder, so bottle could not bind the parameter) and
    returned undefined names (`tourney_info`, and `date.strftime` called on
    the datetime.date class), guaranteeing an error at request time.
    """
    # Want a view of users
    # With links to tourneys played in and decks used
    # NOTE(review): the 'user_index' template's expected context is not visible
    # here — adjust the returned dict to match it.
    cursor.execute('''select * from users where id=?''', (userid,))
    user = cursor.fetchone()
    return dict(userid=userid, user=user, cursor=cursor)
@route('/cards/<cardid>')
@view('card_stats')
def user_index(cardid):
    """Show usage statistics for a single card.

    NOTE(review): this reuses the name 'user_index' defined earlier in the
    file; bottle registers handlers at decoration time so both routes
    presumably still work, but renaming to card_stats would avoid shadowing.
    """
    # Want a view of individual cards
    # Tracking usage over time
    # Want to be able to visualize multiple cards on same graph
    # To compare usage...
    cursor.execute('''select count(*) as decks, sum(quantity) as total from decklists where card=?''', (cardid,))
    stats = cursor.fetchone()
    return dict(cardid=cardid, stats=stats)
# Want a view of decks
# Need to be able to identify decks first!
# Want to be able to see deck usage over time
# Want to be able to compare to other decks over same time period
# Blocking call: serves until interrupted; con.close() only runs afterwards.
run(host='localhost', port=8000)
con.close()
| kelind/bio-mtg | mtg_bottle.py | mtg_bottle.py | py | 2,408 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlite3.Row",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "bottle.route",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bottle.view",
"line_... |
1133144480 | import pyfiglet
import platform
import os
import sys
import socket
import threading
from time import sleep
from queue import Queue
def banner():
    """Print the ASCII-art program banner in yellow."""
    art = pyfiglet.figlet_format('PORT SCANNER')
    print('\033[93m' + art + '\033[m')
# Shared work queue of port numbers, consumed by the worker threads.
ports = Queue()
first_port = 1
last_port = 65535
# Pre-fill the queue with the full TCP port range (1-65535 inclusive).
for i in range(first_port, last_port+1):
    ports.put(i)
def clear_screen():
    """Clear the terminal with the platform-appropriate shell command."""
    os_name = platform.system()
    if os_name == 'Windows':
        os.system('cls')
    elif os_name == 'Linux':
        os.system('clear')
def validate_ip(ip):
    """Return True if *ip* is a syntactically valid dotted-quad IPv4 address.

    Bug fix: the original returned from inside the loop after inspecting
    only the FIRST octet, and only checked its length (1-3 characters), so
    strings like 'abc.def.ghi.jkl' or '999.1.1.1' were accepted. Every
    octet must now be 1-3 decimal digits in the range 0-255.
    """
    parts = ip.split('.')
    if len(parts) != 4:
        return False
    for part in parts:
        # isdigit() rejects empty strings, signs, and non-numeric octets.
        if not part.isdigit() or not 0 <= int(part) <= 255:
            return False
    return True
def scan(port):
    """Attempt a TCP connect to (target, port); print the port if open.

    Fixes over the original:
    - the socket is now closed even when an exception fires mid-call
      (context manager; the old code leaked the fd on that path);
    - the timeout is set per-socket via settimeout() instead of mutating
      the process-wide default with socket.setdefaulttimeout();
    - the bare ``except`` is narrowed to OSError so genuine programming
      errors are no longer silently swallowed.

    NOTE(review): reads the module-level ``target`` set by the CLI prologue.
    """
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(1)
            # connect_ex returns an errno (0 on success) instead of raising.
            if sock.connect_ex((target, port)) == 0:
                print(f'Port {port} is open!')
    except OSError:
        # Unreachable host / resolution failure: just skip this port.
        pass
def worker():
    """Drain the shared port queue, scanning each port as it is taken."""
    while True:
        if ports.empty():
            break
        next_port = ports.get()
        scan(next_port)
        ports.task_done()
def start_workers():
    """Launch 500 daemon scanner threads and block until all have finished."""
    pool = [threading.Thread(target=worker, daemon=True) for _ in range(500)]
    for thread in pool:
        thread.start()
    for thread in pool:
        thread.join()
def start_scanning():
    """Reset the display, announce the scan, then run the worker pool."""
    clear_screen()
    banner()
    sleep(1)
    print(f'Scanning the ports from {first_port} to {last_port}...\n')
    start_workers()
banner()

# The target address must be supplied as the first CLI argument.
try:
    target = sys.argv[1]
except IndexError:
    raise ValueError('\033[91mYou need to pass an IP address as an argument! e.g.'+
                     'python portscanner.py an.ip.address.here\033[m')

print('\033[93m'+
      '--------------------Welcome to the port scanner!--------------------'+
      '\033[m')

print(f'\n\nThis program will scan now the ports of the address {target}')

# Confirmation loop: accept the argv target, or prompt for a replacement.
while True:
    ip_is_okay = input('Is this okay? [Y/N]')
    if ip_is_okay.strip().upper() == 'Y':
        break
    elif ip_is_okay.strip().upper() == 'N':
        # Inner loop: keep prompting until a syntactically valid IP is given.
        while True:
            new_ip = input('Enter a new IP: ')
            if validate_ip(new_ip):
                target = new_ip
                print('\033[34mYou succefully changed the target IP adress.\033[m')
                sleep(1.5)
                break
            else:
                print('''\033[91mERROR! The IP you writed is invalid. Please write
an valid ip address, in the format: 000.000.000.000\033[m \n''')
                sleep(2)
                continue
        # A valid replacement was accepted -- leave the outer loop too.
        break
    else:
        # Any other answer: ask the question again.
        continue

start_scanning()
print('\n\033[93mAll the ports were scanned. Goodbye!\033[m')
print('\n\033[93mAll the ports were scanned. Goodbye!\033[m')
| ArthurDeveloper/portscanner-python | portscanner.py | portscanner.py | py | 2,780 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pyfiglet.figlet_format",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.system",
"l... |
21017557226 | """
The goal of this program is to optimize the movement to achieve a rudi out pike (803<) for left twisters.
"""
import os
import numpy as np
import biorbd_casadi as biorbd
from casadi import MX, Function
from bioptim import (
OptimalControlProgram,
DynamicsList,
DynamicsFcn,
ObjectiveList,
ObjectiveFcn,
BoundsList,
InitialGuessList,
InterpolationType,
OdeSolver,
Node,
Solver,
BiMappingList,
CostType,
ConstraintList,
ConstraintFcn,
PenaltyController,
MultiStart,
Solution,
MagnitudeType,
BiorbdModel,
)
import time
import pickle
class Model:
    """Plain container for the options of one optimization run.

    Attributes
    ----------
    model : str
        Path/name of the bioMod model file.
    with_hsl : bool
        Whether to use the HSL linear solvers (libhsl); False disables them.
    n_threads : int
        Number of threads handed to the solver.
    savesol : bool
        Whether the solution should be saved.
    show_online : bool
        Whether to show graphs during the optimization.
    print_ocp : bool
        Whether to print the ocp before solving.
    """

    def __init__(self, model, n_threads=5, with_hsl=False, savesol=False, show_online=False, print_ocp=False):
        self.model = model
        self.with_hsl = with_hsl
        self.n_threads = n_threads
        self.savesol = savesol
        self.show_online = show_online
        self.print_ocp = print_ocp

    # NOTE(review): remnants of an argparse-based CLI were kept here as
    # commented-out code; condensed to this note. The flags mirrored the
    # constructor arguments above (model, --no-hsl, -j, --no-sol,
    # --no-show-online, --print-ocp).
# Optional dependency probe: record whether IPython is importable so that
# interactive features can be toggled later without a hard dependency.
try:
    import IPython
    IPYTHON = True
except ImportError:
    print("No IPython.")
    IPYTHON = False
def minimize_dofs(controller: PenaltyController, dofs: list, targets: list):
    """Custom penalty: sum of squared deviations of the selected q degrees
    of freedom from their target values at the node start."""
    q = controller.states['q'].cx_start
    return sum((q[dof] - target) ** 2 for dof, target in zip(dofs, targets))
def prepare_ocp(
        biorbd_model_path: str, nb_twist: int, seed: int,
        ode_solver: OdeSolver = OdeSolver.RK4(),
) -> OptimalControlProgram:
    """
    Prepare the ocp for one multi-start run of the rudi out pike (803<).

    Parameters
    ----------
    biorbd_model_path: str
        The path to the bioMod file
    nb_twist: int
        Number of half-twists required by the end of phase 3
        (3 -> 1.5 twists, 5 -> 2.5, 7 -> 3.5)
    seed: int
        Random seed used to perturb the initial guesses (multi-start)
    ode_solver: OdeSolver
        The ode solver to use

    Returns
    -------
    The OptimalControlProgram ready to be solved
    """
    final_time = 1.87
    # Five phases: pike entry, piked somersault, opening, twist, landing.
    n_shooting = (40, 100, 100, 100, 40)
    biomodel = (BiorbdModel(biorbd_model_path))
    # The same model instance is reused for all five phases.
    biorbd_model = (biomodel, biomodel, biomodel, biomodel, biomodel)
    nb_q = biorbd_model[0].nb_q
    nb_qdot = biorbd_model[0].nb_qdot
    nb_qddot_joints = nb_q - biorbd_model[0].nb_root

    # For readability: indices into q. Per the original (French) comments,
    # BD/BG = right/left arm, ABD/ABG = right/left forearm (elbow),
    # XrotC = hip flexion (pike), YrotC = hip side-bend.
    X = 0
    Y = 1
    Z = 2
    Xrot = 3
    Yrot = 4
    Zrot = 5
    ZrotBD = 6
    YrotBD = 7
    ZrotABD = 8
    XrotABD = 9
    ZrotBG = 10
    YrotBG = 11
    ZrotABG = 12
    XrotABG = 13
    XrotC = 14
    YrotC = 15
    # Matching indices into qdot. NOTE(review): vYrotABD/vYrotABG sit at the
    # indices named XrotABD/XrotABG above -- same numbers, inconsistent names.
    vX = 0
    vY = 1
    vZ = 2
    vXrot = 3
    vYrot = 4
    vZrot = 5
    vZrotBD = 6
    vYrotBD = 7
    vZrotABD = 8
    vYrotABD = 9
    vZrotBG = 10
    vYrotBG = 11
    vZrotABG = 12
    vYrotABG = 13
    vXrotC = 14
    vYrotC = 15

    # Add objective functions
    objective_functions = ObjectiveList()
    # objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_MARKERS, marker_index=1, weight=-1)
    # Minimize joint accelerations (effort) in every phase.
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="qddot_joints", node=Node.ALL_SHOOTING,
                            weight=1, phase=0)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="qddot_joints", node=Node.ALL_SHOOTING,
                            weight=1, phase=1)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="qddot_joints", node=Node.ALL_SHOOTING,
                            weight=1, phase=2)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="qddot_joints", node=Node.ALL_SHOOTING,
                            weight=1, phase=3)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="qddot_joints", node=Node.ALL_SHOOTING,
                            weight=1, phase=4)
    # Heavily penalize the duration of the entry and opening phases.
    objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_TIME, min_bound=.0, max_bound=1.0, weight=100000,
                            phase=0)
    objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_TIME, min_bound=.0, max_bound=1.0, weight=100000,
                            phase=2)
    # Hands must reach their targets (shins) at the end of the entry phase.
    objective_functions.add(ObjectiveFcn.Mayer.SUPERIMPOSE_MARKERS, node=Node.END, first_marker='MidMainG',
                            second_marker='CibleMainG', weight=1000, phase=0)
    objective_functions.add(ObjectiveFcn.Mayer.SUPERIMPOSE_MARKERS, node=Node.END, first_marker='MidMainD',
                            second_marker='CibleMainD', weight=1000, phase=0)

    # Stop the arms from flailing
    les_bras = [ZrotBD, YrotBD, ZrotABD, XrotABD, ZrotBG, YrotBG, ZrotABG, XrotABG]
    les_coudes = [ZrotABD, XrotABD, ZrotABG, XrotABG]
    objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
                            dofs=les_coudes, targets=np.zeros(len(les_coudes)), weight=1000, phase=0)
    objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
                            dofs=les_bras, targets=np.zeros(len(les_bras)), weight=10, phase=0)
    objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
                            dofs=les_bras, targets=np.zeros(len(les_bras)), weight=10, phase=1)
    objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
                            dofs=les_bras, targets=np.zeros(len(les_bras)), weight=10, phase=2)
    objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
                            dofs=les_bras, targets=np.zeros(len(les_bras)), weight=10, phase=3)
    objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
                            dofs=les_bras, targets=np.zeros(len(les_bras)), weight=10, phase=4)
    objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
                            dofs=les_coudes, targets=np.zeros(len(les_coudes)), weight=1000, phase=4)
    # Open the hips quickly after the twist
    objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Mayer, node=Node.END, dofs=[XrotC],
                            targets=[0], weight=10000, phase=3)

    # Dynamics
    dynamics = DynamicsList()
    dynamics.add(DynamicsFcn.JOINTS_ACCELERATION_DRIVEN)
    dynamics.add(DynamicsFcn.JOINTS_ACCELERATION_DRIVEN)
    dynamics.add(DynamicsFcn.JOINTS_ACCELERATION_DRIVEN)
    dynamics.add(DynamicsFcn.JOINTS_ACCELERATION_DRIVEN)
    dynamics.add(DynamicsFcn.JOINTS_ACCELERATION_DRIVEN)

    # Control bounds and noisy initial guesses (one seed per multi-start run).
    qddot_joints_min, qddot_joints_max, qddot_joints_init = -500, 500, 0
    u_bounds = BoundsList()
    for i in range(5):
        u_bounds.add("qddot_joints", min_bound=[qddot_joints_min] * nb_qddot_joints, max_bound=[qddot_joints_max] * nb_qddot_joints, phase=i)

    u_init = InitialGuessList()
    for i in range(5):
        u_init.add("qddot_joints", [qddot_joints_init] * nb_qddot_joints, phase=i)
        u_init[i]["qddot_joints"].add_noise(
            bounds=u_bounds[i]["qddot_joints"],
            magnitude=0.2,
            magnitude_type=MagnitudeType.RELATIVE,
            n_shooting=n_shooting[i],
            seed=seed,
        )

    # Path constraint
    x_bounds = BoundsList()
    for i in range(5):
        x_bounds.add("q", min_bound=biorbd_model[0].bounds_from_ranges("q").min, max_bound=biorbd_model[0].bounds_from_ranges("q").max, phase=i)
        x_bounds.add("qdot", min_bound=biorbd_model[0].bounds_from_ranges("qdot").min, max_bound=biorbd_model[0].bounds_from_ranges("qdot").max, phase=i)

    # For readability: interpolation nodes (start / middle / end of a phase)
    DEBUT, MILIEU, FIN = 0, 1, 2

    #
    # Position constraints: PHASE 0 (getting into the pike)
    #
    zmax = 8
    # 12 / 8 * final_time**2 + 1  # a small margin
    # translation
    x_bounds[0]["q"].min[X, :] = -.1
    x_bounds[0]["q"].max[X, :] = .1
    x_bounds[0]["q"].min[Y, :] = -1.
    x_bounds[0]["q"].max[Y, :] = 1.
    x_bounds[0]["q"].min[:Z + 1, DEBUT] = 0
    x_bounds[0]["q"].max[:Z + 1, DEBUT] = 0
    x_bounds[0]["q"].min[Z, MILIEU:] = 0
    x_bounds[0]["q"].max[Z, MILIEU:] = zmax  # much more than needed, just so the parabola works
    # somersault about x
    x_bounds[0]["q"].min[Xrot, :] = 0
    # 2 * 3.14 + 3 / 2 * 3.14 - .2
    x_bounds[0]["q"].max[Xrot, :] = -.50 + 3.14
    x_bounds[0]["q"].min[Xrot, DEBUT] = .50  # leaning forward, slightly piked
    x_bounds[0]["q"].max[Xrot, DEBUT] = .50
    x_bounds[0]["q"].min[Xrot, MILIEU:] = 0
    x_bounds[0]["q"].max[Xrot, MILIEU:] = 4 * 3.14 + .1  # somersault
    # limit tilt about y
    x_bounds[0]["q"].min[Yrot, DEBUT] = 0
    x_bounds[0]["q"].max[Yrot, DEBUT] = 0
    x_bounds[0]["q"].min[Yrot, MILIEU:] = - 3.14 / 16  # really not supposed to tilt
    x_bounds[0]["q"].max[Yrot, MILIEU:] = 3.14 / 16
    # twist about z
    x_bounds[0]["q"].min[Zrot, DEBUT] = 0
    x_bounds[0]["q"].max[Zrot, DEBUT] = 0
    x_bounds[0]["q"].min[Zrot, MILIEU:] = -.1  # no twist in this phase
    x_bounds[0]["q"].max[Zrot, MILIEU:] = .1
    # right arm
    x_bounds[0]["q"].min[YrotBD, DEBUT] = 2.9  # start with arms at the ears
    x_bounds[0]["q"].max[YrotBD, DEBUT] = 2.9
    x_bounds[0]["q"].min[ZrotBD, DEBUT] = 0
    x_bounds[0]["q"].max[ZrotBD, DEBUT] = 0
    # left arm
    x_bounds[0]["q"].min[YrotBG, DEBUT] = -2.9  # start with arms at the ears
    x_bounds[0]["q"].max[YrotBG, DEBUT] = -2.9
    x_bounds[0]["q"].min[ZrotBG, DEBUT] = 0
    x_bounds[0]["q"].max[ZrotBG, DEBUT] = 0
    # right elbow
    x_bounds[0]["q"].min[ZrotABD:XrotABD + 1, DEBUT] = 0
    x_bounds[0]["q"].max[ZrotABD:XrotABD + 1, DEBUT] = 0
    # left elbow
    x_bounds[0]["q"].min[ZrotABG:XrotABG + 1, DEBUT] = 0
    x_bounds[0]["q"].max[ZrotABG:XrotABG + 1, DEBUT] = 0
    # the pike
    x_bounds[0]["q"].min[XrotC, DEBUT] = -.50  # start slightly closed at the hips
    x_bounds[0]["q"].max[XrotC, DEBUT] = -.50
    x_bounds[0]["q"].max[XrotC, FIN] = -2.5
    # x_bounds[0].min[XrotC, FIN] = 2.7  # model minimum
    # hip side-bend
    x_bounds[0]["q"].min[YrotC, DEBUT] = 0
    x_bounds[0]["q"].max[YrotC, DEBUT] = 0
    x_bounds[0]["q"].min[YrotC, MILIEU:] = -.1
    x_bounds[0]["q"].max[YrotC, MILIEU:] = .1

    # Velocity constraints: PHASE 0 (getting into the pike)
    vzinit = 9.81 / (2 * final_time)  # initial CoM vertical velocity so the jump lands at final_time
    # offset between the pelvis and the CoM
    CoM_Q_sym = MX.sym('CoM', nb_q)
    CoM_Q_init = x_bounds[0]["q"].min[:nb_q,
                 DEBUT]  # min or max makes no difference a priori; at DEBUT they are normally equal
    CoM_Q_func = Function('CoM_Q_func', [CoM_Q_sym], [biorbd_model[0].center_of_mass(CoM_Q_sym)])
    bassin_Q_func = Function('bassin_Q_func', [CoM_Q_sym],
                             [biorbd_model[0].homogeneous_matrices_in_global(CoM_Q_sym, 0).to_mx()])  # returns the pelvis RT
    r = np.array(CoM_Q_func(CoM_Q_init)).reshape(1, 3) - np.array(bassin_Q_func(CoM_Q_init))[-1,
                                                         :3]  # keep only the translation part of the RT

    # pelvis xy
    x_bounds[0]["qdot"].min[vX:vY + 1, :] = -10
    x_bounds[0]["qdot"].max[vX:vY + 1, :] = 10
    x_bounds[0]["qdot"].min[vX:vY + 1, DEBUT] = -.5
    x_bounds[0]["qdot"].max[vX:vY + 1, DEBUT] = .5
    # pelvis z
    x_bounds[0]["qdot"].min[vZ, :] = -50
    x_bounds[0]["qdot"].max[vZ, :] = 50
    x_bounds[0]["qdot"].min[vZ, DEBUT] = vzinit - .5
    x_bounds[0]["qdot"].max[vZ, DEBUT] = vzinit + .5
    # about x
    x_bounds[0]["qdot"].min[vXrot, :] = .5  # from video observation
    x_bounds[0]["qdot"].max[vXrot, :] = 20  # as fast as needed, but should not reach this speed
    # about y
    x_bounds[0]["qdot"].min[vYrot, :] = -50
    x_bounds[0]["qdot"].max[vYrot, :] = 50
    x_bounds[0]["qdot"].min[vYrot, DEBUT] = 0
    x_bounds[0]["qdot"].max[vYrot, DEBUT] = 0
    # about z
    x_bounds[0]["qdot"].min[vZrot, :] = -50
    x_bounds[0]["qdot"].max[vZrot, :] = 50
    x_bounds[0]["qdot"].min[vZrot, DEBUT] = 0
    x_bounds[0]["qdot"].max[vZrot, DEBUT] = 0

    # account for the pelvis-CoM offset under rotation:
    # Qtransdot = Qtransdot + v cross Qrotdot
    borne_inf = (x_bounds[0]["qdot"].min[vX:vZ + 1, DEBUT] + np.cross(r, x_bounds[0]["qdot"].min[vXrot:vZrot + 1, DEBUT]))[0]
    borne_sup = (x_bounds[0]["qdot"].max[vX:vZ + 1, DEBUT] + np.cross(r, x_bounds[0]["qdot"].max[vXrot:vZrot + 1, DEBUT]))[0]
    x_bounds[0]["qdot"].min[vX:vZ + 1, DEBUT] = min(borne_sup[0], borne_inf[0]), min(borne_sup[1], borne_inf[1]), min(
        borne_sup[2], borne_inf[2])
    x_bounds[0]["qdot"].max[vX:vZ + 1, DEBUT] = max(borne_sup[0], borne_inf[0]), max(borne_sup[1], borne_inf[1]), max(
        borne_sup[2], borne_inf[2])

    # right arm
    x_bounds[0]["qdot"].min[vZrotBD:vYrotBD + 1, :] = -50
    x_bounds[0]["qdot"].max[vZrotBD:vYrotBD + 1, :] = 50
    x_bounds[0]["qdot"].min[vZrotBD:vYrotBD + 1, DEBUT] = 0
    x_bounds[0]["qdot"].max[vZrotBD:vYrotBD + 1, DEBUT] = 0
    # left arm
    x_bounds[0]["qdot"].min[vZrotBG:vYrotBG + 1, :] = -50
    x_bounds[0]["qdot"].max[vZrotBG:vYrotBG + 1, :] = 50
    x_bounds[0]["qdot"].min[vZrotBG:vYrotBG + 1, DEBUT] = 0
    x_bounds[0]["qdot"].max[vZrotBG:vYrotBG + 1, DEBUT] = 0
    # right elbow
    x_bounds[0]["qdot"].min[vZrotABD:vYrotABD + 1, :] = -50
    x_bounds[0]["qdot"].max[vZrotABD:vYrotABD + 1, :] = 50
    x_bounds[0]["qdot"].min[vZrotABD:vYrotABD + 1, DEBUT] = 0
    x_bounds[0]["qdot"].max[vZrotABD:vYrotABD + 1, DEBUT] = 0
    # left elbow
    # NOTE(review): the slice below starts at vZrotABD (right elbow) --
    # likely vZrotABG was intended; harmless here since both get +/-50.
    x_bounds[0]["qdot"].min[vZrotABD:vYrotABG + 1, :] = -50
    x_bounds[0]["qdot"].max[vZrotABD:vYrotABG + 1, :] = 50
    x_bounds[0]["qdot"].min[vZrotABG:vYrotABG + 1, DEBUT] = 0
    x_bounds[0]["qdot"].max[vZrotABG:vYrotABG + 1, DEBUT] = 0
    # pike DoF
    x_bounds[0]["qdot"].min[vXrotC, :] = -50
    x_bounds[0]["qdot"].max[vXrotC, :] = 50
    x_bounds[0]["qdot"].min[vXrotC, DEBUT] = 0
    x_bounds[0]["qdot"].max[vXrotC, DEBUT] = 0
    # hip side-bend DoF
    x_bounds[0]["qdot"].min[vYrotC, :] = -50
    x_bounds[0]["qdot"].max[vYrotC, :] = 50
    x_bounds[0]["qdot"].min[vYrotC, DEBUT] = 0
    x_bounds[0]["qdot"].max[vYrotC, DEBUT] = 0

    #
    # Position constraints: PHASE 1 (the piked somersault)
    #
    # translation
    x_bounds[1]["q"].min[X, :] = -.1
    x_bounds[1]["q"].max[X, :] = .1
    x_bounds[1]["q"].min[Y, :] = -1.
    x_bounds[1]["q"].max[Y, :] = 1.
    x_bounds[1]["q"].min[Z, :] = 0
    x_bounds[1]["q"].max[Z, :] = zmax  # much more than needed, just so the parabola works
    # somersault about x
    x_bounds[1]["q"].min[Xrot, :] = 0
    x_bounds[1]["q"].max[Xrot, :] = -.50 + 4 * 3.14
    x_bounds[1]["q"].min[Xrot, FIN] = 2 * 3.14 - .1
    # limit tilt about y
    x_bounds[1]["q"].min[Yrot, :] = - 3.14 / 16
    x_bounds[1]["q"].max[Yrot, :] = 3.14 / 16
    # twist about z
    x_bounds[1]["q"].min[Zrot, :] = -.1
    x_bounds[1]["q"].max[Zrot, :] = .1
    # the pike
    x_bounds[1]["q"].max[XrotC, :] = -2.5
    # hip side-bend
    x_bounds[1]["q"].min[YrotC, DEBUT] = -.1
    x_bounds[1]["q"].max[YrotC, DEBUT] = .1

    # Velocity constraints: PHASE 1 (the piked somersault)
    # pelvis xy
    x_bounds[1]["qdot"].min[vX:vY + 1, :] = -10
    x_bounds[1]["qdot"].max[vX:vY + 1, :] = 10
    # pelvis z
    x_bounds[1]["qdot"].min[vZ, :] = -50
    x_bounds[1]["qdot"].max[vZ, :] = 50
    # about x
    x_bounds[1]["qdot"].min[vXrot, :] = -50
    x_bounds[1]["qdot"].max[vXrot, :] = 50
    # about y
    x_bounds[1]["qdot"].min[vYrot, :] = -50
    x_bounds[1]["qdot"].max[vYrot, :] = 50
    # about z
    x_bounds[1]["qdot"].min[vZrot, :] = -50
    x_bounds[1]["qdot"].max[vZrot, :] = 50
    # right arm
    x_bounds[1]["qdot"].min[vZrotBD:vYrotBD + 1, :] = -50
    x_bounds[1]["qdot"].max[vZrotBD:vYrotBD + 1, :] = 50
    # left arm
    x_bounds[1]["qdot"].min[vZrotBG:vYrotBG + 1, :] = -50
    x_bounds[1]["qdot"].max[vZrotBG:vYrotBG + 1, :] = 50
    # right elbow
    x_bounds[1]["qdot"].min[vZrotABD:vYrotABD + 1, :] = -50
    x_bounds[1]["qdot"].max[vZrotABD:vYrotABD + 1, :] = 50
    # left elbow (same vZrotABD slice start as noted in phase 0)
    x_bounds[1]["qdot"].min[vZrotABD:vYrotABG + 1, :] = -50
    x_bounds[1]["qdot"].max[vZrotABD:vYrotABG + 1, :] = 50
    # pike DoF
    x_bounds[1]["qdot"].min[vXrotC, :] = -50
    x_bounds[1]["qdot"].max[vXrotC, :] = 50
    # hip side-bend DoF
    x_bounds[1]["qdot"].min[vYrotC, :] = -50
    x_bounds[1]["qdot"].max[vYrotC, :] = 50

    #
    # Position constraints: PHASE 2 (the opening)
    #
    # translation
    x_bounds[2]["q"].min[X, :] = -.2
    x_bounds[2]["q"].max[X, :] = .2
    x_bounds[2]["q"].min[Y, :] = -1.
    x_bounds[2]["q"].max[Y, :] = 1.
    x_bounds[2]["q"].min[Z, :] = 0
    x_bounds[2]["q"].max[Z, :] = zmax  # much more than needed, just so the parabola works
    # somersault about x
    x_bounds[2]["q"].min[Xrot, :] = 2 * 3.14 - .1
    x_bounds[2]["q"].max[Xrot, :] = -.50 + 4 * 3.14
    # limit tilt about y
    x_bounds[2]["q"].min[Yrot, :] = - 3.14 / 4
    x_bounds[2]["q"].max[Yrot, :] = 3.14 / 4
    # twist about z
    x_bounds[2]["q"].min[Zrot, :] = 0
    x_bounds[2]["q"].max[Zrot, :] = 3.14  # 5 * 3.14
    # hips must be nearly open by the end of the phase
    x_bounds[2]["q"].min[XrotC, FIN] = -.4

    # Velocity constraints: PHASE 2 (the opening)
    # pelvis xy
    x_bounds[2]["qdot"].min[vX:vY + 1, :] = -10
    x_bounds[2]["qdot"].max[vX:vY + 1, :] = 10
    # pelvis z
    x_bounds[2]["qdot"].min[vZ, :] = -50
    x_bounds[2]["qdot"].max[vZ, :] = 50
    # about x
    x_bounds[2]["qdot"].min[vXrot, :] = -50
    x_bounds[2]["qdot"].max[vXrot, :] = 50
    # about y
    x_bounds[2]["qdot"].min[vYrot, :] = -50
    x_bounds[2]["qdot"].max[vYrot, :] = 50
    # about z
    x_bounds[2]["qdot"].min[vZrot, :] = -50
    x_bounds[2]["qdot"].max[vZrot, :] = 50
    # right arm
    x_bounds[2]["qdot"].min[vZrotBD:vYrotBD + 1, :] = -50
    x_bounds[2]["qdot"].max[vZrotBD:vYrotBD + 1, :] = 50
    # left arm
    x_bounds[2]["qdot"].min[vZrotBG:vYrotBG + 1, :] = -50
    x_bounds[2]["qdot"].max[vZrotBG:vYrotBG + 1, :] = 50
    # right elbow
    x_bounds[2]["qdot"].min[vZrotABD:vYrotABD + 1, :] = -50
    x_bounds[2]["qdot"].max[vZrotABD:vYrotABD + 1, :] = 50
    # left elbow (same vZrotABD slice start as noted in phase 0)
    x_bounds[2]["qdot"].min[vZrotABD:vYrotABG + 1, :] = -50
    x_bounds[2]["qdot"].max[vZrotABD:vYrotABG + 1, :] = 50
    # pike DoF
    x_bounds[2]["qdot"].min[vXrotC, :] = -50
    x_bounds[2]["qdot"].max[vXrotC, :] = 50
    # hip side-bend DoF
    x_bounds[2]["qdot"].min[vYrotC, :] = -50
    x_bounds[2]["qdot"].max[vYrotC, :] = 50

    #
    # Position constraints: PHASE 3 (the twist[s])
    #
    # translation
    x_bounds[3]["q"].min[X, :] = -.2
    x_bounds[3]["q"].max[X, :] = .2
    x_bounds[3]["q"].min[Y, :] = -1.
    x_bounds[3]["q"].max[Y, :] = 1.
    x_bounds[3]["q"].min[Z, :] = 0
    x_bounds[3]["q"].max[Z, :] = zmax  # much more than needed, just so the parabola works
    # somersault about x
    # NOTE(review): the next assignment is immediately overwritten by the
    # one after it (min set to 0, then to 2*3.14 - .1); the first is dead.
    x_bounds[3]["q"].min[Xrot, :] = 0
    x_bounds[3]["q"].min[Xrot, :] = 2 * 3.14 - .1
    x_bounds[3]["q"].max[Xrot, :] = 2 * 3.14 + 3 / 2 * 3.14 + .1  # 1 3/4 somersault
    x_bounds[3]["q"].min[Xrot, FIN] = 2 * 3.14 + 3 / 2 * 3.14 - .1
    x_bounds[3]["q"].max[Xrot, FIN] = 2 * 3.14 + 3 / 2 * 3.14 + .1  # 1 3/4 somersault
    # limit tilt about y
    x_bounds[3]["q"].min[Yrot, :] = - 3.14 / 4
    x_bounds[3]["q"].max[Yrot, :] = 3.14 / 4
    x_bounds[3]["q"].min[Yrot, FIN] = - 3.14 / 8
    x_bounds[3]["q"].max[Yrot, FIN] = 3.14 / 8
    # twist about z
    x_bounds[3]["q"].min[Zrot, :] = 0
    x_bounds[3]["q"].max[Zrot, :] = 5 * 3.14
    x_bounds[3]["q"].min[Zrot, FIN] = nb_twist * 3.14 - .1  # complete the twist
    x_bounds[3]["q"].max[Zrot, FIN] = nb_twist * 3.14 + .1
    # the pike: keep the legs (nearly) extended
    x_bounds[3]["q"].min[XrotC, :] = -.4
    # hip side-bend

    # Velocity constraints: PHASE 3 (the twist[s])
    # pelvis xy
    x_bounds[3]["qdot"].min[vX:vY + 1, :] = -10
    x_bounds[3]["qdot"].max[vX:vY + 1, :] = 10
    # pelvis z
    x_bounds[3]["qdot"].min[vZ, :] = -50
    x_bounds[3]["qdot"].max[vZ, :] = 50
    # about x
    x_bounds[3]["qdot"].min[vXrot, :] = -50
    x_bounds[3]["qdot"].max[vXrot, :] = 50
    # about y
    x_bounds[3]["qdot"].min[vYrot, :] = -50
    x_bounds[3]["qdot"].max[vYrot, :] = 50
    # about z
    x_bounds[3]["qdot"].min[vZrot, :] = -50
    x_bounds[3]["qdot"].max[vZrot, :] = 50
    # right arm
    x_bounds[3]["qdot"].min[vZrotBD:vYrotBD + 1, :] = -50
    x_bounds[3]["qdot"].max[vZrotBD:vYrotBD + 1, :] = 50
    # left arm
    x_bounds[3]["qdot"].min[vZrotBG:vYrotBG + 1, :] = -50
    x_bounds[3]["qdot"].max[vZrotBG:vYrotBG + 1, :] = 50
    # right elbow
    x_bounds[3]["qdot"].min[vZrotABD:vYrotABD + 1, :] = -50
    x_bounds[3]["qdot"].max[vZrotABD:vYrotABD + 1, :] = 50
    # left elbow (same vZrotABD slice start as noted in phase 0)
    x_bounds[3]["qdot"].min[vZrotABD:vYrotABG + 1, :] = -50
    x_bounds[3]["qdot"].max[vZrotABD:vYrotABG + 1, :] = 50
    # pike DoF
    x_bounds[3]["qdot"].min[vXrotC, :] = -50
    x_bounds[3]["qdot"].max[vXrotC, :] = 50
    # hip side-bend DoF
    x_bounds[3]["qdot"].min[vYrotC, :] = -50
    x_bounds[3]["qdot"].max[vYrotC, :] = 50

    #
    # Position constraints: PHASE 4 (the landing)
    #
    # translation
    x_bounds[4]["q"].min[X, :] = -.1
    x_bounds[4]["q"].max[X, :] = .1
    x_bounds[4]["q"].min[Y, FIN] = -.1
    x_bounds[4]["q"].max[Y, FIN] = .1
    x_bounds[4]["q"].min[Z, :] = 0
    x_bounds[4]["q"].max[Z, :] = zmax  # much more than needed, just so the parabola works
    x_bounds[4]["q"].min[Z, FIN] = 0
    x_bounds[4]["q"].max[Z, FIN] = .1
    # somersault about x
    x_bounds[4]["q"].min[Xrot, :] = 2 * 3.14 + 3 / 2 * 3.14 - .2  # leaning forward -> less somersault
    x_bounds[4]["q"].max[Xrot, :] = -.50 + 4 * 3.14  # slightly piked at the end
    x_bounds[4]["q"].min[Xrot, FIN] = -.50 + 4 * 3.14 - .1  # end of somersault slightly piked
    x_bounds[4]["q"].max[Xrot, FIN] = -.50 + 4 * 3.14 + .1  # end of somersault slightly piked
    # limit tilt about y
    x_bounds[4]["q"].min[Yrot, :] = - 3.14 / 16
    x_bounds[4]["q"].max[Yrot, :] = 3.14 / 16
    # twist about z
    x_bounds[4]["q"].min[Zrot, :] = nb_twist * 3.14 - .1  # twist stays completed
    x_bounds[4]["q"].max[Zrot, :] = nb_twist * 3.14 + .1
    # right arm
    x_bounds[4]["q"].min[YrotBD, FIN] = 2.9 - .1  # arms back at the ears
    x_bounds[4]["q"].max[YrotBD, FIN] = 2.9 + .1
    x_bounds[4]["q"].min[ZrotBD, FIN] = -.1
    x_bounds[4]["q"].max[ZrotBD, FIN] = .1
    # left arm
    x_bounds[4]["q"].min[YrotBG, FIN] = -2.9 - .1  # arms back at the ears
    x_bounds[4]["q"].max[YrotBG, FIN] = -2.9 + .1
    x_bounds[4]["q"].min[ZrotBG, FIN] = -.1
    x_bounds[4]["q"].max[ZrotBG, FIN] = .1
    # right elbow
    x_bounds[4]["q"].min[ZrotABD:XrotABD + 1, FIN] = -.1
    x_bounds[4]["q"].max[ZrotABD:XrotABD + 1, FIN] = .1
    # left elbow
    x_bounds[4]["q"].min[ZrotABG:XrotABG + 1, FIN] = -.1
    x_bounds[4]["q"].max[ZrotABG:XrotABG + 1, FIN] = .1
    # the pike
    x_bounds[4]["q"].min[XrotC, :] = -.4
    x_bounds[4]["q"].min[XrotC, FIN] = -.60
    x_bounds[4]["q"].max[XrotC, FIN] = -.40  # slightly piked at the end
    # hip side-bend
    x_bounds[4]["q"].min[YrotC, FIN] = -.1
    x_bounds[4]["q"].max[YrotC, FIN] = .1

    # Velocity constraints: PHASE 4 (the landing)
    # pelvis xy
    x_bounds[4]["qdot"].min[vX:vY + 1, :] = -10
    x_bounds[4]["qdot"].max[vX:vY + 1, :] = 10
    # pelvis z
    x_bounds[4]["qdot"].min[vZ, :] = -50
    x_bounds[4]["qdot"].max[vZ, :] = 50
    # about x
    x_bounds[4]["qdot"].min[vXrot, :] = -50
    x_bounds[4]["qdot"].max[vXrot, :] = 50
    # about y
    x_bounds[4]["qdot"].min[vYrot, :] = -50
    x_bounds[4]["qdot"].max[vYrot, :] = 50
    # about z
    x_bounds[4]["qdot"].min[vZrot, :] = -50
    x_bounds[4]["qdot"].max[vZrot, :] = 50
    # right arm
    x_bounds[4]["qdot"].min[vZrotBD:vYrotBD + 1, :] = -50
    x_bounds[4]["qdot"].max[vZrotBD:vYrotBD + 1, :] = 50
    # left arm
    x_bounds[4]["qdot"].min[vZrotBG:vYrotBG + 1, :] = -50
    x_bounds[4]["qdot"].max[vZrotBG:vYrotBG + 1, :] = 50
    # right elbow
    x_bounds[4]["qdot"].min[vZrotABD:vYrotABD + 1, :] = -50
    x_bounds[4]["qdot"].max[vZrotABD:vYrotABD + 1, :] = 50
    # left elbow (same vZrotABD slice start as noted in phase 0)
    x_bounds[4]["qdot"].min[vZrotABD:vYrotABG + 1, :] = -50
    x_bounds[4]["qdot"].max[vZrotABD:vYrotABG + 1, :] = 50
    # pike DoF
    x_bounds[4]["qdot"].min[vXrotC, :] = -50
    x_bounds[4]["qdot"].max[vXrotC, :] = 50
    # hip side-bend DoF
    x_bounds[4]["qdot"].min[vYrotC, :] = -50
    x_bounds[4]["qdot"].max[vYrotC, :] = 50

    #
    # Initial guesses
    #
    # Each x_i holds (q; qdot) at the start (col 0) and end (col 1) of phase i,
    # interpolated linearly between them.
    x0 = np.vstack((np.zeros((nb_q, 2)), np.zeros((nb_qdot, 2))))
    x1 = np.vstack((np.zeros((nb_q, 2)), np.zeros((nb_qdot, 2))))
    x2 = np.vstack((np.zeros((nb_q, 2)), np.zeros((nb_qdot, 2))))
    x3 = np.vstack((np.zeros((nb_q, 2)), np.zeros((nb_qdot, 2))))
    x4 = np.vstack((np.zeros((nb_q, 2)), np.zeros((nb_qdot, 2))))

    # takeoff: getting into the piked position
    x0[Xrot, 0] = .50
    x0[ZrotBG] = -.75
    x0[ZrotBD] = .75
    x0[YrotBG, 0] = -2.9
    x0[YrotBD, 0] = 2.9
    x0[YrotBG, 1] = -1.35
    x0[YrotBD, 1] = 1.35
    x0[XrotC, 0] = -.5
    x0[XrotC, 1] = -2.6
    # somersault rotation (x) while piked
    x1[ZrotBG] = -.75
    x1[ZrotBD] = .75
    x1[Xrot, 1] = 2 * 3.14
    x1[YrotBG] = -1.35
    x1[YrotBD] = 1.35
    x1[XrotC] = -2.6
    # hip opening
    x2[Xrot] = 2 * 3.14
    x2[Zrot, 1] = 0.2
    x2[ZrotBG, 0] = -.75
    x2[ZrotBD, 0] = .75
    x2[YrotBG, 0] = -1.35
    x2[YrotBD, 0] = 1.35
    x2[XrotC, 0] = -2.6
    # twist in layout position
    x3[Xrot, 0] = 2 * 3.14
    x3[Xrot, 1] = 2 * 3.14 + 3 / 2 * 3.14
    x3[Zrot, 0] = 0  # set to 0 ?
    x3[Zrot, 1] = nb_twist * 3.14
    # landing (reduce the tilt)
    x4[Xrot, 0] = 2 * 3.14 + 3 / 2 * 3.14
    x4[Xrot, 1] = 4 * 3.14
    x4[Zrot] = nb_twist * 3.14
    x4[XrotC, 1] = -.5

    x_init = InitialGuessList()
    x_init.add("q", initial_guess=x0[:nb_q, :], interpolation=InterpolationType.LINEAR, phase=0)
    x_init.add("qdot", initial_guess=x0[nb_q:, :], interpolation=InterpolationType.LINEAR, phase=0)
    x_init.add("q", initial_guess=x1[:nb_q, :], interpolation=InterpolationType.LINEAR, phase=1)
    x_init.add("qdot", initial_guess=x1[nb_q:, :], interpolation=InterpolationType.LINEAR, phase=1)
    x_init.add("q", initial_guess=x2[:nb_q, :], interpolation=InterpolationType.LINEAR, phase=2)
    x_init.add("qdot", initial_guess=x2[nb_q:, :], interpolation=InterpolationType.LINEAR, phase=2)
    x_init.add("q", initial_guess=x3[:nb_q, :], interpolation=InterpolationType.LINEAR, phase=3)
    x_init.add("qdot", initial_guess=x3[nb_q:, :], interpolation=InterpolationType.LINEAR, phase=3)
    x_init.add("q", initial_guess=x4[:nb_q, :], interpolation=InterpolationType.LINEAR, phase=4)
    x_init.add("qdot", initial_guess=x4[nb_q:, :], interpolation=InterpolationType.LINEAR, phase=4)

    # Perturb the state initial guesses with seeded noise (multi-start).
    for i in range(5):
        x_init[i]["q"].add_noise(
            bounds=x_bounds[i]["q"],
            n_shooting=np.array(n_shooting[i])+1,
            magnitude=0.2,
            magnitude_type=MagnitudeType.RELATIVE,
            seed=seed,
        )
        x_init[i]["qdot"].add_noise(
            bounds=x_bounds[i]["qdot"],
            n_shooting=np.array(n_shooting[i])+1,
            magnitude=0.2,
            magnitude_type=MagnitudeType.RELATIVE,
            seed=seed,
        )

    # Hands stay on their shin targets during the piked somersault, and the
    # somersault/twist/landing phases are bounded in duration.
    constraints = ConstraintList()
    constraints.add(ConstraintFcn.SUPERIMPOSE_MARKERS, node=Node.ALL_SHOOTING, min_bound=-.1, max_bound=.1,
                    first_marker='MidMainG', second_marker='CibleMainG', phase=1)
    constraints.add(ConstraintFcn.SUPERIMPOSE_MARKERS, node=Node.ALL_SHOOTING, min_bound=-.1, max_bound=.1,
                    first_marker='MidMainD', second_marker='CibleMainD', phase=1)
    constraints.add(ConstraintFcn.TIME_CONSTRAINT, node=Node.END, min_bound=1e-4, max_bound=1.5, phase=1)
    constraints.add(ConstraintFcn.TIME_CONSTRAINT, node=Node.END, min_bound=1e-4, max_bound=0.7, phase=3)
    constraints.add(ConstraintFcn.TIME_CONSTRAINT, node=Node.END, min_bound=1e-4, max_bound=0.5, phase=4)

    # NOTE(review): `ode_solver` is accepted but never forwarded to the
    # OptimalControlProgram below -- confirm whether that is intentional.
    return OptimalControlProgram(
        biorbd_model,
        dynamics,
        n_shooting,
        [final_time / len(biorbd_model)] * len(biorbd_model),
        x_init=x_init,
        u_init=u_init,
        x_bounds=x_bounds,
        u_bounds=u_bounds,
        objective_functions=objective_functions,
        constraints=constraints,
        n_threads=5,
    )
def construct_filepath(biorbd_model_path, nb_twist, seed):
    """Build the base result-file name '<athlete>_<stunt>_<seed>' for one run."""
    stunts = {3: "vrille_et_demi", 5: "double_vrille_et_demi", 7: "triple_vrille_et_demi"}
    athlete = biorbd_model_path.split('/')[-1].removesuffix('.bioMod')
    return f"{athlete}_{stunts[nb_twist]}_{seed}"
def save_results(sol: Solution,
                 *combinatorial_parameters,
                 **extra_parameter):
    """
    Pickle the states/controls of a finished ocp.

    Parameters
    ----------
    sol: Solution
        The solution to the ocp at the current pool
    combinatorial_parameters:
        (biorbd_model_path, nb_twist, seed) for this run, as passed by MultiStart
    extra_parameter:
        Must contain 'save_folder', the output directory.

    Bug fix: the original body used biorbd_model_path / nb_twist / seed /
    save_folder / athlete / stunt without ever unpacking them from the
    callback arguments, so it raised NameError on every call.
    """
    biorbd_model_path, nb_twist, seed = combinatorial_parameters
    save_folder = extra_parameter["save_folder"]
    stunts = {3: "vrille_et_demi", 5: "double_vrille_et_demi", 7: "triple_vrille_et_demi"}
    stunt = stunts[nb_twist]
    athlete = biorbd_model_path.split('/')[-1].removesuffix('.bioMod')
    title_before_solve = construct_filepath(biorbd_model_path, nb_twist, seed)
    convergence = sol.status
    dict_state = {}
    q = []
    qdot = []
    tau = []
    # One entry per phase.
    for i in range(len(sol.states)):
        q.append(sol.states[i]['q'])
        qdot.append(sol.states[i]['qdot'])
        tau.append(sol.controls[i]['qddot_joints'])
    dict_state['q'] = q
    dict_state['qdot'] = qdot
    dict_state['tau'] = tau
    # Drop the ocp reference before pickling the solution object.
    del sol.ocp
    dict_state['sol'] = sol
    # status 0 means the solver converged.
    if convergence == 0:
        convergence = 'CVG'
        print(f'{athlete} doing' + f' {stunt}' + ' converge')
    else:
        convergence = 'DVG'
        print(f'{athlete} doing ' + f'{stunt}' + ' doesn t converge')
    if save_folder:
        with open(f'{save_folder}/{title_before_solve}_{convergence}.pkl', "wb") as file:
            pickle.dump(dict_state, file)
    else:
        raise RuntimeError(f"This folder {save_folder} does not exist")
def should_solve(*combinatorial_parameters, **extra_parameters):
    """
    Check if a result for this run already exists in the save folder; if
    not, the ocp must be solved (return True).

    Bug fix: saved files are named '<base>_CVG.pkl' / '<base>_DVG.pkl'
    (see save_results), so the original exact-membership test against the
    bare base name never matched and every run was re-solved. Use a
    prefix test against the listed filenames instead.
    """
    biorbd_model_path, nb_twist, seed = combinatorial_parameters
    save_folder = extra_parameters["save_folder"]
    file_path = construct_filepath(biorbd_model_path, nb_twist, seed)
    already_done_filenames = os.listdir(f"{save_folder}")
    return not any(name.startswith(file_path) for name in already_done_filenames)
def prepare_multi_start(
        combinatorial_parameters: dict[tuple, ...],
        save_folder: str = None,
        n_pools: int = 6
) -> MultiStart:
    """Build the MultiStart driver for the pool of ocp runs."""
    folder_kwargs = {'save_folder': save_folder}
    return MultiStart(
        combinatorial_parameters=combinatorial_parameters,
        prepare_ocp_callback=prepare_ocp,
        post_optimization_callback=(save_results, folder_kwargs),
        should_solve_callback=(should_solve, folder_kwargs),
        # You cannot use show_online_optim with multi-start
        solver=Solver.IPOPT(show_online_optim=False),
        n_pools=n_pools,
    )
def main():
    """
    Prepares and solves an ocp for a 803<. Animates the results

    Bug fixes:
    - the combinatorial key was 'bio_model_path' while prepare_ocp's first
      parameter is 'biorbd_model_path'; renamed to match.
    - the save folder is created if missing, since should_solve calls
      os.listdir on it before any result exists.
    """
    seed = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    nb_twist = [3, 5]
    athletes = [
        # "AdCh",
        # "AlAd",
        # "AuJo",
        # "Benjamin",
        # "ElMe",
        # "EvZl",
        # "FeBl",
        # "JeCh",
        # "KaFu",
        # "KaMi",
        # "LaDe",
        # "MaCu",
        # "MaJa",
        # "OlGa",
        "Sarah",
        # "SoMe",
        # "WeEm",
        # "ZoTs"
    ]
    all_paths = []
    for athlete in athletes:
        all_paths.append("Models/Models_Lisa/" + f'{athlete}.bioMod')
    combinatorial_parameters = {'biorbd_model_path': all_paths,
                                'nb_twist': nb_twist,
                                'seed': seed}
    save_folder = "Multistart_double_vrille"
    os.makedirs(save_folder, exist_ok=True)
    multi_start = prepare_multi_start(combinatorial_parameters=combinatorial_parameters, save_folder=save_folder, n_pools=6)
    multi_start.solver = Solver.IPOPT(show_online_optim=False, show_options=dict(show_bounds=False))
    # NOTE(review): ma57 requires libhsl; the Mod.with_hsl toggle was
    # commented out upstream, so this is unconditional here.
    multi_start.solver.set_linear_solver('ma57')
    multi_start.solver.set_maximum_iterations(3000)
    multi_start.solver.set_convergence_tolerance(1e-4)
    multi_start.solve()
| EveCharbie/AnthropoImpactOnTech | Tech_opt_MultiStart.py | Tech_opt_MultiStart.py | py | 34,211 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "bioptim.PenaltyController",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "bioptim.OdeSolver",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "bioptim.OdeSolver.RK4",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "bio... |
10528069074 | from PIL import Image
from torchvision import transforms
from JointCompose import JointCompose, IMG_ONLY_TRANSFORM, MASK_ONLY_TRANSFORM, RANDOM_JOINT_TRANSFORM_WITH_BORDERS, BORDER_ONLY_TRANSFORM, JOINT_TRANSFORM_WITH_BORDERS
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
from skimage.segmentation import mark_boundaries, find_boundaries
from skimage.exposure import adjust_gamma, rescale_intensity, equalize_hist, equalize_adapthist
from skimage.util import img_as_float
from skimage.segmentation import quickshift, felzenszwalb, slic
import random
from skimage.color import rgb2lab, rgb2grey
from skimage import filters
import cv2
IMG_SIZE = 256
class TransformSpec:
    """Bundle a transform callable with its application mode and optional probability.

    ``transform_type`` selects which inputs (image / mask / borders / joint) the
    transform applies to; ``prob``, when given, makes application stochastic.
    """

    def __init__(self, transform, transform_type, prob = None):
        # Store everything verbatim; the composing pipeline reads these attributes directly.
        self.transform, self.transform_type, self.prob = transform, transform_type, prob
class Flip(object):
    """Flip a PIL Image.

    flip_type 0 mirrors horizontally (left/right); any other value mirrors
    vertically (top/bottom).
    """

    def __init__(self, flip_type):
        self.type = flip_type

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be flipped.
        Returns:
            PIL Image: flipped image.
        """
        method = Image.FLIP_LEFT_RIGHT if self.type == 0 else Image.FLIP_TOP_BOTTOM
        return img.transpose(method)
# aside: transforms are written as callable classes instead of simple functions so that parameters
# of the transform need not be passed everytime it is called. For this, we just need to implement
# __call__ method and if required, __init__ method.
class Segment(object):
    """Overlay threshold-based segmentation boundaries on a numpy RGB image.

    Two global thresholds (minimum and Yen) are computed on the greyscale image;
    the boundaries of both binarizations are drawn in red on one copy, which is
    then averaged with an unmarked copy and returned as uint8.
    """
    def __call__(self, img):
        # img is a numpy rgb image
        grey_img = rgb2grey(img)
        t1 = filters.threshold_minimum(grey_img)
        t2 = filters.threshold_yen(grey_img)
        # Red boundaries for both thresholded masks on the same copy.
        img1 = mark_boundaries(img, (grey_img > t1), color=(1,0,0))
        img1 = mark_boundaries(img1, (grey_img > t2), color=(1,0,0))
        # grey_img < 0 is an all-False mask, so img2 is essentially the plain image as float.
        img2 = mark_boundaries(img, grey_img < 0)
        # Blend marked and unmarked copies to soften the boundary overlay.
        img = ((img1+img2)/2)
        # Earlier segmentation experiments kept for reference:
        #img = mark_boundaries(img, quickshift(img_as_float(img), kernel_size =5, max_dist = 10, ratio = 1.0))
        #img = mark_boundaries(img, slic(img_as_float(img), n_segments=10))
        #fimg = rgb2grey(img)
        #t = filters.threshold_otsu(fimg)
        #img = mark_boundaries(img, (fimg > t).astype(np.uint8), color=(1,0,0))
        #img = mark_boundaries(img, (fimg - filters.threshold_niblack(fimg)< 0).astype(np.uint8), color=(1,0,0))
        #img_gray = rgb2grey(img)
        #img_gray = img[:, :, 1]
        # morphological opening (size tuned on training data)
        #circle7 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        #img_open = cv2.morphologyEx(img_gray, cv2.MORPH_OPEN, circle7)
        # Otsu thresholding
        #img_th = cv2.threshold(img_open, 0, 255, cv2.THRESH_OTSU)[1]
        # Invert the image in case the objects of interest are in the dark side
        #if (np.sum(img_th == 255) > np.sum(img_th == 0)):
        #    img_th = cv2.bitwise_not(img_th)
        # second morphological opening (on binary image this time)
        #bin_open = cv2.morphologyEx(img_th, cv2.MORPH_OPEN, circle7)
        # connected components
        #img = mark_boundaries(img,cv2.connectedComponents(bin_open)[1], color=(1,0,0))
        return (img*255).astype(np.uint8)
class JitterBrightness(object):
    """Randomly adjust image brightness via gamma correction.

    Gamma is drawn uniformly from [0.3, 1.3) on every call, so repeated calls
    give different exposures of the same numpy RGB image.
    """

    def __call__(self, img):
        # random.random() lies in [0, 1); the +0.3 shift keeps gamma strictly positive.
        gamma = random.random() + 0.3
        return adjust_gamma(img, gamma)
class Rescale(object):
    def __call__(self, img):
        """Apply adaptive histogram equalization to a numpy RGB image to boost contrast."""
        # img is a numpy rgb image
        return equalize_adapthist(img)
class Negative(object):
    def __call__(self, img):
        """Invert a uint8 numpy RGB image (255 - img) and stretch its intensity range."""
        # img is a numpy rgb image
        return rescale_intensity(255-img)
class To3D(object):
    """Expand a 2-D binary mask into a 3-channel RGB-like uint8 array.

    Pixels equal to 1 become white (255, 255, 255); everything else stays black.
    The RGB shape is required before converting to a PIL image / torch tensor.
    """

    def __call__(self, mask):
        # Vectorized replacement for the original per-pixel double loop: boolean
        # indexing sets all three channels at once.  This also fixes the use of
        # `xrange`, which does not exist in Python 3 (NameError at runtime).
        mask_rgb = np.zeros((mask.shape[0], mask.shape[1], 3))
        mask_rgb[mask == 1] = 255
        return mask_rgb.astype(np.uint8)
class To1Ch(object):
    """Reduce a multi-channel image to a single channel, kept as H x W x 1."""

    def __call__(self, img, channel = 0):
        # Select one channel, then restore a trailing channel axis so the
        # result still has three dimensions.
        single = img[:, :, channel]
        return single[:, :, None]
class Binarize(object):
    """Threshold an array/tensor at 0.5 in place: values above become 1.0, the rest 0.0."""

    def __call__(self, img):
        # Compute the mask once so both assignments partition the elements cleanly.
        high = img > 0.5
        img[high] = 1.0
        img[~high] = 0.0
        return img
class ElasticTransform(object):
    '''
    Random elastic deformation of an (image, mask, borders) triplet.

    Based on "Best Practices for Convolutional Neural Networks Applied to
    Visual Document Analysis", Simard et al. 2003; generalizes
    https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a

    sigma: positive float for smoothing the transformation (elasticity of the
        transformation). If sigma is small the field looks like a completely random
        field after normalization. For intermediate sigma values the displacement
        fields look like elastic deformation, where sigma is the elasticity
        coefficient. If sigma is large, the displacements become close to affine;
        if sigma is very large they become translations.
    alpha: scaling factor - positive float giving the intensity of the
        transformation. Larger alphas require larger sigmas.
    Default values taken from the paper.

    The only behavioral change from the original is replacing `xrange` (Python 2
    only, a NameError under Python 3) with `range`.
    '''

    def __init__(self, sigma=1.5, alpha=34.0):
        '''
        :param sigma: positive float giving the elasticity of the transformation
        :param alpha: positive float giving the intensity of the transformation
        '''
        self.sigma = sigma
        self.alpha = alpha

    def __call__(self, img, mask, borders):
        """Deform img, mask and borders with one shared random displacement field."""
        if len(mask.shape) == 2:
            # 2-D mask: smuggle it through channel 0 of a copy of the image so
            # that image and mask are warped by the exact same field.
            merged_img = np.zeros(img.shape)
            merged_img[:,:,] = img[:,:,]
            merged_img[:,:,0] = mask[:,:]
            # apply elastic deformation on the merged image
            [deformed_merged_img, deformed_borders] = self.__elastic_deformation__([merged_img, borders])
            # Split the mask back out and re-binarize it (interpolation blurs it).
            deformed_mask = np.zeros(mask.shape)
            deformed_mask[:,:] = deformed_merged_img[:, :, 0]
            self.dichotom(deformed_mask, 0.5, 1.0)
            # NOTE(review): channel 0 is restored from the *undeformed* image here,
            # so that channel loses the warp — confirm this is intended.
            deformed_img = deformed_merged_img[:,:,:]
            deformed_img[:,:,0] = img[:,:,0]
        else:
            [deformed_img, deformed_mask, deformed_borders] = self.__elastic_deformation__([img, mask, borders])
        return deformed_img.astype(np.uint8), deformed_mask.astype(np.uint8), deformed_borders.astype(np.uint8)

    def __elastic_deformation__(self, imgs):
        """Warp every array in ``imgs`` with one random, Gaussian-smoothed field.

        The field is built from imgs[0]'s shape; works on numpy images.
        """
        img = imgs[0]
        # img is a numpy image
        shape = img.shape
        n_dim = len(shape)
        convolved_displacement_fields = []
        grid = []
        fsize = len(img.flatten())
        for i in range(n_dim):
            if i < 2:  # don't touch the channel axis
                # Uniform noise in [-1, 1), smoothed by sigma, scaled by alpha.
                cdf = np.array([random.random() for j in range(fsize)]).reshape(shape) * 2 - 1
                convolved_displacement_fields.append(
                    gaussian_filter(cdf, self.sigma, mode="constant", cval=0) * self.alpha)
            grid.append(np.arange(shape[i]))
        grid = np.meshgrid(*grid, indexing='ij')
        indices = []
        for i in range(n_dim):
            if i < 2:  # displaced coordinates for the spatial axes
                indices.append(np.reshape(grid[i] + convolved_displacement_fields[i], (-1, 1)))
            else:  # identity mapping for the channel axis
                indices.append(np.reshape(grid[i], (-1, 1)))
        # Cubic-spline resampling of every input at the displaced coordinates.
        deformed_imgs = [map_coordinates(my_img, indices, order=3).reshape(shape) for my_img in imgs]
        return deformed_imgs

    def dichotom(self, img, thr, v1, v0=0):
        """Binarize ``img`` in place: the 2-D branch maps values above ``thr`` to
        ``v1`` and the rest to ``v0``; the 3-D branch whitens whole pixels."""
        if len(img.shape) == 2:
            img[img > thr] = v1
            img[img < v1] = v0
        else:
            height, width, channel = img.shape
            for i in range(height):
                for j in range(width):
                    for k in range(channel):
                        # NOTE(review): this tests *equality* against thr, unlike the
                        # 2-D branch's `>` comparison — confirm this is intended.
                        if img[i, j, k] == thr:
                            img[i, j, :] = v1
                            break
            img[img < v1] = v0
def PIL_torch_to_numpy(img):
    """Convert a C x H x W torch tensor into an H x W x C numpy image.

    Single-channel results are squeezed down to a plain H x W array.
    """
    array = img.numpy().transpose(1, 2, 0)
    return array[:, :, 0] if array.shape[2] == 1 else array
def reverse_test_transform(img, original_size):
    '''
    Undo the test-time resize: map a mask tensor back to the source image's size.
    :param img: torch tensor mask (C x H x W)
    :param original_size: H X W X C of the source image
    :return: numpy array at the original spatial size
    '''
    # Tensor -> PIL -> resize to the original (H, W) -> tensor, then to numpy.
    steps = [transforms.ToPILImage(),
             transforms.Resize(original_size[:2]),
             transforms.ToTensor()]
    restored = transforms.Compose(steps)(img)
    return PIL_torch_to_numpy(restored)
def to_binary_mask(labelled_mask, with_borders, use_borders_as_mask):
    """Turn a labelled instance mask into a binary (mask, borders) pair of uint8 arrays."""
    # Outer instance boundaries are needed in every configuration; compute once.
    outer = find_boundaries(labelled_mask, mode='outer')
    if use_borders_as_mask:
        mask = outer
    else:
        mask = labelled_mask > 0
        if with_borders:
            # Carve the instance boundaries out of the foreground mask.
            mask[outer] = 0
    return mask.astype(np.uint8), outer.astype(np.uint8)
# Named augmentation pipelines.  Each pipeline is a JointCompose of TransformSpec
# steps; the second TransformSpec argument selects which inputs the step touches
# (image only, mask only, borders only, or all jointly), and `prob` makes a step
# stochastic.  All pipelines end by resizing to IMG_SIZE, converting to tensors,
# and re-binarizing mask/borders.
transformations = {
                   "train_transform_elastic":JointCompose(# transformations
                       [
                           # turn mask into 3D RGB-Like for PIL and tensor transformation
                           TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
                           TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
                           #Elastic deformation on the numpy images
                           TransformSpec(ElasticTransform(), RANDOM_JOINT_TRANSFORM_WITH_BORDERS, prob=0.8),
                           # Convert borders and mask to 1 channel
                           TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
                           TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
                           # color jittering (image only)
                           TransformSpec(JitterBrightness(), IMG_ONLY_TRANSFORM),
                           TransformSpec(Negative(), IMG_ONLY_TRANSFORM, prob=0.5),
                           # turn into a PIL image - required to apply torch transforms (both image, mask and borders)
                           TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
                           # flipping
                           TransformSpec(Flip(1), JOINT_TRANSFORM_WITH_BORDERS, prob=0.5),
                           #resize image (bilinear interpolation)
                           TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
                                         IMG_ONLY_TRANSFORM),
                           #resize borders (bilinear interpolation)
                           TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                         BORDER_ONLY_TRANSFORM),
                           # resize mask
                           TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                         MASK_ONLY_TRANSFORM),
                           # finally turn into a torch tenstor (both image and mask)
                           # swap color axis because
                           # numpy image: H x W x C
                           # torch image: C X H X W
                           TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
                           # ensure mask and borders are binarized
                           TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
                           TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
                   ),
                   # Same layout without elastic deformation; lighter jitter/flip probabilities.
                   "train_transform":JointCompose(
                       [TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
                        TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
                        TransformSpec(JitterBrightness(), IMG_ONLY_TRANSFORM, prob=0.5),
                        TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
                        TransformSpec(Flip(1), JOINT_TRANSFORM_WITH_BORDERS, prob=0.2),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
                                      IMG_ONLY_TRANSFORM),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                      BORDER_ONLY_TRANSFORM),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                      MASK_ONLY_TRANSFORM),
                        TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
                        TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
                   ),
                   # Adds the boundary-overlay Segment step on the image.
                   "train_transform_segment":JointCompose(
                       [TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
                        TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
                        TransformSpec(JitterBrightness(), IMG_ONLY_TRANSFORM, prob=0.9),
                        TransformSpec(Segment(), IMG_ONLY_TRANSFORM),
                        TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
                        TransformSpec(Flip(1), JOINT_TRANSFORM_WITH_BORDERS, prob=0.2),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
                                      IMG_ONLY_TRANSFORM),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                      BORDER_ONLY_TRANSFORM),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                      MASK_ONLY_TRANSFORM),
                        TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
                        TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
                   ),
                   # Heavier brightness jitter plus random negatives.
                   "train_transform_jitter":JointCompose(
                       [TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
                        TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
                        TransformSpec(JitterBrightness(), IMG_ONLY_TRANSFORM, prob=0.9),
                        TransformSpec(Negative(), IMG_ONLY_TRANSFORM, prob=0.5),
                        TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
                        TransformSpec(Flip(1), JOINT_TRANSFORM_WITH_BORDERS, prob=0.2),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
                                      IMG_ONLY_TRANSFORM),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                      BORDER_ONLY_TRANSFORM),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                      MASK_ONLY_TRANSFORM),
                        TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
                        TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
                   ),
                   # Deterministic pipeline for evaluation (no random steps).
                   "test_transform":JointCompose(
                       [TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
                        TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
                        TransformSpec(Segment(), IMG_ONLY_TRANSFORM),
                        TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
                                      IMG_ONLY_TRANSFORM),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                      BORDER_ONLY_TRANSFORM),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                      MASK_ONLY_TRANSFORM),
                        TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
                        TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
                   ),
                   # Debug pipeline: most random steps disabled or at prob 0.
                   "toy_transform":JointCompose(
                       [ # turn mask into 3D RGB-Like for PIL and tensor transformation
                        TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
                        TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
                        #Elastic deformation on the numpy images
                        #TransformSpec(ElasticTransform(), RANDOM_JOINT_TRANSFORM_WITH_BORDERS),
                        # Convert borders and mask to 1 channel
                        TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
                        #TransformSpec(Rescale(), IMG_ONLY_TRANSFORM),
                        # color jittering (image only)
                        TransformSpec(JitterBrightness(), IMG_ONLY_TRANSFORM),
                        #TransformSpec(Negative(), IMG_ONLY_TRANSFORM),
                        TransformSpec(Segment(), IMG_ONLY_TRANSFORM),
                        TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
                        TransformSpec(Flip(1), JOINT_TRANSFORM_WITH_BORDERS, prob=0.0),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
                                      IMG_ONLY_TRANSFORM),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                      BORDER_ONLY_TRANSFORM),
                        TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
                                      MASK_ONLY_TRANSFORM),
                        TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
                        TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
                        TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
                   )
}
| yolish/kaggle-dsb18 | dsbaugment.py | dsbaugment.py | py | 16,818 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.FLIP_LEFT_RIGHT",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "PIL.Image.FLIP_TOP_BOTTOM",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name"... |
74457209702 | """Futurepedia"""
import json
import re
import time
import random
import scrapy
import os
from urllib.parse import urljoin
from zimeiti.items import ZimeitiItem
from zimeiti.public import refactoring_img, down_img, contenc_description, get_words, timetimes, execute, is_exists, \
refactoring_img1
import math
class MainSpider(scrapy.Spider):
    """Crawl futurepedia.io: enumerate every tag's tool listing, then scrape each tool page."""
    name = "futurepedia"
    # allowed_domains = ["xxx.com"]
    start_urls = ["https://www.futurepedia.io/ai-tools"]
    # Network share where downloaded images are stored.
    path = f'//192.168.0.15/data/SEO/images/{name}/'
    # Counters (s is incremented per scraped detail page; l appears unused here).
    s = 0
    l = 0
    def start_requests(self):
        # Seed the crawl from the JSON tag index instead of the HTML start page.
        start_url = 'https://www.futurepedia.io/api/tags'
        yield scrapy.Request(url=start_url,callback=self.lanmu)
    def lanmu(self,response):
        """Fan out one paged tools-API request per category ("lanmu" = column/category)."""
        for resp in json.loads(response.text):
            categoryName = resp['categoryName']
            toolCount = resp['toolCount']
            # The API serves 9 tools per page.
            page = math.ceil(toolCount/9)
            for p in range(1,page+1):
                url = F'https://www.futurepedia.io/api/tools?page={p}&tag={categoryName}&sort=verified'
                yield scrapy.Request(url=url,callback=self.lists,meta={'ncolumn':categoryName})
    def lists(self,repsonse):
        """Extract tool thumbnails/links from a listing and queue unseen detail pages.

        NOTE(review): these XPath selectors assume an HTML listing, but the requests
        issued in lanmu() hit a JSON API — verify which response this actually receives.
        """
        lists = repsonse.xpath('//ul[@class="yl_left_ul"]/li/a')
        if lists:
            for li in lists:
                fmt = li.xpath('img/@src').get()
                fmt_url = urljoin(self.start_urls[0],fmt)
                print(fmt_url)
                de_url = li.xpath('@href').get()
                detail_url = urljoin(self.start_urls[0],de_url)
                # num == 0 means the URL is not yet in the database.
                num = is_exists({'name':self.name,'url':detail_url})
                # yield scrapy.Request(url='http://www.41sky.com/gprj/2018-10-24/110.html', callback=self.detail, meta={'ncolumn': repsonse.meta['ncolumn']})
                if num == 0:
                    imgUrl = down_img(fmt_url,repsonse.url,self.path) # download the thumbnail; returns its stored URL
                    print(imgUrl)
                    yield scrapy.Request(url=detail_url,callback=self.detail,meta={'ncolumn':repsonse.meta['ncolumn'],'imgUrl':imgUrl})
                else:
                    # Message means "already in the database" — skip this URL.
                    print('数据库已存在')
                    pass
        # Follow the "next page" pagination link when present.
        next_page = repsonse.xpath('//div[@class="pages"]/ul/li/a[contains(text(),"下一页")]/@href').get()
        if next_page:
            next_url = urljoin(repsonse.url,next_page)
            yield scrapy.Request(url=next_url,callback=self.lists,meta={'ncolumn':repsonse.meta['ncolumn']})
    def detail(self,response):
        """Scrape one tool page into a ZimeitiItem (content, SEO fields, image URL)."""
        print('获取内容')
        self.s += 1
        item = ZimeitiItem()
        item['title'] = response.xpath('//div[@class="art_content"]/h1/text()').get()
        if item['title']:
            item['title'] = item['title'].strip()
        content = response.xpath('//div[@class="text"]').getall()
        if content:
            text = "".join(content)
            # Rewrites embedded image URLs and downloads them to self.path.
            item['Ncontent'] = refactoring_img(text,response.url,self.path)
            # item['Ncontent'] = refactoring_img(text,response.url,self.path)
            # item['Ncontent'] = content
            item['description'] = contenc_description(item['Ncontent'])
            item['nkeywords'] = get_words(item['Ncontent'])
            item['tag'] = item['nkeywords']
            item['domian'] = self.name
            item['webName'] = 'Futurepedia'
            item['url'] = response.url
            item['ncolumn'] = response.meta['ncolumn']
            item['naddtime'] = str(int(time.time()))
            item['imgUrl'] = response.meta['imgUrl']
            # item['lmImgUrl'] = response.meta['lmImgUrl']
            item['seo_title'] = response.xpath('//title/text()').get()
            item['seo_keywords'] = response.xpath('//meta[@name="keywords"]/@content').get()
            item['seo_description'] = response.xpath('//meta[@name="description"]/@content').get()
            yield item
yield item
if __name__ == '__main__':
os.system('scrapy crawl futurepedia') | AYongmengnan/zimeiti | zimeiti/spiders/futurepedia.py | futurepedia.py | py | 3,916 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_nu... |
14199367630 | import cv2
import pickle as pkl
import time
import xgboost as xgb
import math
import numpy as np
import mediapipe as mp
from speak import speakText
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
width = 640
height = 480
hands = mp_hands.Hands(min_detection_confidence=0.6, min_tracking_confidence=0.75)
modelRight = pkl.load(open('./models/xgboost-model-dynamic-words-16-right-tuned', 'rb'))
modelLeft = pkl.load(open('./models/xgboost-model-dynamic-words-16-left-tuned', 'rb'))
start_time = time.time()
speakWaitTime = 1
labels = {
"0" : "me",
"1" : "you",
"2" : "hello",
"3" : "from",
"4" : "good",
"5" : "how",
"6" : "university",
"7" : "welcome",
"8" : "hope",
"9" : "like",
"10" : "new",
"11" : "people",
"12" : "technology",
"13" : "use",
"14" : "voice",
"15" : "create"
}
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
rightHandFirst = False
isMultiHand = False
# Initialised to True for first frame
addRightFrame = True
addLeftFrame = True
rightKeyFrames = []
leftKeyFrames = []
rightKeyCheckPoints = []
leftKeyCheckPoints = []
rightLabel = ''
rightProb = 0
leftLabel = ''
leftProb = 0
null_12 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
d_threshold = 0.1
connections = [
(1, 4), (5, 8), (9, 12), (13, 16), (17, 20)
]
r_c_x = 0
r_c_y = 0
l_c_x = 0
l_c_y = 0
r_p_x = 0
r_p_y = 0
l_p_x = 0
l_p_y = 0
def generatePointVectors(points, previousFrames):
    """Encode a hand frame as the palm origin plus five finger unit vectors.

    Each vector is the normalized direction of one entry in the module-level
    ``connections`` list, offset by the origin displacement (dx, dy) relative
    to the previous key frame so that inter-frame motion is captured.
    All values are rounded to 5 decimals.
    """
    # Origin of the previous key frame (0, 0 when there is none yet).
    if previousFrames:
        prev_origin_x, prev_origin_y = previousFrames[0], previousFrames[1]
    else:
        prev_origin_x = prev_origin_y = 0
    origin_x_rounded = round(points[0][0], 5)
    origin_y_rounded = round(points[0][1], 5)
    dx = origin_x_rounded - prev_origin_x
    dy = origin_y_rounded - prev_origin_y
    vectors = [origin_x_rounded, origin_y_rounded]
    for start, end in connections:
        x0, y0 = points[start]
        x1, y1 = points[end]
        span_x = x1 - x0
        span_y = y1 - y0
        mag = math.sqrt(span_x ** 2 + span_y ** 2)
        vectors.append(round(span_x / mag + dx, 5))
        vectors.append(round(span_y / mag + dy, 5))
    return vectors
def generateCheckPoints(points):
    """Return the [x, y] centroid of the palm, thumb tip, index tip and pinky tip.

    Used as a cheap motion signature: a new key frame is recorded only when
    this checkpoint moves far enough from the previous one.
    """
    # Landmarks 0 (palm), 4 (thumb), 8 (index), 20 (pinky).
    tracked = (points[0], points[4], points[8], points[20])
    mean_x = round((tracked[0][0] + tracked[1][0] + tracked[2][0] + tracked[3][0]) / 4, 5)
    mean_y = round((tracked[0][1] + tracked[1][1] + tracked[2][1] + tracked[3][1]) / 4, 5)
    return [mean_x, mean_y]
def checkPreviousFrame(currCheckPoints, prevCheckPoints):
    """Decide whether the hand moved enough since the last recorded key frame.

    Returns (moved, curr_x, curr_y, prev_x, prev_y); ``moved`` is True when the
    checkpoint centroid's displacement on either axis reaches the module-level
    ``d_threshold``.
    """
    current_dx = currCheckPoints[0]
    current_dy = currCheckPoints[1]
    prev_dx = prevCheckPoints[0]
    prev_dy = prevCheckPoints[1]
    dx = round(abs(current_dx - prev_dx), 5)
    dy = round(abs(current_dy - prev_dy), 5)
    moved = dx >= d_threshold or dy >= d_threshold
    if moved:
        # Typo fixed: the message previously read "Thresold crossed."
        print("Threshold crossed.")
    return moved, current_dx, current_dy, prev_dx, prev_dy
def recalculateFrames(frames):
    """Drop the oldest 12-value key frame and re-express the rest relative to the
    new first frame.

    Frames are flat 12-float groups (origin x/y + five finger vectors); slots
    0-11, 12-23, 24-35 and 36-47 hold up to four frames.  Later frames carry a
    displacement relative to frame 0's origin, so after discarding frame 0 each
    surviving frame's values are rebased onto the new first frame's origin.
    """
    cycledFrames = []
    cycledFrames.extend(frames)
    # Current Origin
    if(len(frames) > 12):
        base_x = cycledFrames[0]
        base_y = cycledFrames[1]
        # Displacement of frame 1 relative to the (old) origin frame.
        secondFrame_dx = cycledFrames[12] - base_x
        secondFrame_dy = cycledFrames[13] - base_y
        # New Origin
        new_base_x = cycledFrames[12]
        new_base_y = cycledFrames[13]
    if(len(frames) > 24):
        thirdFrame_dx = cycledFrames[24] - base_x
        thirdFrame_dy = cycledFrames[25] - base_y
        # New second frame
        new_secondFrame_dx = cycledFrames[24] - new_base_x
        new_secondFrame_dy = cycledFrames[25] - new_base_y
    if(len(frames) > 36):
        fourthFrame_dx = cycledFrames[36] - base_x
        fourthFrame_dy = cycledFrames[37] - base_y
        # New third frame
        new_thirdFrame_dx = cycledFrames[36] - new_base_x
        new_thirdFrame_dy = cycledFrames[37] - new_base_y
    i = 12
    while(i < 48):
        # Frame 1 (becoming the new origin): strip its old displacement entirely.
        if(i >= 14 and i < 24 and len(frames) > 12):
            cycledFrames[i] = round((cycledFrames[i] - secondFrame_dx), 5)
            cycledFrames[i + 1] = round((cycledFrames[i + 1] - secondFrame_dy), 5)
        # Frame 2: swap its old-origin displacement for the new-origin one.
        elif(i >= 26 and i < 36 and len(frames) > 24):
            original_keyframe_dx = cycledFrames[i] - thirdFrame_dx
            original_keyframe_dy = cycledFrames[i + 1] - thirdFrame_dy
            cycledFrames[i] = round(original_keyframe_dx + new_secondFrame_dx, 5)
            cycledFrames[i + 1] = round(original_keyframe_dy + new_secondFrame_dy, 5)
        # Frame 3: same rebasing as frame 2.
        elif(i >= 38 and i < 48 and len(frames) > 36):
            original_keyframe_dx = cycledFrames[i] - fourthFrame_dx
            original_keyframe_dy = cycledFrames[i + 1] - fourthFrame_dy
            cycledFrames[i] = round(original_keyframe_dx + new_thirdFrame_dx, 5)
            cycledFrames[i + 1] = round(original_keyframe_dy + new_thirdFrame_dy, 5)
        i = i + 2
    # 0 - 11
    # 12 - 23
    # 24 - 35
    # 36 - 47
    # Cycle out
    cycledFrames = cycledFrames[12:]
    return cycledFrames
def preprocessData(frames):
    """Pad the key-frame buffer to the 4-frame (48-float) layout and build the four
    cumulative classifier inputs.

    Group k contains the first k+1 frames followed by null padding, so the model
    can be scored on every prefix of the gesture seen so far.  Returns four
    xgb.DMatrix objects (1-, 2-, 3- and 4-frame views).
    """
    dataToProcess = []
    dataToProcess.extend(frames)
    # Pad the working copy with 12-zero "null frames" up to 48 values.
    if(len(dataToProcess) != 48):
        if(len(dataToProcess) == 12):
            dataToProcess.extend(null_12)
            dataToProcess.extend(null_12)
            dataToProcess.extend(null_12)
        elif(len(dataToProcess) == 24):
            dataToProcess.extend(null_12)
            dataToProcess.extend(null_12)
        elif(len(dataToProcess) == 36):
            dataToProcess.extend(null_12)
        else:
            print("Error in preprocessData. Length of dataToProcess: ", len(dataToProcess))
    # Cumulative prefix views, each padded back to 48 values.
    group_0 = dataToProcess[:12] + null_12 + null_12 + null_12
    group_1 = dataToProcess[:24] + null_12 + null_12
    group_2 = dataToProcess[:36] + null_12
    group_3 = dataToProcess[:48]
    # Build each DMatrix exactly once and return it.  (The original constructed
    # all four into unused locals and then rebuilt them in the return statement,
    # doubling the conversion work.)
    inputData_0 = xgb.DMatrix(np.array(group_0).data)
    inputData_1 = xgb.DMatrix(np.array(group_1).data)
    inputData_2 = xgb.DMatrix(np.array(group_2).data)
    inputData_3 = xgb.DMatrix(np.array(group_3).data)
    return inputData_0, inputData_1, inputData_2, inputData_3
def classification(inputData_0, inputData_1, inputData_2, inputData_3, model):
    """Score the four prefix views of the gesture and return the most confident label.

    Each inputData_k is one DMatrix produced by preprocessData; the view whose
    best class probability is highest wins, and ties go to the shortest prefix
    (matching the original's default of view 0).

    Returns (label, probability).
    """
    prob_lists = [
        model.predict(inputData_0)[0],
        model.predict(inputData_1)[0],
        model.predict(inputData_2)[0],
        model.predict(inputData_3)[0],
    ]
    max_probs = [np.amax(prob_list) for prob_list in prob_lists]
    # Fix: the original if/elif chain required each candidate to strictly beat
    # *all* other views, so when two views tied for the maximum no branch fired
    # and the result silently fell back to view 0.  argmax always selects the
    # first view holding the true maximum.
    best = int(np.argmax(max_probs))
    label = labels["{}".format(np.argmax(prob_lists[best], axis=0))]
    return label, max_probs[best]
def cleanUp(frames, model):
    """Flush the remaining key frames once a hand leaves the camera view.

    Cycles the buffer down one frame at a time, re-classifying after every
    cycle so the final prediction reflects the tail of the gesture.  Returns
    an emptied frame list plus the last (label, probability); when no model is
    given the buffer is simply discarded.
    """
    remaining = list(frames)
    label, prob = '', 0
    if model is None:
        return [], label, prob
    while remaining:
        remaining = recalculateFrames(remaining)
        if remaining:
            # Preprocess the shortened buffer, then classify it.
            view0, view1, view2, view3 = preprocessData(remaining)
            label, prob = classification(view0, view1, view2, view3, model)
    return [], label, prob
# Main capture loop: track hands with MediaPipe, buffer key frames per hand,
# classify the running gesture, and speak the most recent right-hand label.
while cap.isOpened():
    success, image = cap.read()
    if not success:
        break
    # Flip the image horizontally for a later selfie-view display, and convert the BGR image to RGB.
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    # To improve performance, optionally mark the image as not writeable to pass by reference.
    image.flags.writeable = False
    results = hands.process(image)
    # Determine how many hands are visible and which one was detected first.
    if(results.multi_handedness):
        if(len(results.multi_handedness) == 1):
            isMultiHand = False
        else:
            isMultiHand = True
        # results.multi_handedness[0] is first detected hand
        if(results.multi_handedness[0].classification[0].index == 0): # Index 0 is Left, 1 is Right
            rightHandFirst = False
        else:
            rightHandFirst = True
    # Draw the hand annotations on the image.
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    if results.multi_hand_landmarks:
        rightHandPoints = []
        leftHandPoints = []
        rightVectors = []
        leftVectors = []
        rightCheckPoints = []
        leftCheckPoints = []
        for hand, hand_landmarks in enumerate(results.multi_hand_landmarks):
            # Route each detected hand's landmarks into the right/left point lists.
            if(rightHandFirst): # First hand (0) is Right, Second hand (1) is Left
                if(hand == 0):
                    for idx, landmark in enumerate(hand_landmarks.landmark):
                        rightHandPoints.append((landmark.x, landmark.y))
                else:
                    for idx, landmark in enumerate(hand_landmarks.landmark):
                        leftHandPoints.append((landmark.x, landmark.y))
            else: # First hand (0) is Left, Second hand (1) is Right
                if(hand == 0):
                    for idx, landmark in enumerate(hand_landmarks.landmark):
                        leftHandPoints.append((landmark.x, landmark.y))
                else:
                    for idx, landmark in enumerate(hand_landmarks.landmark):
                        rightHandPoints.append((landmark.x, landmark.y))
            # Two-hand case: process both buffers together once the second hand
            # (hand == 1) has been collected.
            if(isMultiHand):
                if(hand == 1):
                    if(len(rightHandPoints) != 0 and len(leftHandPoints) != 0):
                        rightVectors = generatePointVectors(rightHandPoints, rightKeyFrames)
                        rightCheckPoints = generateCheckPoints(rightHandPoints)
                        leftVectors = generatePointVectors(leftHandPoints, leftKeyFrames)
                        leftCheckPoints = generateCheckPoints(leftHandPoints)
                        # Keep each buffer under the 4-frame (48-value) limit.
                        if(len(rightKeyFrames) == 48):
                            rightKeyFrames = recalculateFrames(rightKeyFrames)
                            print("Right Frame Cycled:", len(rightKeyFrames))
                        if(len(leftKeyFrames) == 48):
                            leftKeyFrames = recalculateFrames(leftKeyFrames)
                            print("Left Frame Cycled:", len(leftKeyFrames))
                        if(addRightFrame == True or addLeftFrame == True):
                            rightKeyFrames.extend(rightVectors)
                            rightKeyCheckPoints.extend(rightCheckPoints)
                            leftKeyFrames.extend(leftVectors)
                            leftKeyCheckPoints.extend(leftCheckPoints)
                            print("Right Added: ", len(rightKeyFrames), "Left Added: ", len(leftKeyFrames))
                            # Preprocess
                            r_set0, r_set1, r_set2, r_set3 = preprocessData(rightKeyFrames)
                            l_set0, l_set1, l_set2, l_set3 = preprocessData(leftKeyFrames)
                            # Classify
                            rightLabel, rightProb = classification(r_set0, r_set1, r_set2, r_set3, modelRight)
                            leftLabel, leftProb = classification(l_set0, l_set1, l_set2, l_set3, modelLeft)
                            addRightFrame = False
                            addLeftFrame = False
                        else:
                            # No frame added: compare checkpoints to decide whether
                            # the next iteration should record a key frame.
                            if(len(rightKeyCheckPoints) == 0):
                                rightKeyCheckPoints.extend(rightCheckPoints)
                            else:
                                addRightFrame, r_c_x, r_c_y, r_p_x, r_p_y = checkPreviousFrame(rightCheckPoints, rightKeyCheckPoints)
                            if(len(leftKeyCheckPoints) == 0):
                                leftKeyCheckPoints.extend(leftCheckPoints)
                            else:
                                addLeftFrame, l_c_x, l_c_y, l_p_x, l_p_y = checkPreviousFrame(leftCheckPoints, leftKeyCheckPoints)
                            if(addRightFrame == True or addLeftFrame == True):
                                rightKeyCheckPoints = []
                                leftKeyCheckPoints = []
                                if(len(rightKeyFrames) == 48):
                                    rightKeyFrames = recalculateFrames(rightKeyFrames)
                                    print("Right Frame Cycled:", len(rightKeyFrames))
                                if(len(leftKeyFrames) == 48):
                                    leftKeyFrames = recalculateFrames(leftKeyFrames)
                                    print("Left Frame Cycled:", len(leftKeyFrames))
                    # Visualize current (blue/cyan-ish) and previous (green/yellow-ish) checkpoints.
                    cv2.circle(image, (int(r_c_x * width), int(r_c_y * height)), 3, (255, 0, 0), 2)
                    cv2.circle(image, (int(l_c_x * width), int(l_c_y * height)), 3, (255, 255, 0), 2)
                    cv2.circle(image, (int(r_p_x * width), int(r_p_y * height)), 3, (0, 255, 0), 2)
                    cv2.circle(image, (int(l_p_x * width), int(l_p_y * height)), 3, (0, 255, 255), 2)
            else:
                # Single-hand case: process whichever hand is present and flush the other's buffer.
                if(len(rightHandPoints) != 0):
                    rightVectors = generatePointVectors(rightHandPoints, rightKeyFrames)
                    rightCheckPoints = generateCheckPoints(rightHandPoints)
                    if(addRightFrame == True):
                        rightKeyFrames.extend(rightVectors)
                        rightKeyCheckPoints.extend(rightCheckPoints)
                        print("Right Frame Added: ", len(rightKeyFrames))
                        # Preprocess
                        r_set0, r_set1, r_set2, r_set3 = preprocessData(rightKeyFrames)
                        # Classify
                        rightLabel, rightProb = classification(r_set0, r_set1, r_set2, r_set3, modelRight)
                        leftLabel = ''
                        leftProb = 0
                        addRightFrame = False
                    else:
                        if(len(rightKeyCheckPoints) == 0):
                            rightKeyCheckPoints.extend(rightCheckPoints)
                        else:
                            addRightFrame, r_c_x, r_c_y, r_p_x, r_p_y = checkPreviousFrame(rightCheckPoints, rightKeyCheckPoints)
                        if(addRightFrame == True):
                            rightKeyCheckPoints = []
                            if(len(rightKeyFrames) == 48):
                                rightKeyFrames = recalculateFrames(rightKeyFrames)
                                print("Right Frame Cycled:", len(rightKeyFrames))
                        # Left hand is gone: discard its pending frames without classifying.
                        if(len(leftKeyFrames) != 0):
                            leftKeyFrames, leftLabel, leftProb = cleanUp(leftKeyFrames, None)
                            leftLabel = ''
                if(len(leftHandPoints) != 0):
                    leftVectors = generatePointVectors(leftHandPoints, leftKeyFrames)
                    leftCheckPoints = generateCheckPoints(leftHandPoints)
                    if(addLeftFrame == True):
                        leftKeyFrames.extend(leftVectors)
                        leftKeyCheckPoints.extend(leftCheckPoints)
                        print("Left Frame Added: ", len(leftKeyFrames))
                        # Preprocess
                        l_set0, l_set1, l_set2, l_set3 = preprocessData(leftKeyFrames)
                        # Classify
                        leftLabel, leftProb = classification(l_set0, l_set1, l_set2, l_set3, modelLeft)
                        rightLabel = ''
                        rightProb = 0
                        addLeftFrame = False
                    else:
                        if(len(leftKeyCheckPoints) == 0):
                            leftKeyCheckPoints.extend(leftCheckPoints)
                        else:
                            addLeftFrame, l_c_x, l_c_y, l_p_x, l_p_y = checkPreviousFrame(leftCheckPoints, leftKeyCheckPoints)
                        if(addLeftFrame == True):
                            leftKeyCheckPoints = []
                            if(len(leftKeyFrames) == 48):
                                leftKeyFrames = recalculateFrames(leftKeyFrames)
                                print("Left Frame Cycled:", len(leftKeyFrames))
                    # Right hand is gone: discard its pending frames without classifying.
                    if(len(rightKeyFrames) != 0):
                        rightKeyFrames, rightLabel, rightProb = cleanUp(rightKeyFrames, None)
                        rightLabel = ''
                cv2.circle(image, (int(r_c_x * width), int(r_c_y * height)), 3, (255, 0, 0), 2)
                cv2.circle(image, (int(l_c_x * width), int(l_c_y * height)), 3, (255, 255, 0), 2)
                cv2.circle(image, (int(r_p_x * width), int(r_p_y * height)), 3, (0, 255, 0), 2)
                cv2.circle(image, (int(l_p_x * width), int(l_p_y * height)), 3, (0, 255, 255), 2)
            mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
    else:
        # No hands in frame: flush both buffers through their models for a final prediction.
        if(len(rightKeyFrames) != 0):
            rightKeyFrames, rightLabel, rightProb = cleanUp(rightKeyFrames, modelRight)
            rightLabel = ''
        if(len(leftKeyFrames) != 0):
            leftKeyFrames, leftLabel, leftProb = cleanUp(leftKeyFrames, modelLeft)
            leftLabel = ''
    # Overlay the current right-hand prediction and its probability.
    cv2.putText(image, rightLabel, (width - 200, height - 10), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2, 1)
    cv2.putText(image, str(rightProb), (10, height - 10), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2, 1)
    # Calculate FPS
    # Speak at most once per speakWaitTime seconds.
    if (time.time() - start_time) > speakWaitTime :
        # Speak
        if rightLabel:
            speakText(rightLabel)
        start_time = time.time()
    cv2.imshow('MediaPipe Hands', image)
    # ESC (27) quits.
    if cv2.waitKey(5) & 0xFF == 27:
        break
hands.close()
cap.release()
{
"api_name": "mediapipe.solutions",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pickle.... |
43751185703 | from __future__ import unicode_literals
import os
import tempfile
import unittest
from textwrap import dedent
import mock
import six
import rows
import rows.plugins.postgresql
import rows.plugins.utils
import tests.utils as utils
from rows import fields
from rows.plugins.postgresql import pgconnect
from rows.utils import Source
class PluginPostgreSQLTestCase(utils.RowsTestMixIn, unittest.TestCase):
plugin_name = "postgresql"
override_fields = {
"bool_column": fields.BoolField,
"percent_column": fields.FloatField,
}
uri = os.environ["POSTGRESQL_URI"]
expected_meta = {
"imported_from": "postgresql",
"source": Source(uri=uri, plugin_name=plugin_name, encoding=None),
}
def get_table_names(self):
connection = pgconnect(self.uri)
cursor = connection.cursor()
cursor.execute(rows.plugins.postgresql.SQL_TABLE_NAMES)
header = [item[0] for item in cursor.description]
result = [dict(zip(header, row))["tablename"] for row in cursor.fetchall()]
cursor.close()
connection.close()
return result
def tearDown(self):
connection = pgconnect(self.uri)
for table in self.get_table_names():
if table.startswith("rows_"):
cursor = connection.cursor()
cursor.execute("DROP TABLE " + table)
cursor.close()
connection.commit()
connection.close()
def test_imports(self):
self.assertIs(
rows.import_from_postgresql, rows.plugins.postgresql.import_from_postgresql
)
self.assertIs(
rows.export_to_postgresql, rows.plugins.postgresql.export_to_postgresql
)
@mock.patch("rows.plugins.postgresql.create_table")
def test_import_from_postgresql_uses_create_table(self, mocked_create_table):
mocked_create_table.return_value = 42
kwargs = {"encoding": "test", "some_key": 123, "other": 456}
rows.export_to_postgresql(utils.table, self.uri, table_name="rows_1")
result = rows.import_from_postgresql(self.uri, table_name="rows_1", **kwargs)
self.assertTrue(mocked_create_table.called)
self.assertEqual(mocked_create_table.call_count, 1)
self.assertEqual(result, 42)
call = mocked_create_table.call_args
meta = call[1].pop("meta")
source = meta.pop("source")
expected_meta = self.expected_meta.copy()
expected_source = expected_meta.pop("source")
self.assertEqual(call[1], kwargs)
self.assertEqual(meta, expected_meta)
self.assertEqual(expected_source.uri, source.uri)
@unittest.skipIf(six.PY2, "psycopg2 on Python2 returns binary, skippging test")
@mock.patch("rows.plugins.postgresql.create_table")
def test_import_from_postgresql_retrieve_desired_data(self, mocked_create_table):
mocked_create_table.return_value = 42
connection, table_name = rows.export_to_postgresql(
utils.table, self.uri, table_name="rows_2"
)
self.assertTrue(connection.closed)
# import using uri
table_1 = rows.import_from_postgresql(
self.uri, close_connection=True, table_name="rows_2"
)
call_args = mocked_create_table.call_args_list[0]
self.assert_create_table_data(call_args, expected_meta=self.expected_meta)
# import using connection
connection = pgconnect(self.uri)
table_2 = rows.import_from_postgresql(
connection, close_connection=False, table_name="rows_2"
)
self.assertFalse(connection.closed)
connection_type = type(connection)
connection.close()
call_args = mocked_create_table.call_args_list[1]
meta = call_args[1].pop("meta")
call_args[1]["meta"] = {}
self.assert_create_table_data(call_args, expected_meta={})
self.assertTrue(isinstance(meta["source"].fobj, connection_type))
def test_postgresql_injection(self):
with self.assertRaises(ValueError):
rows.import_from_postgresql(
self.uri, table_name=('table1","postgresql_master')
)
with self.assertRaises(ValueError):
rows.export_to_postgresql(
utils.table, self.uri, table_name='table1", "postgresql_master'
)
@unittest.skipIf(six.PY2, "psycopg2 on Python2 returns binary, skippging test")
def test_export_to_postgresql_uri(self):
rows.export_to_postgresql(utils.table, self.uri, table_name="rows_3")
table = rows.import_from_postgresql(self.uri, table_name="rows_3")
self.assert_table_equal(table, utils.table)
@unittest.skipIf(six.PY2, "psycopg2 on Python2 returns binary, skippging test")
def test_export_to_postgresql_connection(self):
connection = pgconnect(self.uri)
rows.export_to_postgresql(
utils.table, connection, close_connection=True, table_name="rows_4"
)
table = rows.import_from_postgresql(self.uri, table_name="rows_4")
self.assert_table_equal(table, utils.table)
@unittest.skipIf(six.PY2, "psycopg2 on Python2 returns binary, skippging test")
def test_export_to_postgresql_create_unique_table_name(self):
first_table = utils.table
second_table = utils.table + utils.table
table_names_before = self.get_table_names()
rows.export_to_postgresql(
first_table, self.uri, table_name_format="rows_{index}"
)
table_names_after = self.get_table_names()
rows.export_to_postgresql(
second_table, self.uri, table_name_format="rows_{index}"
)
table_names_final = self.get_table_names()
diff_1 = list(set(table_names_after) - set(table_names_before))
diff_2 = list(set(table_names_final) - set(table_names_after))
self.assertEqual(len(diff_1), 1)
self.assertEqual(len(diff_2), 1)
new_table_1 = diff_1[0]
new_table_2 = diff_2[0]
result_first_table = rows.import_from_postgresql(
self.uri, table_name=new_table_1
)
result_second_table = rows.import_from_postgresql(
self.uri, table_name=new_table_2
)
self.assert_table_equal(result_first_table, first_table)
self.assert_table_equal(result_second_table, second_table)
@unittest.skipIf(six.PY2, "psycopg2 on Python2 returns binary, skippging test")
def test_export_to_postgresql_forcing_table_name_appends_rows(self):
repeat = 3
for _ in range(repeat):
rows.export_to_postgresql(utils.table, self.uri, table_name="rows_7")
expected_table = utils.table
for _ in range(repeat - 1):
expected_table += utils.table
result_table = rows.import_from_postgresql(self.uri, table_name="rows_7")
self.assertEqual(len(result_table), repeat * len(utils.table))
self.assert_table_equal(result_table, expected_table)
@mock.patch("rows.plugins.postgresql.prepare_to_export")
def test_export_to_postgresql_prepare_to_export(self, mocked_prepare_to_export):
encoding = "iso-8859-15"
kwargs = {"test": 123, "parameter": 3.14}
mocked_prepare_to_export.return_value = iter(
rows.plugins.utils.prepare_to_export(utils.table)
)
rows.export_to_postgresql(
utils.table, self.uri, encoding=encoding, table_name="rows_8", **kwargs
)
self.assertTrue(mocked_prepare_to_export.called)
self.assertEqual(mocked_prepare_to_export.call_count, 1)
call = mocked_prepare_to_export.call_args
self.assertEqual(call[0], (utils.table,))
kwargs["encoding"] = encoding
self.assertEqual(call[1], kwargs)
def test_import_from_postgresql_query_args(self):
connection, table_name = rows.export_to_postgresql(
utils.table, self.uri, close_connection=False, table_name="rows_9"
)
table = rows.import_from_postgresql(
connection,
query="SELECT * FROM rows_9 WHERE float_column > %s",
query_args=(3,),
)
for row in table:
self.assertTrue(row.float_column > 3)
def test_pgimport_force_null(self):
temp = tempfile.NamedTemporaryFile()
filename = "{}.csv".format(temp.name)
temp.close()
self.files_to_delete.append(filename)
with open(filename, mode="wb") as fobj:
fobj.write(
dedent(
"""
field1,field2
"","4"
,2
"""
)
.strip()
.encode("utf-8")
)
rows.utils.pgimport(
filename=filename,
database_uri=self.uri,
table_name="rows_force_null",
)
table = rows.import_from_postgresql(self.uri, "rows_force_null")
self.assertIs(table[0].field1, None)
self.assertEqual(table[0].field2, 4)
self.assertIs(table[1].field1, None)
self.assertEqual(table[1].field2, 2)
| turicas/rows | tests/tests_plugin_postgresql.py | tests_plugin_postgresql.py | py | 9,172 | python | en | code | 851 | github-code | 36 | [
{
"api_name": "tests.utils.RowsTestMixIn",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tests.utils",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "row... |
30085141101 | from typing import TypedDict
from gdbdash.commands import BoolOption, StrOption
from gdbdash.modules import Module
from gdbdash.utils import FileDescriptorOrPath
DashboardOptions = TypedDict(
"DashboardOptions",
{
"text-highlight": StrOption,
"text-secondary": StrOption,
"text-divider": StrOption,
"text-divider-title": StrOption,
"divider-fill-char": StrOption,
"show-divider": BoolOption,
},
)
DashboardModulesDict = dict[FileDescriptorOrPath, list[Module]]
class Dashboard:
def on_order_changed(self) -> None: ...
| JensDll/dotfiles | unix/.config/gdbdash/gdbdash/dashboard.pyi | dashboard.pyi | pyi | 586 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TypedDict",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "gdbdash.commands.StrOption",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "gdbdash.commands.StrOption",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": ... |
7939019332 | from itertools import combinations
def answer(l):
l.sort(reverse=True)
for i in range(len(l), 0, -1):
for c in itertools.combinations(l, i):
if sum(c) % 3 == 0:
return int(''.join(map(str, c)))
return 0
| deepspacepirate/googlefoobar | L2-please_pass_the_coded_messages.py | L2-please_pass_the_coded_messages.py | py | 235 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.combinations",
"line_number": 7,
"usage_type": "call"
}
] |
20157445865 | from collections import defaultdict
from itertools import permutations
class DinnerTable:
def __init__(self, instructions) -> None:
self.ins = instructions
self.happiness_scores = {}
self.guest_list = []
def process_happiness_scores(self):
lines = [line for line in self.ins.split("\n") if line != ""]
for line in lines:
elements = line.split()
name = elements[0].strip()
pos_neg = 0
if elements[2].strip() == "gain":
pos_neg = 1
else:
pos_neg = -1
score = int(elements[3].strip())
neighbour = elements[-1][:-1]
if name not in self.happiness_scores.keys():
self.happiness_scores[name] = {}
self.happiness_scores[name].update({neighbour: score * pos_neg})
self.guest_list = list(self.happiness_scores.keys())
def add_host_to_happiness_scores(self):
self.happiness_scores["Host"] = {}
self.guest_list = list(self.happiness_scores.keys())
for guest in self.guest_list:
self.happiness_scores["Host"].update({guest: 0})
self.happiness_scores[guest].update({"Host": 0})
def seating_plan_naive(self):
possible_plans = permutations(self.guest_list)
possible_plans = [p + (p[0],) for p in possible_plans]
best_plan = []
best_plan_score = 0
for plan in possible_plans:
happiness = 0
first = plan[0]
for neighbour in plan[1:]:
happiness += self.happiness_scores[first][neighbour]
happiness += self.happiness_scores[neighbour][first]
first = neighbour
if happiness > best_plan_score:
best_plan_score = happiness
best_plan = plan
print(
f"The happiest plan {best_plan} with a happiness score of {best_plan_score}."
)
return best_plan_score
if __name__ == "__main__":
with open("input.txt", "r") as f:
instructions = f.read()
dt = DinnerTable(instructions)
dt.process_happiness_scores()
dt.add_host_to_happiness_scores()
dt.seating_plan_naive()
| davidcolton/adventofcode | 2015/day_13/dinner.py | dinner.py | py | 2,243 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.permutations",
"line_number": 37,
"usage_type": "call"
}
] |
37350691317 | from ase import Atom, Atoms
from ase.units import Bohr
from gpaw import GPAW
from gpaw.test import equal
def test_xc_nonselfconsistentLDA(in_tmp_dir):
a = 7.5 * Bohr
n = 16
atoms = Atoms([Atom('He', (0.0, 0.0, 0.0))], cell=(a, a, a), pbc=True)
calc = GPAW(gpts=(n, n, n), nbands=1, xc='LDA')
atoms.calc = calc
e1 = atoms.get_potential_energy()
e1ref = calc.get_reference_energy()
de12 = calc.get_xc_difference({'name': 'PBE', 'stencil': 1})
calc = GPAW(gpts=(n, n, n), nbands=1, xc={'name': 'PBE', 'stencil': 1})
atoms.calc = calc
e2 = atoms.get_potential_energy()
e2ref = calc.get_reference_energy()
de21 = calc.get_xc_difference('LDA')
print(e1ref + e1 + de12, e2ref + e2)
print(e1ref + e1, e2ref + e2 + de21)
print(de12, de21)
equal(e1ref + e1 + de12, e2ref + e2, 0.02)
equal(e1ref + e1, e2ref + e2 + de21, 0.025)
calc.write('PBE.gpw')
de21b = GPAW('PBE.gpw').get_xc_difference('LDA')
print(de21, de21b)
equal(de21, de21b, 9e-8)
energy_tolerance = 0.0007
equal(e1, -0.0961003634812, energy_tolerance) # svnversion 5252
equal(e2, -0.0790249564625, energy_tolerance) # svnversion 5252
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/test/xc/test_nonselfconsistentLDA.py | test_nonselfconsistentLDA.py | py | 1,193 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ase.units.Bohr",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "ase.Atoms",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "ase.Atom",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "gpaw.GPAW",
"line_number": 11,
... |
31826768248 | # make some tests
import os, glob
from pprint import pprint
from importlib import reload
import designspaceProblems
reload(designspaceProblems)
import designspaceProblems.problems
reload(designspaceProblems.problems)
from designspaceProblems.problems import DesignSpaceProblem, allProblems
from designspaceProblems import DesignSpaceChecker
import ufoProcessor
from fontTools.designspaceLib import SourceDescriptor, InstanceDescriptor, AxisDescriptor, DiscreteAxisDescriptor, RuleDescriptor, processRules
from ufoProcessor import DesignSpaceProcessor, getUFOVersion, getLayer
from fontParts.fontshell import RFont
import fontTools
print("fontTools source:", fontTools.__file__)
print("AxisDescriptor class:", AxisDescriptor)
def printProblems(dc):
for pr in dc.problems:
print(pr)
testedProblems = {}
def showProblems(dc):
global testedProblems
for pr in dc.problems:
key = (pr.category,pr.problem)
if not key in testedProblems:
testedProblems[key] = 0
testedProblems[key] += 1
def showUntested():
global testedProblems
# these problems can't be tested because UFOprocessor already ignores these faults
untestable = [(1,1), (1,2), (1,3), (1,4), (1,5), (1,6), (1,7),
(2, 4), (2,5), (3, 2),
(6, 0), (6, 1), (6, 2), (6, 3),
(4, 5),
]
print("\n\nTested problems")
app = allProblems()
for ap in list(app.keys()):
if ap in testedProblems:
print("✅", ap, app.get(ap))
elif ap in untestable:
print("❔", ap, app.get(ap))
else:
print("❌", ap, app.get(ap))
def makeTests():
path = os.getcwd()
errs = designspaceProblems.problems.allProblems()
# empty designspace
d1 = DesignSpaceProcessor()
tp = os.path.join(path, "empty.designspace")
d1.write(tp)
dc1 = DesignSpaceChecker(tp)
dc1.checkEverything()
showProblems(dc1)
assert (1,0) in dc1.problems # no axes defined
assert (2,0) in dc1.problems # no sources defined
assert (2,7) in dc1.problems # no source on default location
assert (3,10) in dc1.problems # no instances defined
# # malformed file
# d2 = DesignSpaceProcessor()
# tp = os.path.join(path, "malformed_file.designspace")
# d2.write(tp)
# f = open(tp, 'r')
# d2 = f.read()
# f.close()
# d2 += "garbage"*100
# f = open(tp, 'w')
# f.write(d2)
# f.close()
# dc2 = DesignSpaceChecker(tp)
# dc2.checkEverything()
# showProblems(dc2)
# assert (0,0) in dc2.problems # no axes defined
# assert (1,0) in dc2.problems # no axes defined
# assert (2,0) in dc2.problems # no sources defined
# assert (2,7) in dc2.problems # no source on default location
# assert (3,10) in dc2.problems # no instances defined
# # malformed axes
# d3 = DesignSpaceProcessor()
# tp = os.path.join(path, "malformed_axis.designspace")
# a31 = AxisDescriptor()
# a31.name = "snap"
# a31.minimum = 1000
# a31.maximum = 1000
# a31.default = 1000
# a31.tag = "1111"
# d3.addAxis(a31)
# a32 = AxisDescriptor()
# a32.name = "crackle"
# a32.minimum = 0
# a32.maximum = 1000
# a32.default = -1000
# a32.tag = "CRCK"
# d3.addAxis(a32)
# d3.write(tp)
# dc3 = DesignSpaceChecker(tp)
# dc3.checkEverything()
# showProblems(dc3)
# assert (1,9) in dc3.problems # minimum and maximum value are the same
# assert (1,10) in dc3.problems # minimum and maximum value are the same
# assert (2,0) in dc3.problems # no sources defined
# assert (2,7) in dc3.problems # no source on default location
# assert (3,10) in dc3.problems # no instances defined
# designspace with discrete axis
d4 = DesignSpaceProcessor()
tp = os.path.join(path, "discrete_axes.designspace")
# this designspace has 2 axes: one continuous, one discrete
# we'll add a default for discrete crackle=0, but not a discrete crackle=500
a41 = AxisDescriptor()
a41.name = "snap"
a41.minimum = 0
a41.maximum = 1000
a41.default = 0
a41.tag = "SNAP"
d4.addAxis(a41)
a42 = DiscreteAxisDescriptor()
a42.name = "crackle"
a42.default = 0
a42.values = [0, 500]
a42.tag = "CRCK"
print(a42.values)
d4.addAxis(a42)
s41 = SourceDescriptor()
s41.location = dict(snap=0, crackle=0)
s41.path = os.path.join(path, 'masters','geometryMaster1.ufo')
d4.addSource(s41)
s42 = SourceDescriptor()
s42.location = dict(snap=0, crackle=1000)
s42.path = os.path.join(path, 'masters','geometryMaster2.ufo')
d4.addSource(s42)
d4.write(tp)
dc4 = DesignSpaceChecker(tp)
dc4.checkEverything()
printProblems(dc4)
showProblems(dc4)
#print("checkLocationForIllegalDiscreteValues", dc4.checkLocationForIllegalDiscreteValues(dict(snap=0,crackle=1000)))
assert (2,13) in dc4.problems
#assert (1,0) in dc.problems # no axes defined
#assert (2,0) in dc.problems # no sources defined
#assert (2,7) in dc.problems # no source on default location
#assert (3,10) in dc.problems # no instances defined
# ok axis, a source, but no default
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "no_default.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=500)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (2,7) in dc.problems # no source on default location
# # ok axis, multiple sources on default
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "multiple_defaults.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# s2 = SourceDescriptor()
# s2.location = dict(snap=0)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (2,8) in dc.problems # multiple sources on default location
# assert (2,1) not in dc.problems # not: source UFO missing
# # ok axis, source without location
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "source-without-location.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# s2 = SourceDescriptor()
# s2.location = dict(snap=(10,11))
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (2,10) in dc.problems # source location is anisotropic
# # ok space, no kerning in default
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "no-kerning-in-default.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1_no_kerning.ufo')
# d.addSource(s1)
# s2 = SourceDescriptor()
# s2.location = dict(snap=1000)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (5,1) in dc.problems # ok axis, source without location
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "source-without-location.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# s2 = SourceDescriptor()
# s2.location = dict(snap=(10,11))
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (2,10) in dc.problems # source location is anisotropic
# # ok space, missing UFO
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "source-ufo-missing.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# a2 = AxisDescriptor()
# a2.name = "pop"
# a2.minimum = 0
# a2.maximum = 1000
# a2.default = 0
# a2.tag = "pop_"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# s2 = SourceDescriptor()
# s2.location = dict(snap=500)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# s2.layerName = "missing_layer"
# d.addSource(s2)
# s3 = SourceDescriptor()
# s3.location = dict(snap=1000)
# s3.path = os.path.join(path, 'masters','geometryMaster_missing.ufo')
# d.addSource(s3)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (2,1) in dc.problems # source location is anisotropic
# assert (2,3) in dc.problems # source layer missing
# # multiple ssources in same location
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "multiple_sources_on_same_location.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# #s1.name = "master.1"
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# #s2.name = "master.2"
# for i in range(3):
# s2 = SourceDescriptor()
# s2.location = dict(snap=1500)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(d)
# dc.checkEverything()
# showProblems(dc)
# # instance without location
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "instance_without_location.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# #s1.name = "master.1"
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# #s2.name = "master.2"
# s2 = SourceDescriptor()
# s2.location = dict(snap=1000)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# jd = InstanceDescriptor()
# jd.familyName = None
# jd.styleName = None
# jd.location = None
# jd.path = None
# d.addInstance(jd)
# for i in range(3):
# jd = InstanceDescriptor()
# jd.familyName = "Duped"
# jd.styleName = "Duped"
# jd.location = dict(snap=666)
# jd.path = "some/path.ufo"
# d.addInstance(jd)
# d.write(tp)
# dc = DesignSpaceChecker(d)
# dc.checkEverything()
# showProblems(dc)
# #assert (3,1) in dc.problems # instance location missing
# assert (3,4) in dc.problems # multiple instances on location*
# # mapping tests
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "axismapping.designspace")
# a1 = AxisDescriptor()
# a1.name = "ok_axis"
# a1.minimum = 200
# a1.maximum = 800
# a1.default = 200
# a1.tag = "ax01"
# a1.map = [(200,0), (500, 500), (800, 1000)] # map is ok
# d.addAxis(a1)
# a2 = AxisDescriptor()
# a2.name = "input_regression_axis"
# a2.minimum = 200
# a2.maximum = 800
# a2.default = 500
# a2.tag = "ax02"
# a2.map = [(200,100), (190, 150), (800, 200)] # input regresses ok, output ok
# d.addAxis(a2)
# a3 = AxisDescriptor()
# a3.name = "output_regression_axis"
# a3.minimum = 500
# a3.maximum = 800
# a3.default = 600
# a3.tag = "ax03"
# a3.map = [(500,0), (600, 500), (700, 700), (800, 690)] # input progresses ok, output regresses
# d.addAxis(a3)
# a4 = AxisDescriptor()
# a4.name = "mixedup_extremes_axis"
# a4.minimum = 1000
# a4.maximum = 800
# a4.default = 0
# a4.tag = "ax04"
# d.addAxis(a4)
# s1 = SourceDescriptor()
# #s1.name = "master.1"
# s1.location = dict(ok_axis=a1.default, output_regression_axis=a3.default)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# #s2.name = "master.2"
# s2 = SourceDescriptor()
# s2.location = dict(ok_axis=a1.default, output_regression_axis=a3.maximum)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(d)
# dc.checkEverything()
# assert (1,11) in dc.problems
# assert (1,12) in dc.problems
# showProblems(dc)
# # ok axis, ok sources
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "viable.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# #s1.name = "master.1"
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# #s2.name = "master.2"
# s2 = SourceDescriptor()
# s2.location = dict(snap=1000)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# s3 = SourceDescriptor()
# s3.location = dict(snap=500)
# s3.path = os.path.join(path, 'masters','geometryMaster3.ufo') # bad kerning
# d.addSource(s3)
# jd = InstanceDescriptor()
# jd.familyName = "TestFamily"
# jd.styleName = "TestStyle"
# jd.location = dict(snap=500)
# jd.path = os.path.join(path, 'instances','generatedInstance.ufo')
# d.addInstance(jd)
# jd = InstanceDescriptor()
# jd.familyName = None
# jd.styleName = None
# jd.location = dict(snap=600)
# jd.path = os.path.join(path, 'instances','generatedInstance2.ufo')
# d.addInstance(jd)
# jd = InstanceDescriptor()
# jd.familyName = "Aa"
# jd.styleName = "Bb"
# jd.location = dict(snap=600)
# jd.path = None
# d.addInstance(jd)
# r1 = RuleDescriptor()
# r1.name = "rule_no_subs"
# cd1 = dict(name='lalala', minimum=100, maximum=200)
# cd2 = dict(name='snap', minimum=10000, maximum=2000)
# cd3 = dict(name='snap', minimum=10000, maximum=None) # problem 7,10
# cd4 = dict(name='snap', minimum=None, maximum=10000) # problem 7,11
# r1.conditionSets.append([cd1, cd2, cd3, cd4])
# d.addRule(r1)
# r2 = RuleDescriptor()
# r2.name = "rule_no_conditionset"
# r2.subs.append(('glyphFour', 'glyphFour'))
# d.addRule(r2)
# r3 = RuleDescriptor()
# r3.name = "rule_values_the_same"
# cd1 = dict(name='samesees_1', minimum=200, maximum=200)
# r1.conditionSets.append([cd1, cd1, cd1])
# r3.subs.append(('glyphFour', 'glyphFour'))
# d.addRule(r3)
# # data for 7, 9 rule without a name
# r4 = RuleDescriptor()
# r4.name = None
# cd1 = dict(name='samesees_2', minimum=200, maximum=200)
# r1.conditionSets.append([cd1, cd1, cd1])
# r4.subs.append(('glyphFour', 'glyphFour'))
# d.addRule(r4)
# d.write(tp)
# dc = DesignSpaceChecker(d)
# dc.checkEverything()
# showProblems(dc)
# for p in dc.problems:
# print(p)
# assert not dc.hasStructuralProblems() # minimum working designspace, ready for fonts
# assert (3,6) in dc.problems # missing family name
# assert (3,7) in dc.problems # missing style name
# assert (4,1) in dc.problems # components
# assert (4,2) in dc.problems # default glyph is empty, glyphName
# assert (4,7) in dc.problems # default glyph is empty, glyphName
# assert (4,9) in dc.problems # incompatible constructions for glyph
# assert (5,0) in dc.problems # kerning: no kerning in source
# assert (5,6) in dc.problems # no kerning groups in source
# assert (6,4) in dc.problems # source font unitsPerEm value different from default unitsPerEm
# assert (7,2) in dc.problems # source and destination glyphs the same
# assert (7,3) in dc.problems # no substition glyphs defined
# assert (7,4) in dc.problems # no conditionset defined
# assert (7,5) in dc.problems # condition values on unknown axis
# assert (7,6) in dc.problems # condition values out of axis bounds
# #print(tp)
# for p in dc.problems:
# if p == (4,9):
# print(p)
# # badly populated designspace
# # this system does not have on-axis masters
# # but a couple of non-aligned off-axis masters.
# # Varlib will complain
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "badly_populated.designspace")
# a1 = AxisDescriptor()
# a1.name = "weight"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "wght"
# d.addAxis(a1)
# a2.name = "width"
# a2.minimum = -500
# a2.maximum = 500
# a2.default = 0
# a2.tag = "wdth"
# d.addAxis(a2)
# a3.name = "optical"
# a3.minimum = 0
# a3.maximum = 1000
# a3.default = 0
# a3.tag = "opsz"
# d.addAxis(a3)
# # neutral
# s1 = SourceDescriptor()
# s1.location = dict(weight=0, width=0, optical=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# # offaxis master 1
# s2 = SourceDescriptor()
# s2.location = dict(width=-500, weight=1000, optical=0)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# # offaxis master 2
# s3 = SourceDescriptor()
# s3.location = dict(width=0, weight=1000, optical=1000)
# s3.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s3)
# # offaxis master 2
# s4 = SourceDescriptor()
# s4.location = dict(width=500, weight=1000, optical=1000)
# s4.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s4)
# d.write(tp)
# dc = DesignSpaceChecker(d)
# dc.checkEverything()
#showProblems(dc)
showUntested()
def makeEdit(path, find, replace):
f = open(path, 'r')
t = f.read()
f.close()
t = t.replace(find, replace)
f = open(path, 'w')
f.write(t)
f.close()
makeTests()
| LettError/DesignspaceProblems | tests/makeTests.py | makeTests.py | py | 19,721 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "importlib.reload",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "importlib.reload",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "designspaceProblems.problems",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "fo... |
35018450468 | from os import path
import sys
import metavision_designer_engine as mvd_engine
from metavision_designer_engine import Controller
import metavision_designer_cv as mvd_cv
import metavision_designer_core as mvd_core
import metavision_hal as mv_hal
import cv2
from Python.Event_Processor.EventProcessor import EventProcessor
from Python.Log_Luminance import Log_Luminance, Gen_Image
from metavision_designer_core import RoiFilter
# ce fichier est le ficher d'input raw si on choisis de ne pas utiliser la caméra événementielle
input_filename = "../../Movie/Log_Luminance/out_2021-07-07_13-13-28.raw" # ne fonctionne pas avec ~/
cam = input("Do you want to use cam ? Y or N ")
if cam == "Y" or cam == "y":
from_file = False
controller = Controller()
device = mv_hal.DeviceDiscovery.open('')
# Add the device interface to the pipeline
interface = mvd_core.HalDeviceInterface(device)
controller.add_device_interface(interface)
cd_producer = mvd_core.CdProducer(interface)
else:
# input_filename = input("File path from main ")
from_file = True
# Check validity of input arguments
if not (path.exists(input_filename) and path.isfile(input_filename)):
print("Error: provided input path '{}' does not exist or is not a file.".format(input_filename))
sys.exit(1)
is_raw = input_filename.endswith('.raw')
if not is_raw:
print("Error: provided input path '{}' does not have the right extension. ".format(input_filename) +
"It has either to be a .raw or a .dat file")
sys.exit(1)
controller = mvd_engine.Controller()
device = mv_hal.DeviceDiscovery.open_raw_file(input_filename)
if not device:
print("Error: could not open file '{}'.".format(input_filename))
sys.exit(1)
# Add the device interface to the pipeline
interface = mvd_core.HalDeviceInterface(device)
controller.add_device_interface(interface)
cd_producer = mvd_core.CdProducer(interface)
# Start the streaming of events
i_events_stream = device.get_i_events_stream()
i_events_stream.start()
# Add cd_producer to the pipeline
controller.add_component(cd_producer, "CD Producer")
# Get the sensor size.
geometry = device.get_i_geometry()
width = geometry.get_width()
height = geometry.get_height()
print("Sensor size width = {} height = {}".format(width, height))
# crop pour la caméra événementielle
roi_width = int(100)
roi_height = int(100)
x0 = int(width / 2 - roi_width / 2)
y0 = int(height / 2 - roi_height / 2)
x1 = x0 + roi_width
y1 = y0 + roi_height
roi_filter = RoiFilter(cd_producer, x0, y0, x1, y1)
controller.add_component(roi_filter)
print("ROI size width = {} height = {} Number of pixels = {}".format(roi_width, roi_height, roi_width * roi_height))
# ActivityNoiseFilter configuration
time_window_length = 1500 # duration in us plus c'est bas plus c'est filtré
cd_filtered = mvd_cv.ActivityNoiseFilter(roi_filter, time_window_length)
controller.add_component(cd_filtered, "Noise filter")
filtered_frame_gen = mvd_core.FrameGenerator(cd_filtered)
controller.add_component(filtered_frame_gen, "Filtered frame generator")
# Create Frame Generator with 20ms accumulation time
frame_gen = mvd_core.FrameGenerator(cd_filtered)
frame_gen.set_dt(20000)
controller.add_component(frame_gen, "FrameGenerator")
# We use PythonConsumer to "grab" the output of two components: cd_producer and frame_gen
# pyconsumer will callback the application each time it receives data, using the event_callback function
frame_gen_name = "FrameGen"
cd_prod_name = "CDProd"
ev_proc = EventProcessor(event_gen_name=cd_prod_name, frame_gen_name=frame_gen_name, width=width, height=height,
display_callback=False)
pyconsumer = mvd_core.PythonConsumer(ev_proc.event_callback)
pyconsumer.add_source(cd_filtered, cd_prod_name) # filtered (cd_filtered) or not filtered (cd_producer)
pyconsumer.add_source(filtered_frame_gen, frame_gen_name) # filtered (filtered_frame_gen) or not filtered (frame_gen)
controller.add_component(pyconsumer, "PythonConsumer")
controller.set_slice_duration(10000)
controller.set_batch_duration(50000)
do_sync = True if from_file else False
# Start the camera
if not from_file:
simple_device = device.get_i_device_control()
simple_device.start()
# Start the streaming of events
i_events_stream = device.get_i_events_stream()
i_events_stream.start()
#################################Parameters#################################
# on part du principe que l'image est carré
divide_matrix_by = 2 # de combien on divise la taille de l'image, si on part d'une résolution 100*100 on obtient du 50*50 si divisé par 2
print("divide size width = {} height = {} Number of pixels = {}".format(int(roi_width/divide_matrix_by), int(roi_height/divide_matrix_by),
int(roi_width/divide_matrix_by) * int(roi_height/divide_matrix_by)))
# les deux matrices de niveaux qui permet de faire fonctionner la log luminance
# on les garde d'un batch à l'autre ce qui fait que si on a qu'un seul gros batch qui contient tous les événements l'algo fonctionne quand même
matrix_level_HQ = Log_Luminance.gen_matrix_PixelState(roi_width, roi_height) # correspond à la haute résolution
matrix_level_LQ = Log_Luminance.gen_matrix_PixelState(int(roi_width / divide_matrix_by), int(roi_height / divide_matrix_by)) # correspond à la basse résolution
#Make Video
# si ce paramétre est mis en true, on ajoute les image créer à chaque batch dans un tableau puis on les enregistre sur le disque avec le nom spécifié dans nom_video
# le nombre de seconde est le nombre de second que la vidéo va duré, j'ai paramétré la video pour qu'elle soit cadancé à 28fps
# le temps de filmé correspond donc au temps pour acquérir nb_second_de_video*28 images au total
make_video_at_the_end = False
nb_second_de_video = 15
nom_video = 'ahahah' # juste mettre le nom, le fichier sortira en .avi à la fin du programme
array_img = []
while not controller.is_done():
controller.run(do_sync)
events = ev_proc.get_event() # tableau d'event
events_LQ = Log_Luminance.log_luminance(events, matrix_level_HQ, matrix_level_LQ, divide_matrix_by, (width, height),
(roi_width, roi_height), treshold=1, interpolation=0)
# cette fonction ne marche pas et je ne comprend pas POURQUOI AAAAAAAAHHHHH: elle fonctionne maintenant mais le commentaire me fait sourir
img_original = ev_proc.get_cut_event_2d_arrays(x0, x1, y0, y1)
img = Gen_Image.create_image_rgb_from_log_luminance(events_LQ, int(roi_width/divide_matrix_by), int(roi_height/divide_matrix_by))
img_original = cv2.resize(img_original, (200, 200))
img = cv2.resize(img, (200, 200))
cv2.imshow("Original", img_original)
cv2.imshow("Log Luminance", img)
# les deux ligne de code en dessous permettes de visualiser le fonctionnement des matrices de niveaux mais consomme beaucoup de ressources
#cv2.imshow("pixelstateHQ", cv2.resize(Gen_Image.create_image_rgb_from_pixel_state(matrix_level_HQ), (400, 400)))
#cv2.imshow("pixelstateLQ", cv2.resize(Gen_Image.create_image_rgb_from_pixel_state(matrix_level_LQ), (400, 400)))
if make_video_at_the_end:
array_img.append(img)
cv2.waitKey(1) # ne jamais oublié cet ligne de code qui empêche l'image de s'afficher si elle n'est pas la
if nb_second_de_video*28 == len(array_img) and make_video_at_the_end:
break
cv2.destroyAllWindows()
if make_video_at_the_end:
Gen_Image.convert_array_of_image_in_video(array_img, nom_video)
| GuillaumeCariou/I3S_Tutorship_Internship | Python/Log_Luminance/Main.py | Main.py | py | 7,685 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "metavision_designer_engine.Controller",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "metavision_hal.DeviceDiscovery.open",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "metavision_hal.DeviceDiscovery",
"line_number": 22,
"usage_type... |
6103056667 | import os
import argparse
import utils
import json
import numpy as np
from sklearn import metrics
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as T
from models.dgcnn import DGCNN
from models.pointnet import PointNet, feature_transform_regularizer
import transforms as transforms
from data import PointCloudDataset
def test(args):
current_path = os.path.dirname(args.model_path)
with open(os.path.join(current_path, "settings.txt"), 'r') as f:
settings = json.load(f)
args.exp_name = settings["exp_name"]
args.cuda = torch.cuda.is_available()
if args.cuda:
print(
'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
torch.cuda.manual_seed(settings["seed"])
else:
print('Using CPU')
device = torch.device("cuda" if args.cuda else "cpu")
numClass = settings["num_classes"]
modelType = settings["model"]
if modelType == "pointnet":
model = PointNet(numClass, emb_dims=settings["emb_dims"], dropout_rate=settings["dropout"], feature_transform=settings["transform_regularization"] > 0.0)
elif modelType =="dgcnn":
model = DGCNN(numClass, emb_dims=settings["emb_dims"], dropout_rate=settings["dropout"], k=settings["k"])
else:
raise Exception("Not implemented")
model_state_path = os.path.join(current_path, "model_Both_loss.t7")
# load model
model = torch.load(model_state_path)
model.to(device)
model.eval()
# DataLoaders
test_transforms = T.Compose([transforms.Normalize()])
test_real_dataset = PointCloudDataset(dataDir = args.data_path, partition='Testing', num_points=1024, transforms = test_transforms, data_type = ["real"], binary_data = False)
test_synthetic_dataset = PointCloudDataset(dataDir = args.data_path, partition='Testing', num_points=1024, transforms = test_transforms, data_type = ["synthetic"], binary_data = False)
test_dataset = PointCloudDataset(dataDir = args.data_path, partition='Testing', num_points=1024, transforms = test_transforms, data_type = ["synthetic","real"], binary_data = False)
test_real_loader = DataLoader(test_real_dataset, num_workers=0, batch_size=args.batch_size, shuffle=False, drop_last=False)
test_synthetic_loader = DataLoader(test_synthetic_dataset, num_workers=0, batch_size=args.batch_size, shuffle=False, drop_last=False)
test_loader = DataLoader(test_dataset, num_workers=0, batch_size=args.batch_size, shuffle=False, drop_last=False)
data_settings = [("Real", test_real_loader), ("Synthetic", test_synthetic_loader), ("All", test_loader)]
with torch.no_grad():
for data_setting, data_loader in data_settings:
predicted_labels = []
correct_labels = []
baseDir = os.path.join(current_path, data_setting)
if not os.path.isdir(baseDir):
os.makedirs(baseDir)
incorrectPredDir = os.path.join(baseDir, "incorrect_predictions")
if not os.path.isdir(incorrectPredDir):
os.makedirs(incorrectPredDir)
for data, label in tqdm(data_loader):
data = data.to(device)
data = data.float()
labels = label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
if modelType == "pointnet":
output, _, _ = model(data)
else:
output = model(data)
preds = output.max(dim=1)[1]
correct_labels.append(labels.cpu().numpy())
predicted_labels.append(preds.detach().cpu().numpy())
for i in range(len(labels)):
if labels[i] != preds[i]:
temp_data = data[i].permute(1, 0)
num_csvs = len(os.listdir(incorrectPredDir))
np.savetxt(os.path.join(incorrectPredDir, "prediction{}_{}.csv".format(labels[i], num_csvs)), temp_data.cpu().numpy())
correct_labels = np.concatenate(correct_labels)
predicted_labels = np.concatenate(predicted_labels)
cm = metrics.confusion_matrix(correct_labels, predicted_labels)
print(cm)
precision, recall, f1, support = metrics.precision_recall_fscore_support(correct_labels, predicted_labels, average="weighted")
normal_accuracy = metrics.accuracy_score(correct_labels, predicted_labels)
average_accuracy = metrics.balanced_accuracy_score(correct_labels, predicted_labels)
np.savetxt(os.path.join(baseDir, "ConfusionMatrix.txt"), cm)
with open(os.path.join(baseDir, "metrics.txt"), "w") as f:
f.write("Precision: {}\n".format(precision))
f.write("Recall: {}\n".format(recall))
f.write("F1: {}\n".format(f1))
f.write("Support: {}\n".format(support))
f.write("Average: {}\n".format(normal_accuracy))
f.write("Balanced Average: {}\n".format(average_accuracy))
print(precision, recall, f1, support, normal_accuracy, average_accuracy)
if __name__ == '__main__':
# Training settings
parser = argparse.ArgumentParser(description='Settings for Point Cloud Classification Test')
parser.add_argument('--exp_name', type=str, default='', metavar='N',
help='Name of the experiment')
parser.add_argument('--model_path', type=str, default='', metavar='N',
help='Pretrained model path')
parser.add_argument('--data_path', type=str, default='', metavar='N',
help='Dataset path')
parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
help='Size of batch. (Default: 32)')
args = parser.parse_args()
test(args) | leoriczhang/3d-pointcloud- | test_cls.py | test_cls.py | py | 6,038 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
9204282514 | import sys
import os
import time
import traceback
import numpy as np
import torch
import torchvision
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.autograd import Variable
from tqdm import tqdm
#conda install tqdm
from collections import OrderedDict
sys.path.append('../')
from SOL4Py.torch.ZTorchModel import ZTorchModel
from SOL4Py.torch.ZTorchEpochChangeNotifier import ZTorchEpochChangeNotifier
from SOL4Py.torch.ZTorchModelCheckPoint import ZTorchModelCheckPoint
##
# ZTorchSimpleModel
class ZTorchSimpleModel(ZTorchModel):
#
# Constructor
def __init__(self, image_size, n_classes, model_filename):
super(ZTorchSimpleModel, self).__init__(image_size, n_classes, model_filename)
self.n_classes = n_classes
self.image_size = image_size;
ch, h, w = image_size
print("ch:{} h:{} w:{}".format(ch, h, w))
self.model_filename = model_filename
# The following is based on AlexNet
self.features = nn.Sequential(
nn.Conv2d(ch, 64, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
#nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.Conv2d(384, h, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(h, h, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.n_features = 256 * 4 * 4
if h == 32:
self.n_features = h * 4 * 4 # 512
if h == 64:
self.n_features = h * 8 * 8 # 4096
if h == 96:
self.n_features = h * 9216 # 884736
if h == 128:
self.n_features = h * h * h # 2097152
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(self.n_features, self.n_features),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(self.n_features, self.n_features),
nn.ReLU(inplace=True),
nn.Linear(self.n_features, n_classes)
)
def forward(self, input):
output = self.features(input)
output = output.view(output.size(0), self.n_features)
output = self.classifier(output)
return output
| sarah-antillia/SOL4Py_V4 | SOL4Py/torch/ZTorchSimpleModel.py | ZTorchSimpleModel.py | py | 2,551 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "SOL4Py.torch.ZTorchModel.ZTorchModel",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "to... |
23315419953 | #!/usr/bin/env python
from setuptools import setup, find_packages
tests_require = [
'nose',
'unittest2',
]
setup(
name='quickunit',
version='0.6.0',
author='David Cramer',
author_email='dcramer@gmail.com',
description='A discovery plugin for Nose which relies on sane structure.',
url='http://github.com/dcramer/quickunit',
packages=find_packages(exclude=["tests"]),
zip_safe=False,
entry_points={
'nose.plugins.0.10': [
'quickunit = quickunit.plugin:QuickUnitPlugin'
],
'console_scripts': [
'quickunit-finder = quickunit.scripts.finder:main',
],
},
license='Apache License 2.0',
tests_require=tests_require,
extras_require={
'tests': tests_require,
},
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
| dcramer/quickunit | setup.py | setup.py | py | 1,025 | python | en | code | 34 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 17,
"usage_type": "call"
}
] |
24179539315 | """Helper functions for iterables."""
import collections
from itertools import islice, zip_longest
from typing import Callable, Iterable, Iterator, Optional, TypeVar
T = TypeVar("T")
# Courtesy of https://docs.python.org/3/library/itertools.html
def grouper(
iterable: Iterable[T],
n: int,
*,
incomplete: str = "fill",
fillvalue: Optional[T] = None,
) -> Iterable[Iterable[T]]:
"""Collect data into non-overlapping fixed-length chunks or blocks."""
# grouper('ABCDEFG', 3, fillvalue='x') --> ABC DEF Gxx
# grouper('ABCDEFG', 3, incomplete='strict') --> ABC DEF ValueError
# grouper('ABCDEFG', 3, incomplete='ignore') --> ABC DEF
args = [iter(iterable)] * n
if incomplete == "fill":
return zip_longest(*args, fillvalue=fillvalue)
if incomplete == "strict":
return zip(*args, strict=True)
if incomplete == "ignore":
return zip(*args)
else:
raise ValueError("Expected fill, strict, or ignore")
# Also thank to https://docs.python.org/3/library/itertools.html
def sliding_window(iterable: Iterable[T], n: int) -> Iterable[tuple[T, ...]]:
"""Iterate over an iterable as a sliding window."""
# sliding_window('ABCDEFG', 4) --> ABCD BCDE CDEF DEFG
it = iter(iterable)
window = collections.deque(islice(it, n), maxlen=n)
if len(window) == n:
yield tuple(window)
for x in it:
window.append(x)
yield tuple(window)
def read_iter_until(
data: Iterator[T],
terminators: Optional[set[T]] = None,
pred: Optional[Callable[[T], bool]] = None,
) -> tuple[list[T], Optional[T]]:
"""Read an iterator until a terminator, returning the read string and the terminator."""
result: list[T] = []
for i in data:
if terminators is not None and i in terminators:
return result, i
if pred is not None and pred(i):
return result, i
result.append(i)
return result, None
| NickG123/AdventOfCode2022 | utils/iterables.py | iterables.py | py | 1,958 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TypeVar",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "itertools.zip_longest",... |
72775441703 | # -*- coding: utf-8 -*-
"""
Created on Sat May 9 07:43:03 2020
@author: Das
"""
# Importing Essential Libraries
import nltk
import random
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from nltk.tokenize import word_tokenize
#reading file
positiv = open("dataset/positive.txt","r").read()
negativ = open("dataset/negative.txt","r").read()
#list for feature
all_words = []
final = []
#"j" for adjectives by this we're allowing type Adjectives only
#j is adject , r is adverb, v is verb
allwd_word = ["J"]
for p in positiv.split('\n'):
final.append( (p, "pos") ) #adding positive label to data
words = word_tokenize(p) #tokenizing
pos = nltk.pos_tag(words) #
for w in pos:
if w[1][0] in allwd_word:
all_words.append(w[0].lower())
for p in negativ.split('\n'):
final.append( (p, "neg") ) #adding negative label to data
words = word_tokenize(p)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allwd_word:
all_words.append(w[0].lower())
all_words = nltk.FreqDist(all_words) #frequency to the words i.e. count
#limiting features for better results
word_feat = list(all_words.keys())[:5000]
def find_features(document):
words = word_tokenize(document)
features = {}
for w in word_feat:
features[w] = (w in words)
return features
featuresets = [(find_features(rev), category) for (rev, category) in final]
#shuffling features as the first half is positive and second is negative
random.shuffle(featuresets)
#dividing features to training and test set
testing_set = featuresets[10000:] #10000 for training
training_set = featuresets[:10000] # rest 664 for testing
#applying MultinomialNB to training and testing its accuracy
#gives accuracy = 71-72%
"""MNB_clf = SklearnClassifier(MultinomialNB())
MNB_clf.train(training_set)
print("MNB_classifier accuracy percent:", (nltk.classify.accuracy(MNB_clf, testing_set))*100)
"""
#applying BernoulliNB to training and testing its accuracy
#gives accuracy = 72-74%
BNB_clf = SklearnClassifier(BernoulliNB())
BNB_clf.train(training_set)
#print("BernoulliNB_classifier accuracy percent:", (nltk.classify.accuracy(BNB_clf, testing_set))*100)
#applying Decision Tree to training and testing its accuracy
#gives accuracy = 62-65%
"""dct_clf = SklearnClassifier(DecisionTreeClassifier())
dct_clf.train(training_set)
print("Decision Tree Classifier accuracy percent:", (nltk.classify.accuracy(dct_clf, testing_set))*100)"""
def sentiment(text):
feats = find_features(text)
v = BNB_clf.classify(feats)
return v
| DasBhai/MNCRankingRealtime | mod_sentiment.py | mod_sentiment.py | py | 2,815 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "nltk.pos_tag",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "... |
8746369027 | import requests
import numpy as np
from phe import paillier
import math
api_address = "http://localhost:8000"
N_FEATURES = 10
PRECISION = 2**(-16)
EXP = -8
TEST_DATA = [0.48555949, 0.29289251, 0.63463107, 0.41933057, 0.78672205, 0.58910837, 0.00739207, 0.31390802, 0.37037496, 0.3375726]
TEST_PRED = 0.44812144746653826
def encrypt_object(datapoint, public_key):
return [public_key.encrypt(x, precision=PRECISION).ciphertext() for x in datapoint]
def generate_stealing_data():
stealing_data = []
for _ in range(N_FEATURES + 1):
stealing_data.append(np.random.uniform(0, 1, N_FEATURES).tolist())
return stealing_data
def discover_model(data, preds):
N = len(data)
X = np.ones((N, N_FEATURES + 1))
X[:, 1:] = np.array(data)
y = np.array(preds)
coeffs = np.linalg.solve(X,y)
bias = coeffs[0]
weights = coeffs[1:]
return weights, bias
def main():
# create a session
session = requests.session()
public_key, private_key = paillier.generate_paillier_keypair()
stealing_data = generate_stealing_data()
preds = []
for _, datapoint in enumerate(stealing_data):
encrypted_list = encrypt_object(datapoint, public_key)
r = session.post(f"{api_address}/prediction", json={"pub_key_n": public_key.n , "enc_feature_vector": encrypted_list}).json()
encrypted_pred = paillier.EncryptedNumber(public_key, int(r["enc_prediction"]), EXP)
res = private_key.decrypt(encrypted_pred)
preds.append(res)
weights, bias = discover_model(stealing_data, preds)
print(f'weights:\n{weights}')
print(f'bias: {bias:.3f}')
stolen_pred = np.dot(TEST_DATA, weights) + bias
assert 2**(-16) > abs(stolen_pred - TEST_PRED), "Prediction is not correct"
print('All good!')
if __name__ == '__main__':
main() | arinaruck/isp_2022 | hw8/steal_model.py | steal_model.py | py | 1,831 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.uniform",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"... |
10902517011 | from django.test import TestCase
from home.models import UserProfile
class UserProfileModelTestCase(TestCase):
@classmethod
def setUpTestData(cls):
# Create test data before running the tests
UserProfile.objects.create(
name='John Doe',
email='johndoe@example.com',
bio='Lorem ipsum dolor sit amet',
profile_picture='path/to/profile_picture.jpg'
)
def test_create_user_profile(self):
# Retrieve the created user profile from the database
user_profile = UserProfile.objects.get(email='johndoe@example.com')
# Perform assertions to check if the created user profile matches the provided data
self.assertEqual(user_profile.name, 'John Doe')
self.assertEqual(user_profile.email, 'johndoe@example.com')
self.assertEqual(user_profile.bio, 'Lorem ipsum dolor sit amet')
self.assertEqual(user_profile.profile_picture,
'path/to/profile_picture.jpg')
| shubhamkhuntia/superU-Assignment | home/tests.py | tests.py | py | 1,010 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "home.models.UserProfile.objects.create",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "home.models.UserProfile.objects",
"line_number": 9,
"usage_type": "attribute"
... |
9324455473 | import os
from dotenv import load_dotenv
import discord
from discord import app_commands
from langchain.llms import OpenAI
# Load the .env file
load_dotenv()
token = os.getenv('DISCORD_TOKEN')
guild_id = os.getenv('DISCORD_GUILD')
openai_key = os.getenv('OPENAI_KEY')
class Client(discord.Client):
def __init__(self):
super().__init__(intents=discord.Intents.default())
self.synced = False
async def on_ready(self):
await self.wait_until_ready()
if not self.synced:
await tree.sync(guild = discord.Object(id = guild_id))
self.synced = True
print(f"Logged in as {self.user} (ID: {self.user.id}).")
print("-----")
client = Client()
tree = app_commands.CommandTree(client)
@tree.command(name = "test", description = "testing", guild = discord.Object(id = guild_id))
async def self(interaction: discord.Interaction, name: str):
await interaction.response.send_message(f"Hello {name}! I was made with Discord.py!")
@tree.command(name="llm", description="Language Model", guild = discord.Object(id = guild_id))
async def self(interaction: discord.Interaction, prompt: str):
llm = OpenAI(openai_api_key=openai_key, temperature=0.9)
await interaction.response.send_message(llm.predict(prompt))
if __name__ == "__main__":
client.run(token)
| conradomanclossi/Trends | bot/main.py | main.py | py | 1,334 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 1... |
74955138663 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import requests
SNIPPET_TEMPLATE = """<snippet>
<content><![CDATA[
{}
]]></content>
<description>{}</description>
<scope>source.python</scope>
<tabTrigger>{}</tabTrigger>
</snippet>"""
def _request_code(target_url):
"""Request a code file."""
r = requests.get(target_url)
code = r.text.encode('utf-8')
return code
def _find_functions(code):
"""Find functions from the code."""
# find non-private functions and the first line of their doc-string
function_pattern = '(@.*\n)?.*def ([^_].*)\((.*)\):\n.*?"""(.*)'
# find and return all of the functions
functions = re.findall(function_pattern, code)
return functions
def main(args):
"""Create sublime text snippets automatically from code."""
CLASS_PREFIX = args['<class_prefix>']
# define the prefix used when saving a new snippet and for the snippet's tab trigger
if args['--snippet_prefix']:
SNIPPET_PREFIX = args['--snippet_prefix']
else:
SNIPPET_PREFIX = CLASS_PREFIX
# if we are pulling the code from a location on the internet (e.g. a raw file on github), get the code
if args['<target_file>'].startswith("http"):
code = _request_code(args['<target_file>'])
else:
with open(args['<target_file>'], 'r') as f:
code = f.read().encode('utf-8')
# find all of the functions in the code
functions = _find_functions(code)
# create a snippet for each function (as appropriate)
for function in functions:
# remove the newline from the end of the decorator
decorator = function[0].strip()
# handle a property decorator
if decorator == "@property":
pass
# handle functions
else:
# split up the arguments for the function
arguments = function[2].split(", ")
# remove 'self' as an argument to the function (just move on if 'self' isn't an argument)
try:
arguments.remove('self')
except ValueError as e:
pass
# create a string with the arguments to the function formatted for a sublime text snippet
argument_string = ""
count = 0
for argument in arguments:
count += 1
argument_string += "${"
argument_string += "{}:{}".format(count, argument)
argument_string += "}"
# if there are more arguments, add a trailing comma
if count < len(arguments):
argument_string += ", "
# create a name for the snippet based on the name of the function
snippet_name = SNIPPET_PREFIX + function[1].replace("_", "")
# create a description for the snippet based on the function's doc string
snippet_description = function[3].replace('"""', '')
# create snippet code (with the class prefix in front of it)
snippet_code = CLASS_PREFIX + "." + function[1] + "(" + argument_string + ")"
# create a snippet
new_snippet = SNIPPET_TEMPLATE.format(snippet_code, snippet_description, snippet_name)
# write the new snippet
with open(args['<output_directory>'] + '/{}.sublime-snippet'.format(snippet_name), 'w') as f:
f.write(new_snippet)
if __name__ == '__main__':
main()
| fhightower/sublime-snippet-creator | sublime_snippet_creator/sublime_snippet_creator.py | sublime_snippet_creator.py | py | 3,465 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 32,
"usage_type": "call"
}
] |
31762824166 | from collections import deque
def bfs():
dx = [0,1,0,-1]
dy = [1,0,-1,0]
while loc:
x_y = loc.popleft()
y, x = x_y[0], x_y[1]
for i in range(4):
nX = x + dx[i]
nY = y + dy[i]
if nX < 0 or nX >= m or nY < 0 or nY >= n: continue
if maze[nY][nX] != 1: continue
loc.append((nY, nX))
maze[nY][nX] = maze[y][x] + 1
if __name__ == "__main__":
n, m = map(int, input().split())
maze = [[int(i) for i in input()] for j in range(n)]
loc = deque([])
loc.append((0,0))
bfs()
print(maze[n-1][m-1])
| 4RG0S/2020-Spring-Jookgorithm | 이승민/[20.03.30]2178.py | [20.03.30]2178.py | py | 626 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 26,
"usage_type": "call"
}
] |
73137302504 | """factRequestForDocuments
Revision ID: 489ca98de532
Revises: d6c3af32b13e
Create Date: 2022-01-26 22:39:14.403851
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '489ca98de532'
down_revision = 'd6c3af32b13e'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('factRequestForDocuments',
sa.Column('actionid', sa.Integer(), nullable=False),
sa.Column('foirequestid', sa.Integer(), nullable=False),
sa.Column('runcycleid', sa.Integer(), nullable=False),
sa.Column('actiontype', sa.Integer()),
sa.Column('description', sa.VARCHAR(length=4000)),
sa.Column('priority', sa.VARCHAR(length=3000)),
sa.Column('emailaddress', sa.VARCHAR(length=4000)),
sa.Column('createddate', sa.DateTime()),
sa.Column('actiondate', sa.DateTime()),
sa.Column('duedate', sa.DateTime()),
sa.Column('responsedate', sa.DateTime()),
sa.Column('parentactionid', sa.Integer()),
sa.Column('createdby', sa.VARCHAR(length=200)),
sa.Column('subject', sa.VARCHAR(length=150)),
sa.Column('programofficeid', sa.Integer()),
sa.Column('reqfordocstatusid', sa.Integer()),
sa.Column('completeddate', sa.DateTime()),
sa.Column('requestofficeid', sa.Integer()),
sa.Column('visiblerequestid', sa.VARCHAR(length=50)),
sa.Column('requestdescription', sa.VARCHAR(length=4000)),
sa.Column('officeid', sa.Integer()),
sa.Column('requesttypeid', sa.Integer()),
sa.Column('overduedays', sa.Integer()),
sa.Column('elapseddays', sa.Integer()),
sa.Column('passduedays', sa.Integer()),
sa.Column('rfdage', sa.Integer()),
sa.Column('remainingdays', sa.Integer()),
sa.Column('methodofdelivery', sa.VARCHAR(length=1)),
sa.ForeignKeyConstraint(['reqfordocstatusid'], ['dimRequestForDocumentsStatus.reqfordocstatusid']),
sa.PrimaryKeyConstraint('actionid', 'foirequestid', 'runcycleid')
)
def downgrade():
op.drop_table('factRequestForDocuments')
| bcgov/foi-reporting | datawarehouse/edw_migrations/versions/489ca98de532_factrequestfordocuments.py | 489ca98de532_factrequestfordocuments.py | py | 1,937 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
18932220810 | import os
import time
import datetime
from pathlib import Path
from datetime import datetime
import psutil
import math
class Location(str):
def __call__(self, *args) -> 'Location':
return Location(self.format(*args))
def exists(self):
return Path(self).exists()
def read(self):
with open(self, 'r') as f:
return f.read()
def write(self, value):
with open(self, 'w') as f:
f.write(value)
class Locations:
PROC_CMDLINE = Location("/proc/{}/cmdline")
PROC_CGROUP = Location("/proc/{}/cgroup")
PROC_SMAP_ROLLUP = Location("/proc/{}/smaps_rollup")
CGROUP_DIR = Location("/sys/fs/cgroup{}")
MEM_CURRENT = Location("{}/memory.current")
MEM_STAT = Location("{}/memory.stat")
MEM_PRESSURE = Location("{}/memory.pressure")
MEM_HIGH = Location("{}/memory.high")
MEMINFO = Location("/proc/meminfo")
class ProcessList:
    """Snapshot of numeric /proc entries, keyed by PID.

    ``pids`` maps pid (int) -> {"pid": int, "cmdline": str}. refresh()
    only reads processes that have not been recorded yet.
    """

    def __init__(self):
        self.pids = {}  # pid (int) -> {"pid": int, "cmdline": str}

    def refresh(self):
        """Scan /proc and record the cmdline of any PID not seen before."""
        for process in os.listdir("/proc/"):
            # Only numeric directory names are processes.
            if not process.isdigit():
                continue
            pid = int(process)
            # BUGFIX: the original tested the *string* name against int keys
            # ("123" in {123: ...} is always False), so the already-seen
            # check never matched and every PID was re-read on each refresh.
            if pid in self.pids:
                continue
            if not Locations.PROC_CMDLINE(pid).exists():
                continue
            try:
                self.pids[pid] = {
                    "pid": pid,
                    # cmdline is NUL-separated argv; join with spaces.
                    "cmdline": Locations.PROC_CMDLINE(pid).read().replace('\0', ' ').strip(),
                }
            except OSError:
                # Process exited (or access was denied) between the listing
                # and the read. Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                continue

    def find(self, name):
        """Return processes whose full cmdline equals `name` exactly."""
        return [process for process in self.pids.values() if name == process["cmdline"]]

    def find_in(self, name):
        """Return processes whose cmdline contains `name` as a substring."""
        return [process for process in self.pids.values() if name in process["cmdline"]]

    def __str__(self):
        return "\n".join(
            "pid: {}\t cmdline: {}".format(pid['pid'], pid['cmdline'])
            for pid in self.pids.values()
        )
# func found on https://sleeplessbeastie.eu/2019/06/10/how-to-display-memory-used-by-processes-in-human-readable-form-using-python-script/
def pretty(nbytes):
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.50 kB'."""
    units = ("B", "kB", "MB", "GB", "TB")
    if nbytes == 0:
        return "%s %s" % ("0", "B")
    # Pick the largest 1024-based unit that keeps the value >= 1.
    exponent = int(math.floor(math.log(nbytes, 1024)))
    scaled = round(nbytes / math.pow(1024, exponent), 2)
    return '%s %s' % (format(scaled, ".2f"), units[exponent])
if __name__ == "__main__":
    # Locate the target process (the wasm operator "controller") by its
    # exact cmdline, then sample its memory usage roughly 5x per second
    # into a semicolon-separated CSV given as the first CLI argument.
    process_list = ProcessList()
    process_list.refresh()
    container = process_list.find("/controller /")  # IndexError below if not found
    import sys
    output_file = sys.argv[1]
    with open(output_file, 'w') as file_object:
        file_object.write('time;uss;pss\n')
    # Effectively an infinite sampling loop.
    for i in range(100000000000000):
        p = psutil.Process(container[0]["pid"])
        meminfo = p.memory_full_info()
        uss = meminfo.uss
        pss = meminfo.pss
        # NOTE(review): the print re-calls memory_full_info() twice more,
        # doing extra /proc reads per sample.
        print(f"uss {pretty(p.memory_full_info().uss)}, pss={pretty(p.memory_full_info().pss)} ")
        with open(output_file, 'a') as file_object:
            # "." -> "," turns the timestamp's decimal point into a comma
            # (European locale), keeping ";" as the field separator.
            file_object.write(f'{datetime.now()};{uss};{pss}\n'.replace(".", ","))
        time.sleep(0.2)
| idlab-discover/wasm-operator | profile/wasmProfiler.py | wasmProfiler.py | py | 3,043 | python | en | code | 47 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 77,
... |
74962738343 | from rest_framework.serializers import ModelSerializer
from rest_framework import exceptions
from api.models import VDO
from django_celery_results.models import TaskResult
class VDOSerializer(ModelSerializer):
    """DRF serializer exposing every field of the VDO model."""
    def create(self, validated_data):
        # Mirrors ModelSerializer's default create(); kept explicit as a
        # hook for future customisation.
        newvdo_record = self.Meta.model.objects.create(**validated_data)
        return newvdo_record
    class Meta:
        model = VDO
        fields = '__all__'
class StatusSerializer(ModelSerializer):
    """Serializer for celery TaskResult rows (task id, state and result)."""
    def create(self, validated_data):
        # Same explicit create() pattern as VDOSerializer above.
        newvdo_record = self.Meta.model.objects.create(**validated_data)
        return newvdo_record
    class Meta:
        model = TaskResult
        fields = 'task_id','status','result'  # tuple of exposed fields
| 6410615147/cartrack | cartrack/api/serializer.py | serializer.py | py | 688 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "api.models.VDO",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 16,
"usage_type": "n... |
23306322275 | import click
import csv
import itertools
from Bio import motifs
from Bio.Seq import Seq
from Bio.SeqUtils import GC
from Bio.Alphabet import generic_dna
from Bio.Alphabet.IUPAC import unambiguous_dna
def homer_parse(fstream):
    """Yield Bio.motifs.Motif objects parsed from a HOMER motif stream.

    Expected format: a '>' header line whose second whitespace-separated
    token is the motif name, followed by tab-separated per-position rows
    in A, C, G, T column order.
    """
    def build(name, freq):
        # Wrap the accumulated per-base columns in a Motif and name it.
        m = motifs.Motif(counts=freq)
        m.name = name
        return m
    nct = "ACGT"
    name = ""
    mtx = {a: [] for a in nct}
    for line in fstream:
        if line.startswith('>'):
            if name != '':
                yield build(name, mtx)  # flush the previous motif
            name = line.rstrip().split()[1]
            mtx = {a: [] for a in nct}
        else:
            # One row per motif position; column i belongs to base nct[i].
            score = [float(x) for x in line.rstrip().split("\t")]
            for i, a in enumerate(nct):
                mtx[a].append(score[i])
    if name != '':
        yield build(name, mtx)  # flush the final motif
def fasta_iter(istream):
    """Yield (name, sequence) pairs from a FASTA-formatted line stream."""
    current = None
    chunks = []
    for raw in istream:
        if raw.startswith('>'):
            if current is not None:
                yield (current, ''.join(chunks))
            # Header line: drop any '>' characters and trailing whitespace.
            current = raw.rstrip().replace('>', '')
            chunks = []
        else:
            chunks.append(raw.rstrip())
    if current is not None:
        yield (current, ''.join(chunks))
def mutation_iter(istream):
    """Yield (oligo, position, name, ref, [alts]) tuples from a variant table.

    Lines starting with '#' are treated as comments. Records whose ref or
    alt field contains an ambiguous 'N' base are skipped entirely.
    """
    for record in istream:
        if record.startswith('#'):
            continue
        oligo, pos, name, ref, alt = record.rstrip().split()
        if 'N' in ref or 'N' in alt:
            continue
        yield (oligo, int(pos), name, ref, alt.split(','))
def progress(iter, freq=100):
    """Pass items through unchanged while drawing a spinner on stderr.

    Every `freq` items a rotating glyph plus the running count is written
    to stderr on the same line (carriage return, no newline).
    """
    glyphs = ("|", "/", "-", "\\")
    seen = 0
    spin = 0
    for item in iter:
        yield item
        seen += 1
        if seen % freq == 0:
            spin = (spin + 1) % 4
            click.echo("[%s] %6d\r" % (glyphs[spin], seen), nl=False, err=True)
@click.command()
@click.option('--motif', '-m', type=click.File('r'), required=True)
@click.option('--reference', '-r', type=click.File('r'), required=True)
@click.option('--mutation', '-M', type=click.File('r'), default='-')
@click.option('--out', '-o', type=click.File('w'), default='-')
def fit_motif(motif, reference, mutation, out):
    """Score each mutation against every motif and write a TSV report.

    For every (mutation, motif) pair the best PSSM hit is computed on the
    reference and each alternate allele; flank/core GC content is taken
    from whichever allele scores higher.
    """
    click.echo("Loading reference sequences.", err=True)
    oligos = { name: seq for (name, seq) in progress(fasta_iter(reference)) }
    click.echo("Loading motifs matrix.", err=True)
    # NOTE(review): this local dict shadows the imported Bio `motifs`
    # module, and `motif` below rebinds the CLI file handle. It works
    # because the file has been fully consumed here, but is easy to misread.
    motifs = { _name(m) : m for m in progress(homer_parse(motif)) }
    reader = mutation_iter(mutation)
    writer = csv.writer(out, delimiter='\t')
    writer.writerow(['oligo', 'rsid', 'ref', 'alt', 'tf', 'ref_score', 'alt_score',
                     'score', 'flank_gc', 'core_gc'])
    # NOTE(review): "Progessing" typo in the user-facing message below.
    click.echo("Progessing mutations.", err=True)
    for ((oligo, pos, rsid, ref, alts), tf) in progress(itertools.product(reader, motifs)):
        sequence = oligos[oligo]
        motif = motifs[tf]
        # Substitute the reference allele at position `pos`.
        refseq = sequence[:pos] + ref + sequence[(pos+1):]
        refat, refscore = _score(motif, refseq)
        refgc = GC(flank(refseq, refat, refat + len(motif)))
        refcore_gc = GC(sequence[refat:(refat+len(motif))])
        for alt in alts:
            altseq = sequence[:pos] + alt + sequence[(pos+1):]
            altat, altscore = _score(motif, altseq)
            # GC context follows the higher-scoring allele.
            if altscore > refscore:
                flank_gc = GC(flank(altseq, altat, altat + len(motif)))
                core_gc = GC(sequence[altat:(altat+len(motif))])
            else:
                flank_gc = refgc
                core_gc = refcore_gc
            writer.writerow([oligo, rsid, ref, alt, tf, refscore, altscore,
                             refscore-altscore, flank_gc, core_gc])
def _score(motif, seq):
    """Return (position, score) of the best PSSM match on either strand.

    The sequence is scanned forward and as its reverse complement; for a
    reverse-strand hit the index is mapped back to forward-strand
    coordinates of the motif start.
    """
    seq = Seq(seq, unambiguous_dna)
    pssm = motif.pssm
    fw_scores = pssm.calculate(seq)
    rv_scores = pssm.calculate(seq.reverse_complement())
    # argmax over the per-position log-odds scores on each strand.
    fw_index = max(range(len(fw_scores)), key=fw_scores.__getitem__)
    rv_index = max(range(len(rv_scores)), key=rv_scores.__getitem__)
    if fw_scores[fw_index] > rv_scores[rv_index]:
        return fw_index, fw_scores[fw_index]
    else:
        # Convert reverse-strand offset to the forward-strand motif start.
        index = len(seq) - len(motif) - rv_index
        return index, rv_scores[rv_index]
def flank(seq, start, stop, size=10):
    """Return up to `size` bases on each side of seq[start:stop], concatenated.

    The core region itself is excluded; the windows are clamped to the
    sequence boundaries.
    """
    left = seq[max(0, start - size):start]
    right = seq[stop:min(len(seq), stop + size)]
    return left + right
def _name(motif):
name = motif.name.split('/')[0]
return name.upper().replace('-', '_')
if __name__ == '__main__':
    # Click parses the CLI arguments declared on fit_motif.
    fit_motif()
| ren-lab/snp-selex | pwm/score.py | score.py | py | 4,389 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "Bio.motifs.Motif",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "Bio.motifs",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "click.echo",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number"... |
73434596264 | import pandas as pd
import json
import requests
# # ----------from fred---------------
# api_keys = ["36211f27396765eca92b93f01dca74db", "4808384bf945022005347fcf2f6957fb",
# "a66fc3e61d360c6088b022f2c06c831c", "1836f996f9157acd994d59547bb0f65c",
# "4e2f7a3a68190b6584419017414974d5", "bc4c30a690776f1662cd6e8f5d30f3ce",
# "3cd3452ab1a374f571db4822fd6f359a", "ad8557a75ffc5f367492cb34b67539e8"]
#
# api_endpoint = "https://api.stlouisfed.org/fred/series/observations"
# params = {
# "series_id": 'GNPCA',
# "api_key": api_keys[1],
# "file_type": "json"
# }
# response = requests.get(api_endpoint, params=params)
# data_dict = response.json()
# print(data_dict)
# # Extract observations
# observations = data_dict['observations']
#
# # Create DataFrame
# df = pd.DataFrame(observations)
# df_final = pd.DataFrame()
#
# # Convert columns to appropriate data types
# df_final['date'] = pd.to_datetime(df['date'])
# df_final['value'] = pd.to_numeric(df['value'])
#
# print(df_final)
# df_final.to_csv('economic_data.csv', index=False)
# loaded_df_csv = pd.read_csv('economic_data.csv')
# print("Loaded DataFrame from CSV:")
# print(loaded_df_csv)
# ----------from nasdaq---------------
import nasdaqdatalink
# API key lives in a local file so it is not committed to the repo.
nasdaqdatalink.read_key(filename="../api_keys/nasdaq1.txt")
# nasdaqdatalink.bulkdownload('FRED')
df1 = pd.read_csv('../../files/data/FRED_metadata_plus_categories.csv')
codes = df1['code']
# print(df1['code'])
# Prefix every FRED series code with its dataset name for the API.
nasdaq_code = []
for code in codes:
    nasdaq_code.append('FRED/'+code)
# Fetch all series in one batched request (may be large and slow).
data = nasdaqdatalink.get(nasdaq_code)#['FRED/00XALCATM086NEST', 'FRED/00XALCBEM086NEST'])
print(data.info())
print(data)
# Round-trip through CSV to confirm the data was persisted correctly.
data.to_csv('economic_data2.csv', index=True)
loaded_df_csv = pd.read_csv('economic_data2.csv')
print("Loaded DataFrame from CSV:")
print(loaded_df_csv)
| stergioa/masterThesis4 | src/download_data/download_timeseries_values.py | download_timeseries_values.py | py | 1,818 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nasdaqdatalink.read_key",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "nasdaqdatalink.get",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pandas.rea... |
# Site-specific scheduler configuration (slurm via sbatch) used by the
# %launch IPython magic and the pbs/slurm script generator.
debug_queue = "queue", "inter"
""" How to select the debug queue.

    First part of the tuple is the keyword argument to modify when calling
    the pbs job, and the second is its value.
"""
accounts = ["BES000"]
""" List of slurm or pbs accounts allowed for use.

    This is used by ipython's %launch magic function.
    It is not required for slurm systems.
    If empty, then %launch will not have a queue option.
"""
qsub_exe = "sbatch"
""" Qsub executable. """
default_pbs = { 'account': accounts[0], 'walltime': "06:00:00", 'nnodes': 1, 'ppn': 8}
""" Defaults parameters filling the pbs script. """
# Template filled with default_pbs plus err/out/name/directory/scriptcommand.
pbs_string = "#! /bin/bash\n"\
             "#SBATCH --account={account}\n"\
             "#SBATCH --time={walltime}\n"\
             "#SBATCH -N {nnodes}\n"\
             "#SBATCH -e \"{err}.%j\"\n"\
             "#SBATCH -o \"{out}.%j\"\n"\
             "#SBATCH -J {name}\n"\
             "#SBATCH -D {directory}\n\n"\
             "python {scriptcommand}\n"
""" Default slurm script. """
default_comm = { 'n': 2, 'ppn': default_pbs['ppn'] }
""" Default mpirun parameters. """
mpirun_exe = "mpirun -np {n} {placement} numa_wrapper -ppn={ppn} {program}"
""" Command-line to launch external mpi programs. """
def ipython_qstat(self, arg):
    """ squeue --user=`whoami` -o "%7i %.3C %3t -- %50j" """
    from subprocess import Popen, PIPE
    from IPython.utils.text import SList
    from getpass import getuser
    # finds user name.
    whoami = getuser()
    # BUGFIX: universal_newlines=True makes stdout a str; without it
    # Python 3 returns bytes and .split('\n') below raises TypeError.
    squeue = Popen(["squeue", "--user=" + whoami, "-o", "\"%7i %.3C %3t %j\""],
                   stdout=PIPE, universal_newlines=True)
    result = squeue.stdout.read().rstrip().split('\n')
    # Drop the header row and strip the surrounding quotes on each line.
    result = SList([u[1:-1] for u in result[1:]])
    # The magic argument arrives quoted; strip the quotes before grepping.
    return result.grep(str(arg[1:-1]))
| mdavezac/LaDa | config/redmesa_mpi.py | redmesa_mpi.py | py | 1,720 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "getpass.getuser",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "IPython.utils.text.S... |
33634152186 | from re import A
import time, random
from Yinsh.yinsh_model import YinshGameRule
from template import Agent
from copy import deepcopy
from collections import deque
import numpy as np
THINKTIME = 0.5
C_PARAM = 1.414
class Node():
    """A node of a Monte-Carlo search tree over Yinsh game states.

    Wraps a game state plus the bookkeeping MCTS needs: the action that
    produced it, expanded children, visit count and win/loss tallies used
    by the UCT selection formula.
    """

    def __init__(self, state, game_rule=None, agent_id=None, parent=None, parent_action=None):
        self.state = state
        self.parent = parent
        self.id = agent_id
        self.parent_action = parent_action
        self.children = []
        self.game_rule = game_rule
        self._untried_actions = self.get_legal_actions(self.state)
        self._number_of_visits = 0
        # _results[1] counts simulated wins, _results[-1] counts losses.
        self._results = {}
        self._results[1] = 0
        self._results[-1] = 0
        self._score = self.state.agents[self.id].score

    # Current node, victory count minus failure count
    def q(self):
        wins = self._results[1]
        loses = self._results[-1]
        return wins - loses

    # Number of visits to the current node
    def n(self):
        return self._number_of_visits

    # Expand one untried action into a new child node.
    def expand(self):
        action = self._untried_actions.pop()
        current_state = deepcopy(self.state)
        next_state = self.game_rule.generateSuccessor(current_state, action, self.id)
        child_node = Node(next_state, parent=self, parent_action=action, agent_id=self.id, game_rule=self.game_rule)
        self.children.append(child_node)
        return child_node

    # Selection/expansion phase: walk down until a terminal node is hit;
    # expand the first not-fully-expanded node found, otherwise descend
    # to a random child.
    def run_tree_policy(self):
        current_node = self
        while not current_node.is_terminal_node():
            if not current_node.is_fully_expanded():
                return current_node.expand()
            else:
                current_node = current_node.get_random_child()
        return current_node

    def is_terminal_node(self):
        if not self.parent:
            return False
        elif len(self.get_legal_actions(self.state)) == 1:
            # Only a single (forced) action left -- treated as terminal.
            return True
        else:
            # A score of 3 ends the game.
            return self._score == 3 or self.parent._score == 3

    def is_fully_expanded(self):
        return len(self._untried_actions) == 0

    # Select an optimal node from all the child nodes (next state)
    def choose_best_child(self):
        # UCT algorithm
        try:
            choices_weights = [(c.q() / c.n()) + C_PARAM * np.sqrt((2*np.log(self.n()) / c.n())) for c in self.children]
            if self.id == 0:
                # First player maximises the UCT weight.
                print("mct best action so far")
                return self.children[np.argmax(choices_weights)]
            else:
                # Second player minimises it (lowest first-player win rate).
                print("mct found")
                return self.children[np.argmin(choices_weights)]
        except (ZeroDivisionError, ValueError, IndexError):
            # Unvisited child (n == 0) or no usable weights; fall back to a
            # random child. Was a bare `except:` which also hid real bugs.
            return self.get_random_child()

    def get_random_child(self):
        return random.choice(self.children)

    def get_legal_actions(self, current_state):
        return deepcopy(self.game_rule.getLegalActions(current_state, self.id))

    # Simulation phase: play random moves until the game decides.
    def rollout(self):
        current_rollout_state = deepcopy(self.state)
        final_result = 0
        while final_result == 0:
            possible_moves = self.get_legal_actions(current_rollout_state)
            action = random.choice(possible_moves)
            current_rollout_state = deepcopy(self.game_rule.generateSuccessor(current_rollout_state, action, self.id))
            new_score = current_rollout_state.agents[self.id].score
            if new_score == 3 or self._score == 3:
                # +1 if our score improved over this node's score, else -1.
                final_result = 1. if new_score > self._score else -1.
                break
            if len(self.get_legal_actions(current_rollout_state)) == 1:
                break
        return final_result

    # Backpropagation phase: push the rollout result up to the root.
    def backpropagate(self, result):
        self._number_of_visits += 1.
        if result == 0:
            return
        # BUGFIX: was `self._results[result] += result`, which *decremented*
        # the loss bucket (result == -1), making q() evaluate to
        # wins + losses and corrupting the UCT exploitation term.
        self._results[result] += 1
        if self.parent:
            self.parent.backpropagate(result)

    # Run select/rollout/backpropagate until the time budget expires,
    # then return the best child of the root.
    def calc_best_action(self, stime):
        cnt = 0
        while time.time()-stime < THINKTIME:
            cnt += 1
            node = self.run_tree_policy()
            reward = node.rollout()
            node.backpropagate(reward)
        print("mct state",cnt)
        return self.choose_best_child()
# Agent class
class myAgent(Agent):
    """Game agent: BFS for large action sets, MCTS otherwise."""
    def __init__(self, _id):
        super().__init__(_id)
        self.id = _id
        self.game_rule = YinshGameRule(2)  # two-player Yinsh rules
    # BFS search algorithm for len(actions) > 70
    def SelectAction_BFS(self, actions, rootstate):
        """Breadth-first search for the first move leading to a score gain."""
        start_time = time.time()
        queue = deque([ (deepcopy(rootstate),[]) ])
        count =0
        # Conduct BFS starting from rootstate.
        while len(queue) and time.time()-start_time < THINKTIME:
            count +=1
            state, path = queue.popleft()
            new_actions = self.game_rule.getLegalActions(state, self.id)
            for a in new_actions:
                next_state = deepcopy(state)
                next_path = path + [a]
                score = state.agents[self.id].score
                new_state = self.game_rule.generateSuccessor(next_state, a, self.id)
                # A score increase marks a winning line; play its first move.
                reward = new_state.agents[self.id].score > score
                if reward:
                    print("BFS found",count)
                    return next_path[0]
                else:
                    queue.append((next_state, next_path))
        # Time/queue exhausted without finding a scoring line.
        print("BFS random",count)
        return random.choice(actions)
    # MCTS algorithm for len(actions) <= 70
    def SelectAction(self, actions, rootstate):
        """Entry point called by the game engine each turn."""
        start_time = time.time()
        # NOTE(review): this while loop always returns on its first
        # iteration; the loop and the trailing fallback are effectively dead.
        while time.time()-start_time < THINKTIME:
            if len(actions) > 70:
                return self.SelectAction_BFS(actions, rootstate)
            else:
                tree = Node(rootstate, game_rule=self.game_rule, agent_id=self.id)
                return tree.calc_best_action(start_time).parent_action
        print('mct random')
        return random.choice(actions)
| bzr1/automated-agent-for-a-board-game-yinsh- | agents/t_056/mcts.py | mcts.py | py | 7,015 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "copy.deepcopy",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": ... |
71946585384 | from pyspark.sql import SparkSession
from pyspark.sql.functions import lit
# Build a Hive-enabled Spark session with dynamic partition overwrite,
# so writes replace only the partitions present in the data.
spark = (SparkSession
    .builder
    .appName("files_creator")
    .config("spark.sql.sources.partitionOverwriteMode", "dynamic")
    .config("hive.exec.dynamic.partition", "true")
    .config("hive.exec.dynamic.partition.mode", "nonstrict")
    .enableHiveSupport()
    .getOrCreate()
    )
# 100k-row test frame; all rows land in a single partition value "p1".
df = spark.range(100000).cache()
df2 = df.withColumn("partitionCol", lit("p1"))
# repartition(200) deliberately produces 200 small files in that partition.
df2.repartition(200).write.partitionBy("partitionCol").saveAsTable("schema.table")
| actweird/data_repository | Python/files_creator.py | files_creator.py | py | 574 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 4,
"usage_type": "... |
71121334825 | from threading import Thread
import webbrowser, http.server, socketserver
import time;
port_number = 8000  # TCP port the static file server listens on
server = None  # module-global TCPServer instance, set by startServer()
def startServer(port):
    """Serve the current directory over HTTP; blocks until shutdown()."""
    Handler = http.server.SimpleHTTPRequestHandler
    global server
    server = socketserver.TCPServer(("", port), Handler)
    print("Start server at port", port)
    server.serve_forever()  # blocks; start() runs this on a worker thread
def start(port):
    """Launch the HTTP server on a background thread.

    Returns the module-global TCPServer once startServer() has created it,
    or None if it has not appeared within 60 seconds.
    """
    thread = Thread(target=startServer, args=[port])
    thread.start()
    startTime = int(time.time())
    while not server:
        # BUGFIX: sleep briefly instead of busy-spinning a full CPU core
        # while waiting for the worker thread to create the server.
        time.sleep(0.05)
        if int(time.time()) > startTime + 60:
            print("Time out")
            break
    return server
def stop():
    # shutdown() makes serve_forever() return on the worker thread.
    if server:
        server.shutdown()
def openUrl():
    # Open the served directory listing in the system default browser.
    url = "http://localhost:" + str(port_number)
    webbrowser.open(url)
    print(url + " is opened in browser")
if __name__ == "__main__":
    # Start the server thread, then point the browser at it.
    start(port_number)
    openUrl()
| ldlchina/Sample-of-WebGL-with-STL-loader | LocalServer.py | LocalServer.py | py | 862 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "http.server.server",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "http.server",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "socketserver.TCPServer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "threading.Th... |
9783557706 | import requests
from bs4 import BeautifulSoup
# 製作一個串列裝所有可輸入的星座代號
# astros = ["牡羊座","金牛座","雙子座","巨蟹座","獅子座","處女座","天秤座","天蠍座","射手座","摩羯座","水瓶座","雙魚座"]
astrosDict = {"牡羊座":"0","金牛座":"1","雙子座":"2","巨蟹座":"3","獅子座":"4","處女座":"5","天秤座":"6","天蠍座":"7","射手座":"8","摩羯座":"9","水瓶座":"10","雙魚座":"11"}
# 組合字串,並印出所有可選星座
# text = ''
# for index,value in enumerate(astros):
# text += value+":"+str(index)+" "
# print(text)
# 儲存使用者輸入之星座
astrosInput = input("請輸入欲查詢的星座名稱:")
# 將使用者輸入之數字結合網址,並送出請求取得網頁
# 透過觀察得知
# 網址的daily_0 iAstro=0 數字與星座相關
# 讓使用者輸入星座
url = "https://astro.click108.com.tw/daily_"+astrosDict[astrosInput]+".php?iAstro="+astrosDict[astrosInput]
r = requests.get(url)
# 將回傳之網頁HTML轉換為可操作格式
soup = BeautifulSoup(r.text, 'html.parser')
# 利用選擇器,選取到今日整體運勢
data = soup.select(".TODAY_CONTENT > p")
# 印出今日星座運勢
print("\n"+astrosInput+"今日的整體運勢為:\n")
print(data[0].text)
print(data[1].text)
# astros[0]="牡羊座";
# astros[1]="金牛座";
# astros[2]="雙子座";
# astros[3]="巨蟹座";
# astros[4]="獅子座";
# astros[5]="處女座";
# astros[6]="天秤座";
# astros[7]="天蠍座";
# astros[8]="射手座";
# astros[9]="摩羯座";
# astros[10]="水瓶座";
# astros[11]="雙魚座";
| byunli/python | 參考_星座.py | 參考_星座.py | py | 1,621 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
}
] |
13245977761 | #!/usr/bin/env python
# coding: utf-8
# # Jenkins - Monitoring, Building and Running Regressions
#
# This tool was written to ease my daily job because I need to check the regression tests regularly which totally have around 7000 regression tests. It's really such a hassle to do it manually and frequently.
#
# This tool will do the following:
#
# - Run jobs periodically based on cron style configurations
# - Start Jenkins build job to build the jobs
# - Run regressions test jobs
# - Monitor the jobs and re-run those jobs which fail completely or partially
# - Produce reports if requested through
# - Monitor your local Outlook email for commands to start, stop jobs and send reports
#
# Jenkins will be organized the following hierarchical way
#
# - Top View: List of Projects
# - Project View
# - Tab Views - each view represents a branch, i.e., release branch, main branch, feature branch. In the code below, this is called branch view
# - List of Jobs for each tab view
# - Job details consisting of build status, builds, etc
# - There is ONE job for building the application
# - There is ONE job for scheduling the regression job runs
#
# ## Jenkins COLOR DEFINITIONS
#
# Each Jenkins job can have varous color representing the status of the job. Here is a list of them.
#
# - RED("red",Messages._BallColor_Failed(), ColorPalette.RED)
# - RED_ANIME("red_anime",Messages._BallColor_InProgress(), ColorPalette.RED)
# - YELLOW("yellow",Messages._BallColor_Unstable(), ColorPalette.YELLOW)
# - YELLOW_ANIME("yellow_anime",Messages._BallColor_InProgress(), ColorPalette.YELLOW)
# - BLUE("blue",Messages._BallColor_Success(), ColorPalette.BLUE)
# - BLUE_ANIME("blue_anime",Messages._BallColor_InProgress(), ColorPalette.BLUE)
# - GREY("grey",Messages._BallColor_Pending(), ColorPalette.GREY)
# - GREY_ANIME("grey_anime",Messages._BallColor_InProgress(), ColorPalette.GREY)
#
# - DISABLED("disabled",Messages._BallColor_Disabled(), ColorPalette.GREY)
# - DISABLED_ANIME("disabled_anime",Messages._BallColor_InProgress(), ColorPalette.GREY)
# - ABORTED("aborted",Messages._BallColor_Aborted(), ColorPalette.GREY)
# - ABORTED_ANIME("aborted_anime",Messages._BallColor_InProgress(), ColorPalette.GREY)
# - NOTBUILT("nobuilt",Messages._BallColor_NotBuilt(), ColorPalette.GREY)
# - NOTBUILT_ANIME("nobuilt_anime",Messages._BallColor_InProgress(), ColorPalette.GREY)
#
# # How To Use It
#
# The tool will generate a YAML configuration file "jenkins.yaml". Refer to the file for more details.
#
# The jenkins.yaml will have "needChange: Yes". So the first thing to do is:
#
# - Edit jenkins.yaml file to create profiles. Each profile must have the following parameters:
# - jenkinServerUrl: "<master URL of Jenkins server>"
# - userName: "<user name used to login to Jenkins>"
# - password: "<password used to login to Jenkins>"
# - buildJob: "<i.e., .*-Build a regular expression to define the pattern of names of build jobs>"
# - schedulerJob: "<i.e., .*-Scheduler a regular expression to define the pattern of names of scheduler jobs>"
# - regressionJobFilter: "<i.e., (.*Build$|.*Scheduler$) a regular expression defining non-regression jobs>"
# - projectName: "<project name - this is bascially Jenkins top level view name>"
# - branchName: "<tab name or branch name if Jenkins regressions are grouped by branches>"
#
# All the parameters can be defined at the top level or defined at the profile level. For example
#
# ```yaml
# jenkinServerUrl: "http://jenkins.com/"
# userName: "myname"
# password: "mypassword"
#
# buildJob: ".*-Build" # The regular expression patterns, separated by comma, of build jobs
# schedulerJob: ".*-Scheduler" # The regular expression patterns, separated by comma, of scheduler jobs
# skipJob: ".*-MOD" # The regular expression patterns, separated by comma, of jobs to be skipped when rerun
#
# # The false filter for regressions jobs. Any job whose name does not satisfy the regular expression
# # is considered as regression jobs.
# # The patterns, separated by comma.
# regressionJobFilter: ".*Build$,.*Scheduler$"
#
# profiles:
# ReleaseA:
# projectName: "projectA"
# branchName: "Release"
#
# BranchA:
# projectName: "projectA"
# branchName: "Branch"
#
# ReleaseB:
# projectName: "projectB"
# branchName: "Release"
# regressionJobFilter: ".*Build$,.*Scheduler$,.*Others"
# ```
# - Change "needChange: Yes" to "needChange: No"
# - Run the tool as "jenkins_tool.py -p profile_name"
# - If you want to run it from IPython, you can provide the values by changing *argvIPython*. See the Main Program section for details.
# In[1]:
import re
import os
import sys
import getopt
import yaml
import collections
import datetime
import bisect
import os
import glob
import json
import jenkinsapi
import itertools
from tabulate import tabulate
from jenkinsapi.jenkins import Jenkins
from collections import abc
class FrozenJSON:
    """A read-only façade for navigating a JSON-like object
    using attribute notation.
    Credit: "O'Reilly Fluent Python", Luciano Ramalho
    http://www.amazon.com/Fluent-Python-Luciano-Ramalho/dp/1491946008
    """
    def __init__(self, mapping):
        # Copy so later mutation of the source mapping cannot leak in.
        self.__data = dict(mapping)
    def __getattr__(self, name):
        # Dict methods (keys, items, ...) win over same-named keys.
        if hasattr(self.__data, name):
            return getattr(self.__data, name)
        try:
            return FrozenJSON.build(self.__data[name])
        except KeyError:
            # BUGFIX: a raw KeyError escaping __getattr__ broke hasattr()
            # and getattr(obj, name, default); AttributeError is the
            # documented contract for missing attributes.
            raise AttributeError(name) from None
    @classmethod
    def build(cls, obj):
        # Recursively wrap mappings; rebuild lists element by element;
        # return scalars unchanged.
        if isinstance(obj, abc.Mapping):
            return cls(obj)
        elif isinstance(obj, abc.MutableSequence):
            return [cls.build(item) for item in obj]
        else:
            return obj
# In[2]:
class JenkinsServer(object):
    """
    Class representing the Jenkins Server for Branch View
    """
    # Jenkins "ball colour" -> human-readable build status.
    # The *_anime variants mean the job is currently running.
    actionTable = {
        "red" : { "status" : "Failed"},
        "red_anime" : { "status" : "InProgress"},
        "yellow" : { "status" : "Unstable"},
        "yellow_anime" : { "status" : "InProgress"},
        "blue" : { "status" : "Success"},
        "blue_anime" : { "status" : "InProgress"},
        "grey" : { "status" : "Pending"},
        "grey_anime" : { "status" : "InProgress"},
        "disabled" : { "status" : "Disabled"},
        "disabled_anime" : { "status" : "InProgress"},
        "aborted" : { "status" : "Aborted"},
        "aborted_anime" : { "status" : "InProgress"},
        "nobuilt" : { "status" : "NotBuilt"},
        "nobuilt_anime" : { "status" : "InProgress"}
    }
    # Command keyword (e.g. parsed from email) -> method name to invoke.
    commandActor = {
        "build" : "build",
        "schedule" : "schedule",
        "rerun" : "runFailedUnstableJobs",
        "failed" : "failedJobReport",
        "report" : "jobReport"
    }
    def __init__(self, jkCfg, profile):
        # jkCfg: parsed jenkins.yaml configuration; profile: profile key in it.
        self.jkCfg = jkCfg
        self.profile = profile
        self._jserver = Jenkins(jkCfg.getValue(profile, "jenkinServerUrl"),
                                jkCfg.getValue(profile, "userName"),
                                jkCfg.getValue(profile, "password"))
        # Project = top-level view; branch = tab view inside the project.
        self._projectView = self._jserver.views[self.jkCfg.getValue(self.profile, "projectName")]
        self._branchView = self._projectView.views[self.jkCfg.getValue(self.profile, "branchName")]
    def _testConditions(self, rexps, value):
        """
        Test the value against a list of regular expressions.
        Returns True if any of them matches.
        NOTE: a falsy/None `rexps` counts as match-all (returns True);
        callers using this as a skip-filter must handle that case.
        """
        if not rexps:
            return True
        tests = [ re.match(m, value) for m in rexps]
        return any(tests)
    @property
    def jenkinsServer(self):
        # Underlying jenkinsapi.Jenkins connection.
        return self._jserver
    @property
    def projectView(self):
        # Top-level Jenkins view for the configured project.
        return self._projectView
    @property
    def branchView(self):
        # Tab view (branch) inside the project view.
        return self._branchView
    def getJobs(self):
        """
        Generator returns all types jobs
        """
        # One bulk API call for the whole view; each job dict is wrapped
        # in a FrozenJSON for attribute-style access.
        jlist = self._branchView.get_data(self._branchView.python_api_url(self._branchView.baseurl))["jobs"]
        for j in jlist:
            job = FrozenJSON(j)
            yield job
def getRegressionJobs(self, exclude=None):
"""
Generator returns regressions jobs whose name usually not ends with "Build" or "Scheduler"
exclude is a list of conditions separated by comma. Specify it to override the value from jenkins.yaml
"""
if not exclude:
exclude = self.jkCfg.getValue(self.profile, "regressionJobFilter")
rexps = exclude.split(",")
for j in itertools.filterfalse(self._testConditions(rexps, x.name), self.getJobs()):
yield j
    def jobDetails(self, job):
        # (name, status, last build no., last stable build no., health summary)
        return (
            job.name,
            JenkinsServer.actionTable[job.color]["status"],
            job.lastBuild.number if job.lastBuild is not None else "",
            job.lastStableBuild.number if job.lastStableBuild is not None else "",
            job.healthReport[0].description
        )
    def isQueuedOrRunning(self, job):
        # Re-fetch the job through the server API for a live queue status.
        j = self._jserver.get_job(job.name)
        return j.is_queued_or_running()
    def isFailedOrUnstable(self, job):
        # NOTE(review): this tests for "notbuilt" but actionTable above uses
        # the key "nobuilt" -- verify which spelling the server really sends.
        return "red" in job.color or "yellow" in job.color or "notbuilt" in job.color
    def isSuccessful(self, job):
        return not self.isFailedOrUnstable(job)
    def findJob(self, namePattern):
        """
        Find the first job based on the name pattern in regular expression.
        namePattern is a list of regular expressions separated by comma.
        Raises StopIteration if nothing matches.
        """
        return next(self.findJobs(namePattern))
    def findJobs(self, namePattern):
        """
        A generator.
        Find all the jobs based on the name pattern in regular expression.
        namePattern is a list of regular expressions separated by comma.
        Specify it to override the value from jenkins.yaml
        """
        rexps = namePattern.split(",")
        return (x for x in self.getJobs() if self._testConditions(rexps, x.name))
    def getBuildJobs(self, namePattern=None):
        """
        Get the jobs which build the application.
        namePattern is a list of regular expressions separated by comma;
        by default build jobs are named like ".*-Build".
        Specify it to override the value from jenkins.yaml.
        """
        if not namePattern:
            namePattern = self.jkCfg.getValue(self.profile, "buildJob")
        return self.findJobs(namePattern)
    def getSchedulerJobs(self, namePattern=None):
        """
        Get the jobs which schedule the regression runs.
        namePattern is a list of regular expressions separated by comma;
        by default scheduler jobs are named like ".*-Scheduler".
        Specify it to override the value from jenkins.yaml.
        """
        if not namePattern:
            namePattern = self.jkCfg.getValue(self.profile, "schedulerJob")
        return self.findJobs(namePattern)
def getJobsReportShort(self, onlyFailedJobs=False):
"""
THIS IS FAST.
Generator returns list of details of jobs. It consists the folloowing data:
"Name", "Status", "HealthReport"
If parameter onlyFailedJobs=True is specified, only failed jobs will be reported.
Failed jobs are those with color RED (FAILED) or YELLOW (UNSTABLE)
Use the following to print a pretty-formated report:
print(tabulate(jserver.getJobsReport(), headers=["Name", "Status", "HealthReport"]))
"""
jobs = self.getJobs()
for job in jobs:
healthReport = "-"
statusValue = None
if self.isFailedOrUnstable(job):
j = self.branchView.get_data(self.branchView.python_api_url(job.url))
if len(j["healthReport"]) > 0:
healthReport = j["healthReport"][0]["description"]
statusValue = JenkinsServer.actionTable[job.color]["status"]
if not onlyFailedJobs:
yield (job.name, statusValue, healthReport)
elif self.isFailedOrUnstable(job):
yield (job.name, statusValue, healthReport)
else:
continue
    def jobReport(self):
        # Pretty-print the status of all jobs as a table on stdout.
        print(tabulate(self.getJobsReportShort(), headers=["Name", "Status", "HealthReport"]))
    def failedJobReport(self):
        # Pretty-print only failed/unstable jobs.
        print(tabulate(self.getJobsReportShort(onlyFailedJobs=True), headers=["Name", "Status", "HealthReport"]))
def anyFailedUnstable(self, skipJob=None):
"""
True if there is any failed or unstable job
"""
rexps = None
if not skipJob:
skipJob = self.jkCfg.getValue(self.profile, "skipJob")
if skipJob:
rexps = skipJob.split(",")
jobs = self.getJobs()
for job in jobs:
if self.isFailedOrUnstable(job):
if not self._testConditions(rexps, job.name):
return True
return False
def anyFailedUnstableNotRunningOrQueued(self, skipJob=None):
"""
True if there is any failed or unstable job which is not queued or running
"""
rexps = None
if not skipJob:
skipJob = self.jkCfg.getValue(self.profile, "skipJob")
if skipJob:
rexps = skipJob.split(",")
jobs = self.getJobs()
for job in jobs:
if self.isFailedOrUnstable(job):
if not self._testConditions(rexps, job.name):
if not self.isQueuedOrRunning(job):
return True
return False
    def getJobsSlow(self):
        """
        Generator yielding one fully-populated job object per configured job.
        Slow: performs one API request per job.
        """
        # NOTE(review): this method uses self._branchView while getJobsReportShort
        # uses self.branchView - confirm both attributes exist on this class.
        for j, url in self._branchView.get_job_dict().items():
            job = FrozenJSON(self._branchView.get_data(self._branchView.python_api_url(url)))
            yield job
    def getJobsReportDetailed(self, onlyFailedJobs=False):
        """
        THIS IS SLOW BECAUSE IT CHECKS BUILDS OF EACH JOB
        Generator returns list of details of jobs. It consists the following data:
        "Name", "Status", "Last Build", "Last Stable Build", "Report"
        If parameter onlyFailedJobs=True is specified, only failed jobs will be reported.
        Failed jobs are those with color RED (FAILED) or YELLOW (UNSTABLE)
        Use the following to print a pretty-formatted report:
        print(tabulate(jserver.getJobsReport(), headers=["Name", "Status", "Last Build", "Last Stable Build", "Report"]))
        """
        jobs = self.getJobsSlow()
        for job in jobs:
            if not onlyFailedJobs:
                yield self.jobDetails(job)
            elif self.isFailedOrUnstable(job):
                yield self.jobDetails(job)
            else:
                continue
    def startJob(self, job):
        # Trigger the job on the Jenkins master, but only when it is not
        # already queued or running (avoids piling up duplicate builds).
        if not self.isQueuedOrRunning(job):
            jobBuild = self.jenkinsServer.get_job(job.name)
            jobBuild.invoke()
    def build(self, verbose=True, namePattern=None):
        """
        Start the building jobs to build the applications.
        verbose=True will print each job name as it is started.
        namePattern optionally overrides the configured build-job pattern.
        """
        for job in self.getBuildJobs(namePattern):
            if verbose:
                print("Starting building job: {}".format(job.name))
            self.startJob(job)
def isBuilding(self, namePattern=None):
"""
Return True if any build job is running
"""
for job in self.getBuildJobs(namePattern):
if self.isQueuedOrRunning(job):
return True
return False
    def schedule(self, verbose=True, namePattern=None):
        """
        Start the scheduling jobs to run the regressions jobs.
        verbose=True will print each job name as it is started.
        namePattern optionally overrides the configured scheduler-job pattern.
        """
        for job in self.getSchedulerJobs(namePattern):
            if verbose:
                print("Starting schedule job: {}".format(job.name))
            self.startJob(job)
def isScheduling(self, namePattern=None):
"""
Return True if any scheduling job is running
"""
for job in self.getSchedulerJobs(namePattern):
if self.isQueuedOrRunning(job):
return True
return False
def runFailedUnstableJobs(self, verbose=True, skipJob=None):
"""
Start failed or unstable jobs. Provide regular expressions to exclude any job from being started
skipJob, regular expressions separated by comma define the jobs to be skipped
"""
if not skipJob:
skipJob = self.jkCfg.getValue(self.profile, "skipJob")
rexps = skipJob.split(",")
jobs = self.getJobsReportShort(onlyFailedJobs=True)
for job in jobs:
j = collections.namedtuple("JobTemp", ("name", "status", "healthReport"))(*job)
if not self._testConditions(rexps, j.name):
if verbose:
print("Starting job: {}".format(j.name))
self.startJob(j)
def runIt(self, func):
f = getattr(self, JenkinsServer.commandActor[func], None)
if f is not None:
f()
else:
raise ValueError("ERROR: Bad function name '{} = {}'".format(func, JenkinsServer.commandActor[func]))
# In[3]:
"""
The main program
"""
jenkins_yaml = """---
# Jenkins configuraions
# If this value is Yes, this application will not run.
# So change the values below and then change needChange to "No"
needChange: Yes
#------------------------------------------------------
# Values for variables not defined at the profile level
#------------------------------------------------------
# Jenkins' master URL
jenkinServerUrl: "http://jenkins.xyz.net/jenkins/"
# User name and password to login to Jenkins master server
userName: "<username>"
password: "<password>"
buildJob: ".*-Build" # The regular expression patterns, separated by comma, of build jobs
schedulerJob: ".*-Scheduler" # The regular expression patterns, separated by comma, of scheduler jobs
skipJob: ".*-MOD" # The regular expression patterns, separated by comma, of jobs to be skipped when rerun
# The false filter for regressions jobs. Any job whose name does not satisfy the regular expression
# is considered as regression jobs.
# The patterns, separated by comma.
regressionJobFilter: ".*Build$,.*Scheduler$"
#------------------------------------------------------
# Values defined at the top level will be overridden by
# the values defined in profile level
#------------------------------------------------------
profiles:
Release:
projectName: "<project>" # Main Jenkins' main view, mostly one per project
branchName: "Release" # Jenkins sub-views, mostly one per mercurial branch
Branch:
projectName: "<project>" # Main Jenkins' main view, mostly one per project
branchName: "Branch" # Jenkins sub-views, mostly one per mercurial branch
..."""
info = """
==============================================================================================
A new Jenkins configuration file ./jenkins.yaml has been generated.
Before you continue, modify the file accordingly first.
Check the jenkins.yaml for details.
==============================================================================================
"""
class JKCfg(object):
    """Thin wrapper around the parsed jenkins.yaml configuration."""
    # Maps CLI command names to method names on this class.
    commandActor = {
        "list" : "listProfiles"
    }
    def __init__(self, jkCfg):
        self._jkCfg = jkCfg
    def getValue(self, profile, name):
        """
        Return configuration value `name`. A value defined under the given
        profile overrides the top-level value; with profile=None only the top
        level is consulted. Returns None when the value is absent.
        """
        if not profile:
            return self._jkCfg.get(name, None)
        pd = self._jkCfg["profiles"][profile]
        # dict.get with the top-level value as fallback replaces the original
        # explicit "name in self._jkCfg" membership test.
        return pd.get(name, self._jkCfg.get(name))
    def listProfiles(self, printList=True):
        """
        if printList = True, the list will be printed out to standard output.
        Return a list of tuples (profile name, project name, branch name)
        """
        ls = [(k, self.getValue(k, "projectName"), self.getValue(k, "branchName"))
              for k in self._jkCfg["profiles"]]
        if printList:
            print(tabulate(sorted(ls), headers=["profile", "project name", "branch name"]))
        return ls
    def runIt(self, func):
        """
        Dispatch a command name registered in commandActor.
        Raises ValueError for an unknown command (the original raised KeyError
        from the commandActor lookup before reaching its ValueError message).
        """
        actorName = JKCfg.commandActor.get(func)
        f = getattr(self, actorName, None) if actorName else None
        if f is not None:
            f()
        else:
            raise ValueError("ERROR: Bad function name '{} = {}'".format(func, actorName))
def runIt(jkCfg, profile, options, cfgOptions):
    """Run the config-level commands, then the per-profile Jenkins commands.

    cfgOptions are executed against the configuration object; options are
    executed against a JenkinsServer created for each comma-separated profile.
    """
    for cmd in cfgOptions:
        jkCfg.runIt(cmd)
    if profile is None:
        return
    for p in profile.split(","):
        jserver = JenkinsServer(jkCfg, p)
        for cmd in options:
            jserver.runIt(cmd)
def main(profile, options, cfgOptions):
    """
    Entry point: generate a template ./jenkins.yaml on first run, load the
    configuration, and execute the requested commands unless the template
    still carries its needChange sentinel.
    """
    generatedNewYaml = False
    if not os.path.exists("./jenkins.yaml"):
        generatedNewYaml = True
        with open("./jenkins.yaml", 'w', encoding='utf-8') as f:
            f.write(jenkins_yaml)
    with open("./jenkins.yaml", 'r', encoding='utf-8') as f:
        # safe_load: no arbitrary Python object construction from the YAML, and
        # no deprecation warning for yaml.load() without an explicit Loader.
        jkCfg = JKCfg(yaml.safe_load(f))
    if generatedNewYaml:
        print(info)
    if jkCfg.getValue(None, "needChange"):
        print("It seems that you've not change the Jenkins configuration jenkins.yaml yet.\nPlease do so and try it again.")
    else:
        runIt(jkCfg, profile, options, cfgOptions)
# # Main Program
# In[4]:
def run_from_ipython():
    """Detect whether the script is executing inside an IPython session."""
    try:
        __IPYTHON__  # noqa: F821 - only defined by the IPython runtime
    except NameError:
        return False
    return True
def displayHelpAndExit():
    """Print usage information and terminate the process.

    The original implementation only printed and returned, despite the name;
    the GetoptError caller then continued with an unbound `opts` variable.
    """
    print(
        '''
    Usage:
       python jenkins_tool.py -p profile_name
    Options:
       -p --profile   profile names separated by comma
       -r --run       re-run all failed and unstable jobs
       -b --build     build the application
       -s --schedule  schedule all regressions to run
       -f --failed    list failed jobs
       -t --report    list all the jobs
       -l --list      list all the profiles available
    '''
    )
    sys.exit(0)
#argvIPython = ["-lfr", "-p", "16R1.16R1_PE.1805.1806"]
argvIPython = ["-lfr", "-p", "17R1.7.Branch"]
if __name__ == '__main__':
    # Parse the command line (or the canned IPython arguments) into a profile
    # name plus the per-profile and config-level command lists.
    profile = None
    options = []
    cfgOptions = []
    args = argvIPython if run_from_ipython() else sys.argv[1:]
    try:
        opts, args = getopt.getopt(args,"hbsrftlp:",["help", "build", "schedule", "rerun", "failed", "report", "list", "profile="])
    except getopt.GetoptError:
        # NOTE(review): if parsing fails, `opts` stays unbound unless
        # displayHelpAndExit() terminates the process - verify it does.
        displayHelpAndExit()
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            # NOTE(review): -h only resets profile; help text is printed later
            # (and only when no profile/config option remains) - confirm intent.
            profile = None
        elif opt in ("-p", "--profile"):
            profile = arg
        elif opt in ("-b", "--build"):
            options.append("build")
        elif opt in ("-s", "--schedule"):
            options.append("schedule")
        elif opt in ("-r", "--rerun"):
            options.append("rerun")
        elif opt in ("-f", "--failed"):
            options.append("failed")
        elif opt in ("-t", "--report"):
            options.append("report")
        elif opt in ("-l", "--list"):
            cfgOptions.append("list")
    if not profile and not cfgOptions:
        displayHelpAndExit()
    else:
        main(profile, options, cfgOptions)
    print("\nDone")
# # Test Areas - Remove the below if export it to python
# In[5]:
ipythonTest = False
# In[6]:
if ipythonTest:
with open("./jenkins.yaml", 'r') as f:
jkCfg_ = JKCfg(yaml.load(f))
jserver_ = JenkinsServer(jkCfg_, "16R1.Branch")
| wy8162/hggraph | jenkins_tools.py | jenkins_tools.py | py | 24,012 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.abc.Mapping",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "collections.abc",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "collections.abc.MutableSequence",
"line_number": 138,
"usage_type": "attribute"
},
{
... |
42736810973 | # -*- coding: utf-8 -*-
"""
Created on Sun May 2 10:11:50 2021
@author: Simon
"""
# Imports
import matplotlib.pyplot
import csv
from bs4 import BeautifulSoup
import requests
import particlemove # Associated file used to move particles
import tkinter as tk
import time
# Model variables
num_of_iterations = 200 # This no. allows all particles to land in high wind
num_of_particles = 0 # This creates the object it is updated in the GUI
wind_speed = 0 # This is updated in the GUI
# Creating lists
particles = [] #Used to hold particle data
citydata = [] #Used to hold bombsite and then particle landing position data
environment = [] #Used to hold a digital elevation model
# Start a timer to time the code
start = time.time() #Used for measuring how long it takes at the end
# Setting up GUI, this part of the code runs before main() at the bottom
# The overall strucutre and some elements of the GUI were developed based
# on tutorials under the name The New Boston found on You Tube (see references)
# This is the routine that runs when the user clicks the run button
def routine():
    """The function called by pressing Run in the GUI, collects user inputs"""
    global num_of_particles # Used in other functions so made global
    # NOTE(review): int() raises ValueError on an empty or non-numeric entry -
    # confirm whether the GUI should validate before running.
    num_of_particles = int(my_entry.get())
    print("The number of particles is", num_of_particles)
    global wind_speed
    wind_speed = int(scale_widget.get())
    print("The wind speed is ", wind_speed)
    global topography # Used to select the type of surface or basemap
    topography = str(listbox_widget.get(tk.ANCHOR))
    print("The model uses a", topography)
def particlehelp():
    """Print the GUI help text about choosing the number of particles."""
    helptext = (
        "HELP",
        "The number of particles chosen will influence the speed",
        "5000 particles will typically take about 5 seconds",
        "If you are interested in rare events where particles land far from",
        "the main landing area then you may want to use more particles",
    )
    print("\n".join(helptext))
def windspeedhelp():
    """Print the GUI help text about choosing the wind speed."""
    helptext = (
        "HELP",
        "The higher windspeed causes more turbulence",
        "Resulting in particles being more dispersed",
        "The wind direction is fixed as Easterly",
        "In strong wind (above 6) the particles are blown further East",
    )
    print("\n".join(helptext))
def surfacehelp():
    """Print the GUI help text about choosing the surface type."""
    helptext = (
        "HELP",
        "The basic model assumes a uniform flat surface or flat plain",
        "The digital elevation model uses a contoured surface",
        "The contoured surface used slopes down in an Easterly direction",
        "This means that particles travel further using the DEM",
    )
    print("\n".join(helptext))
# Set up the GUI window and size
root = tk.Tk()
root.geometry("500x300")
root.title("Bacterial Bomb") #Add a title to the GUI window
# Add a menu with help function
# Source: https://www.youtube.com/watch?v=PSm-tq5M-Dc
menu1 = tk.Menu(root)
root.config(menu=menu1)
subMenu = tk.Menu(menu1)
menu1.add_cascade(label="Help", menu=subMenu)
subMenu.add_command(label="Particle choice", command=particlehelp)
subMenu.add_command(label="Windspeed", command=windspeedhelp)
subMenu.add_command(label= "Surface", command=surfacehelp)
# Add a button used for running the program, routine run when button clicked
button1 = tk.Button(root, text ="Run", command=routine)
button1.grid(row=6, column=0)
# Add a label above the entry box
label2 = tk.Label(root, text="Enter number of particles: 5000 recommended")
label2.grid(row=2, column=4, padx=5, pady=5)
# Add an entry box used for number of particles
my_entry = tk.Entry(root, width=15)
my_entry.grid(row=3, column=4)
# Add a label above the scale widget
label3 = tk.Label(root, text="Enter wind speed (beaufort scale)")
label3.grid(row=4, column=4)
# Add a scale widget for windspeed
# Source: dummies.com/programming/python/using-tkinter-widgets-in-python/
scale_widget = tk.Scale(root, from_=0, to=12, orient=tk.HORIZONTAL)
scale_widget.set(4)
scale_widget.grid(row=5, column=4)
# Add a label above the scale widget
label4 = tk.Label(root, text="Enter the type of surface")
label4.grid(row=7, column=4)
# Add a drop down box for choosing the type of surface
listbox_entries = ["Flat plain", "Digital elevation model"]
listbox_widget = tk.Listbox(root, height=2, width=25)
for entry in listbox_entries:
listbox_widget.insert(tk.END, entry)
listbox_widget.grid(row=8, column=4, padx=5, pady=5)
textbox = tk.Text(root,height=4, width=20, padx=5,
pady=5, font=("Helvetica",10))
textbox.insert(
tk.END,"INSTRUCTIONS\nChoose parameters\nPress Run\nClose this window\n")
textbox.grid(row=0, column=0, padx=5, pady=5)
root.mainloop()# GUI window keeps running until it is closed
# The following functions are all called in main at the bottom. They are listed
# in order that they are called.
# Scraping data from the web to identify bomb location (need to be online)
# Website address line is too long but could not make it work splitting it up
def getdata():
    """Scrape the bomb-site raster from the web and load it into citydata.

    Downloads a 300x300 raster (zeros everywhere except the bomb cell, 255),
    round-trips it through city.txt, then parses it into the module-level
    citydata list of rows.
    """
    city = []
    url ="http://www.geog.leeds.ac.uk/courses/computing/study/core-python-odl2/assessment2/wind.raster"
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, "lxml")
    city = soup.find_all("p")
# Save beautiful soup resultset as text file to access the individual numbers
# Saving to a file then reading that file seems rather inefficent
# Need to find a better way to access the data using BeautifulSoup
    with open('city.txt', 'w') as testfile:
        for row in city:
            testfile.write(' '.join([str(a) for a in row]) + '\n')
# This code opens the text file and defines the reader, the new line separator
# in the dataset, and the format of the data with one decimal place.
# Code mainly copied from agent based model work.
    f = []
    f = open('city.txt', newline='\n')
    reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
    for row in reader: #Fill the list with 2D data row by row
        citydata.append(row)
    del citydata[300] # Deletes a fragment of html syntax
    f.close()
    #print(citydata[0]) #Testing prints
    #print(len(citydata))#Expecting 300 items, but gives 301
    #print(citydata[300]) #Check what is the last item to make 301
    #del citydata[300] #Deletes a fragment of html syntax
    #print(len(citydata)) #We now have a list with data for a 300x300 frame
# This code reads an environment data file to use as a DEM basemap.
# Then, if chosen, instead of landing on flat surface particles land on a DEM.
# This code is copied from agent based model work.
def getenvironment():
    """Read the contoured-surface (DEM) file in.txt into the environment list."""
    file = []
    file = open('in.txt', newline='')
    reader = csv.reader(file, quoting=csv.QUOTE_NONNUMERIC)
    for row in reader: # Fill the list with 2D data row by row
        environment.append(row)
    file.close()
# Find height of land at bomb site
# Particle assumed to be released from height of building + height of land
    #print(environment[150][50]) # Land is 200m at bomb site
# Plot the data for intial exploration
#matplotlib.pyplot.imshow(citydata)
#matplotlib.pyplot.axis([0,299,0,299])
#matplotlib.pyplot.show() # Appears to be data around x50 y150
# This code identifies the bomb site
def findbomb():
    """Locate the bomb site in citydata and store it in the globals xb, yb.

    The site is the cell holding a positive value (255); every other cell is
    zero. If several cells were positive, the last one found would win, which
    matches the original scan order.
    """
    global xb
    global yb
    for row_index, row in enumerate(citydata):
        for col_index, cell in enumerate(row):
            if cell > 0:
                xb = col_index # The coordinates of the bomb
                yb = row_index
# So now we know that there is a single bombsite at location x50,y150.
# It is marked by the number 255, whilst all other cells have zero
# This function creates particles used in function below
# Assume a human stands on the building to release particles 1m above roof
# If "Flat plain" is selected particle height is 75m plus 1m = 76m
# If "DEM" is selected particle height is 200m + 76m = 276m
def createparticles():
    """Creates the number of particles specified in the GUI"""
    # Release height = building (75m) + 1m for the person standing on it;
    # with the DEM, the 200m land height at the bomb site is added too.
    if topography == "Digital elevation model": #Selected in GUI
        z = 276 # The elevation in the DEM at the bomb site is 200m
    else:
        z = 76
    for i in range(num_of_particles):
        x = xb
        y = yb
        ws = wind_speed
        particles.append(particlemove.Particle (x, y, z, ws, environment))
    #print(particles[i]) # Used for testing
    #print(particles[0].x)
# This function iterates the particles through methods in particlemove.py
def iterateparticles():
    """Iterates particles through the move methods in particlemove.py

    Each iteration moves every particle vertically, checks whether it has
    landed, then moves it horizontally.
    """
    for j in range(num_of_iterations):
        #print("Iteration")
        for i in range(num_of_particles):
            #print("Particle moving")
            particles[i].zmove() # Moves particles up or down
            particles[i].landing() # Considers if the particle has landed
            particles[i].xymove() # Moves particles x or y coordinates
    #for i in range(num_of_particles):
        #print(particles[i])
# Plot the data as a density map.
# Firstly record the number of particles in each cell of citydata
# Increment the citydata file for each particle landing
# Then plot this data as a density map
# Two mapping options based on Flat plain or DEM selection
def plotdata():
    """Record where each particle landed in citydata and plot a density map.

    Two plot styles are supported: a density image for the "Flat plain"
    option, or a DEM contour map with coloured scatter points for the DEM
    option (red >15 particles, pink 2-15, white exactly 1).
    """
    for i in range(num_of_particles):
        citydata[particles[i].y][particles[i].x] += 1 # Increment per particle
    # NOTE(review): the bomb-site marker reset is hard-coded at y=150, x=50
    # instead of using the xb/yb globals found by findbomb() - confirm they match.
    citydata[150][50] -= 255 #Set bomb site data to zero
    if topography == "Flat plain":
        # Vary the max in line below to see broad range or high central points
        matplotlib.pyplot.imshow(citydata, vmin=0, vmax=40)
        matplotlib.pyplot.colorbar(label="Particles")
        matplotlib.pyplot.title("Map showing distribution of particles",
                                fontdict=None, loc=None, pad=None, y=None)
        matplotlib.pyplot.text(45, 80, s="X marks the bomb site", fontsize=7)
        matplotlib.pyplot.text(
            45, 76, s="White dots mark cells where single particles landed",
            fontsize=7)
        matplotlib.pyplot.axis([45, 200, 100, 200])
        matplotlib.pyplot.scatter(50, 150, marker="x", linewidth=3, c="w")
        # White dots where exactly one particle landed
        for i in range(len(citydata)):
            for j in range(len(citydata[i])):
                if citydata[i][j] == 1:
                    matplotlib.pyplot.scatter(j, i, s=0.3, c="w")
        matplotlib.pyplot.show()
    else: # Digital elevation model chosen in the GUI
        matplotlib.pyplot.contourf(environment)
        matplotlib.pyplot.colorbar(label="Elevation")
        matplotlib.pyplot.title(
            "Map showing distribution of particles",
            fontdict=None, loc=None, pad=None, y=None)
        # Next two lines are too long, but splitting them made the plot look poor
        matplotlib.pyplot.text(45, 66, s="X marks the bomb site, White dots mark cells where single particles landed",fontsize=7)
        matplotlib.pyplot.text(45, 62, s="Pink dots mark cells where 2 to 15 particles landed, Red dots mark cells where more than 15 landed", fontsize=7)
        matplotlib.pyplot.axis([45, 300, 80, 220])
        matplotlib.pyplot.scatter(50, 150, marker="x", linewidth=3, c="w")
        # Scatter plot showing the different landing intensities per cell
        for i in range(len(citydata)):
            for j in range(len(citydata[i])):
                if citydata[i][j] > 15:
                    matplotlib.pyplot.scatter(j, i, s=0.3, c="r")
                elif citydata[i][j] > 1:
                    # Fixed: the original chained comparison `> 1 <16` meant
                    # "> 1 and 1 < 16" - the second test was always true. The
                    # >15 branch above already handles high counts, so this
                    # branch covers the intended 2-15 band.
                    matplotlib.pyplot.scatter(j, i, s=0.3, c="tab:pink")
                elif citydata[i][j] == 1:
                    matplotlib.pyplot.scatter(j, i, s=0.3, c="w")
        matplotlib.pyplot.show()
# Save the density map to a text file (Need to eliminate decimal place)
def savedata():
    """Saves the landed particle coordinates into a text file, citydata.txt

    Writes one space-separated row of counts per line (the comment at module
    level notes the values still carry a decimal place).
    """
    with open('citydata.txt', 'w') as testfile:
        for row in citydata:
            testfile.write(' '.join([str(a) for a in row]) + '\n')
# This is the main function that organises and calls the other functions
def main():
    """Runs the main functions and times the code

    Pipeline: download the bomb raster, locate the bomb, load the DEM,
    create and iterate the particles, then plot and save the results.
    """
    mainstart = time.time() #For timing the main program
    getdata()
    findbomb()
    getenvironment()
    createparticles()
    iterateparticles()
    plotdata()
    savedata()
    mainend = time.time()
    time_elapsed = mainend-mainstart
    print("TIMING")
    print ("Time elapsed", "%.4f" % time_elapsed,"seconds")
    print("End, file saved to citydata.txt")
main()
| simonhp66/bacterialbomb | bacterialbomb.py | bacterialbomb.py | py | 13,564 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tkinter.ANCHOR",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Tk",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "tkinter.Menu",
"line_num... |
70233514344 | from flask import render_template,request,redirect,url_for
from .import main
from ..request import get_sources,get_articles,search_news
from ..models import Source
@main.route("/")
def index():
"""
View root function that returns the index page and its data
"""
popular_news = get_sources("popular")
title = "Welcome to the best news outlet"
search_news = request.args.get("article_query")
if search_news:
return redirect(url_for('main.search',news_feed = search_news))
else:
return render_template("index.html",title = title,popular = popular_news)
@main.route("/sources/<sources_id>")
def sources(sources_id):
'''
View news page function that returns the movie details page and its data
'''
news_source = get_articles(sources_id)
title = f"{sources_id}"
return render_template("news.html",id = sources_id,title = title,news = news_source)
@main.route("/search/<news_feed>")
def search(news_feed):
'''
View function to display the search results
'''
news_name_list = news_feed.split(" ")
news_name_format = "+".join(news_name_list)
searched_news = search_news(news_name_format)
title = "News results"
#search_news = request.args.get('article_query')
return render_template("search.html",article = searched_news)
| alexmwaura/NewsApp | app/main/views.py | views.py | py | 1,389 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "request.get_sources",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "request.search_news",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.... |
40302056937 | import sys
import datetime
from project import app
from common_utilities import Constant
from project.users.models import Counter, Users
from flask_login import login_required, logout_user, current_user
from flask import Blueprint, render_template, session, make_response, jsonify, request, redirect, url_for
sys.path.append('../../')
from project.users.memcache_ctrl import client, CONSTANT
users_blueprint = Blueprint('users', __name__, template_folder='templates')
@users_blueprint.route('/request_accepted_counter', methods=['GET', 'POST'])
@login_required
def request_accepted_counter():
    """Return JSON progress stats for the current accept-requests run.

    Reads the live counters from memcache when present, otherwise falls back
    to the persisted Counter row identified by the session's counter id.
    """
    if request.method == "POST":
        instagram_username = session.get("username").get("username")
        counter_id = session.get("current_counter_id")
        if counter_id is None:
            # No run in this session yet: recover the most recent counter row.
            counter_id_obj = Counter().get_last_counter_info(instagram_username)
            if counter_id_obj is not None:
                counter_id = counter_id_obj.id
                session["current_counter_id"] = counter_id_obj.id
                session.modified = True
        total_success_request, total_request, failed_request = 0, 0, 0
        is_complete = False
        last_date_time = None
        try:
            dict_index_name = instagram_username.lower() + Constant.CONSTANT().ALL_INFO
            all_info = client.get(dict_index_name)
            if isinstance(all_info, dict):
                # Live counters straight from memcache.
                total_request = all_info[Constant.CONSTANT().TOTAL_REQUEST_TO_BE_ACCEPT]
                total_success_request = all_info[Constant.CONSTANT().SUCCESSFUL_ACCEPTED]
                failed_request = all_info[Constant.CONSTANT().REQUEST_FAILED]
                is_complete = all_info[Constant.CONSTANT().IS_REQUEST_COMPLETE]
                if is_complete == True:
                    last_date_time = all_info.get("update_date")
            else:
                # Cache miss: fall back to the database row.
                counter_stats_row = Counter.get_one_counter(counter_id)
                if counter_stats_row is not None:
                    total_success_request = counter_stats_row.total_accepted_request
                    total_request = counter_stats_row.input_request_count
                    # NOTE(review): failed_request is read from input_request_count
                    # here - looks like it should be total_failed_request; confirm.
                    failed_request = counter_stats_row.input_request_count
                    is_complete = counter_stats_row.is_request_complete
                    last_date_time = counter_stats_row.update_date
        except Exception as error:
            print("Not able to get count ", error)
        response_dict = {"successful": total_success_request,
                         "username": instagram_username.upper(),
                         "failed": failed_request,
                         "isComplete": is_complete,
                         "total": total_request,
                         "lastDateTime": last_date_time.strftime("%a %B %d %Y %I:%M:%S %p") if last_date_time is not None else None}
        return make_response(jsonify(response_dict), 200)
@users_blueprint.route('/accept_pending_requests', methods=["GET", "POST"])
@login_required
def accept():
    """POST: start accepting pending follow requests; GET: render the form.

    Initialises the memcache progress counters and a Counter DB row, then
    drives the cached bot object through the pending requests.
    """
    if request.method == "POST":
        instagram_username = session["username"]["username"]
        bot_obj = client.get(instagram_username)["bot_obj"]
        # NOTE(review): form values are strings; the 0 default is an int and the
        # value is only converted via int() further below - confirm consumers
        # of init_dict_items tolerate a string count.
        no_to_accept = request.form.get("customUserInputNumber", 0)
        init_dict_items = {
            Constant.CONSTANT().TOTAL_REQUEST_TO_BE_ACCEPT: no_to_accept,
            Constant.CONSTANT().IS_REQUEST_COMPLETE: False,
            Constant.CONSTANT().SUCCESSFUL_ACCEPTED: 0,
            Constant.CONSTANT().REQUEST_FAILED: 0,
        }
        dict_get_index = instagram_username.lower() + Constant.CONSTANT().ALL_INFO
        client.set(dict_get_index, init_dict_items)
        new_user_count_req = Counter(
            insta_username=instagram_username,
            input_request_count=no_to_accept,
            total_accepted_request=0,
            total_failed_request=0
        )
        new_user_count_req.save()
        counter_id = new_user_count_req.id
        session["current_counter_id"] = counter_id
        session.modified = True
        ctr_item = Counter.get_one_counter(session["current_counter_id"])
        resp = bot_obj.approve_pending_follow_requests(number_of_requests=int(no_to_accept), ctr_item=ctr_item, init_dict_items=init_dict_items, dict_get_index=dict_get_index, counter_ctr=0)
        if resp == "No request to accept":
            return "No request to accept"
        # NOTE(review): both branches below return "True", so the None check is
        # redundant - confirm whether a different response was intended.
        if resp == None:
            return "True"
        return "True"
    elif request.method == "GET":
        instagram_username = session.get("username").get("username")
        user_obj = Users.query.filter_by(insta_username=instagram_username).first()
        last_day = str(days_between(user_obj.till_date)) + " days"
        return render_template("AcceptRequests.html", last_day=last_day)
@users_blueprint.route('/logout', methods=["GET", "POST"])
@login_required
def logout():
    """Log the current user out, clearing the memcache entry and the session.

    Cleanup is best-effort: any failure still results in a redirect to the
    index page. The original duplicated the client.delete/session.clear calls
    and used a bare except; this performs one cleanup pass and narrows the
    handler to Exception.
    """
    try:
        instagram_username = current_user.insta_username
        if current_user.is_authenticated():
            logout_user()
        client.delete(instagram_username)
        session.clear()
    except Exception:
        # Best-effort: never block the redirect on cleanup failures.
        pass
    return redirect(url_for('core.index'))
def days_between(d1):
    """Return the absolute number of calendar days between datetime d1 and now (UTC).

    Equivalent to the original, minus the pointless str()/strptime round-trip:
    comparing the two date() values directly yields the same whole-day delta.
    """
    return abs((datetime.datetime.utcnow().date() - d1.date()).days)
default_args = {}
footer_var = {"cp_year": datetime.datetime.now().year}
@app.before_first_request
def load_default():
default_args["footer_content"] = render_template("footer.html", **footer_var)
return default_args | PatelFarhaan/ACCEPTME-PROD | project/users/views.py | views.py | py | 5,641 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "flask.Blueprint",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
... |
38516771350 | import boto.exception
from awscompat import config, util
from awscompat.connections import ec2_conn
from awscompat.tests.base import TestNode
class TestDescribeImages(TestNode):
    """Failing test for https://bugs.launchpad.net/nova/+bug/755829"""
    def pre(self):
        # The image fetched by id must expose a non-empty name attribute.
        assert ec2_conn.get_all_images(
            [config['ec2']['test_image_id']]
        )[0].name
class TestSecurityGroups(TestNode):
    """Test security group creation, lookup and deletion."""
    def pre(self):
        # Create a uniquely-named group, then poll until it is listable.
        self.group_name = self.make_uuid('group_name')
        self.group_desc = self.make_uuid('group_desc')
        self.group = ec2_conn.create_security_group(
            self.group_name,
            self.group_desc
        )
        groups = util.retry(
            lambda: ec2_conn.get_all_security_groups(
                groupnames=[self.group_name])
        )
        assert len(groups)
    def post(self):
        self.group.delete()
        # TODO: this might not raise because of delay.
        # so I can't use the retry controller
        # I should write a general purpose request wrapper
        # which polls until it gets a different response.
        @self.assert_raises(boto.exception.EC2ResponseError)
        def test_throw():
            # After deletion, looking the group up should error out.
            ec2_conn.get_all_security_groups(groupnames=[self.group_name])
class TestKeyPairs(TestNode):
    """Test keypair creation, lookup and deletion."""
    def pre(self):
        # Create a uniquely-named keypair and confirm it is listable.
        self.key_name = self.make_uuid('key_name')
        self.keypair = ec2_conn.create_key_pair(self.key_name)
        assert len(ec2_conn.get_all_key_pairs(keynames=[self.key_name]))
    def post(self):
        ec2_conn.delete_key_pair(self.key_name)
        @self.assert_raises(boto.exception.EC2ResponseError)
        def test_boto_throw():
            # After deletion, looking the keypair up should error out.
            ec2_conn.get_all_key_pairs(keynames=[self.key_name])
class TestInstance(TestNode):
    """Test EC2 image launch and termination.

    Depends on the keypair and security-group nodes: the instance is launched
    with the generated keypair and group, reached over SSH, then terminated
    and verified unreachable.
    """
    depends = {
        'key_pairs': TestKeyPairs,
        'security_group': TestSecurityGroups
    }
    def pre(self, key_pairs=None, security_group=None):
        image_id = config['ec2']['test_image_id']
        instance_config = config['ec2']['test_instance']
        self.image = ec2_conn.get_all_images(image_ids=[image_id])[0]
        self.security_group = security_group
        self.key_pairs = key_pairs
        # Open SSH to the world so the connectivity checks below can pass.
        self.security_group.group.authorize('tcp', 22, 22, '0.0.0.0/0')
        self.reservation = self.image.run(
            security_groups=[self.security_group.group_name],
            key_name=self.key_pairs.key_name,
            **instance_config
        )
        # Wait (up to 3 min) for the instance to reach "running".
        util.wait(
            lambda: self.reservation.instances[0].update() == 'running',
            timeout=60 * 3
        )
        # Wait (up to 5 min) for the SSH port to start accepting connections.
        util.wait(
            lambda: self.testTelnet(
                self.reservation.instances[0].public_dns_name,
                22
            ),
            timeout = 60 * 5
        )
        assert util.retry(
            lambda: self.testSSH(
                self.key_pairs.keypair.material.encode('ascii'),
                config['ec2']['test_username'],
                self.reservation.instances[0].public_dns_name
            ),
            wait_exp=2
        )
    def post(self):
        self.reservation.instances[0].terminate()
        #ec2_conn.terminate_instances([self.reservation.instances[0].id])
        util.wait(
            lambda: self.reservation.instances[0].update() == 'terminated',
            timeout=60 * 2
        )
        # After termination SSH must no longer succeed.
        assert util.retry(
            lambda: not self.testSSH(
                self.key_pairs.keypair.material.encode('ascii'),
                config['ec2']['test_username'],
                self.reservation.instances[0].public_dns_name
            ),
            wait_exp=2
        )
| mwhooker/aws-compat | awscompat/tests/ec2.py | ec2.py | py | 3,744 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "awscompat.tests.base.TestNode",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "awscompat.connections.ec2_conn.get_all_images",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "awscompat.connections.ec2_conn",
"line_number": 10,
"usage_typ... |
29290582448 | #!/usr/bin/env python
import rospy
import tf
if __name__ == '__main__':
    # Log the robot's map-frame pose (x, y, yaw) to a TSV file at 20 Hz.
    rospy.init_node('pose_saver')
    tf_listener = tf.TransformListener()
    # Block until the map -> base_footprint transform becomes available.
    tf_listener.waitForTransform('/map', '/base_footprint', rospy.Time(0), rospy.Duration(1.0))
    loop_rate = rospy.Rate(20.0)
    with open('/media/hao/hao/dataset/ros_pose.txt', 'w') as pose_file:
        while not rospy.is_shutdown():
            stamp = rospy.Time.now()
            tf_listener.waitForTransform('/map', '/base_footprint', stamp, rospy.Duration(10.0))
            (translation, quaternion) = tf_listener.lookupTransform('/map', '/base_footprint', stamp)
            # Only the yaw component (index 2) is recorded.
            rpy = tf.transformations.euler_from_quaternion(quaternion)
            pose_file.writelines(str(stamp) + '\t' + str(translation[0]) + '\t' + str(translation[1]) + '\t' + str(rpy[2]) + '\n')
            loop_rate.sleep()
| QinHarry/CNN_SLAM | data/ros/src/mit_data/src/pose_saver.py | pose_saver.py | py | 788 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "rospy.init_node",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tf.TransformListener",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "rospy.Time",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rospy.Duration",
"l... |
11903172959 | import time
from flask import request
from data_sheet import session, ShortMessage
from utils.tool import short_message
from ..user import bp
@bp.route('/mobile_text')
def test():
phone = request.json.get("phone")
if phone is None:
return {'code':201,'message':'请输入手机号码'}
try:
indonesia = short_message(phone)
except Exception as e:
print(e)
return {'code':202,'meaasge':'发送失败,请稍后再试'}
try:
result = session.query(ShortMessage).filter(ShortMessage.phonenumber == phone).first()
if result is None:
newMessage = ShortMessage(phonenumber=phone,meaasge=indonesia,time=str(time.time()))
session.add(newMessage)
session.commit()
else:
result.meaasge = indonesia
result.time = str(time.time())
session.add(result)
session.commit()
except Exception as e:
session.rollback()
return {'code':203,'message':'验证码无效'}
return {'code':200,'message':'success'} | yyiridescent/exchange | api/user/mobile_text.py | mobile_text.py | py | 1,107 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.request.json.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "utils.to... |
25325013946 | from rest_framework import status
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework.generics import RetrieveAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from accounts.models import Profile
from rates.api.serializers import FiatRateListSerializer
from rates.models import FiatRate
class FiatRateAPIView(RetrieveAPIView):
    """Return the fiat/dollar rate for the requesting user's country.

    Falls back to the 'United States Of America' row when the lookup for the
    user's own country fails for any reason.
    """
    permission_classes = (IsAuthenticatedOrReadOnly,)
    authentication_classes = [BasicAuthentication, SessionAuthentication, JSONWebTokenAuthentication]

    @staticmethod
    def _payload(user_profile_obj, user_country):
        # Shared response shape; the original duplicated this dict in both
        # the success and the fallback branch.
        return {
            'success': True,
            'status code': status.HTTP_200_OK,
            'message': 'User Fiat Fetched',
            'data': [{
                'updated': user_country.updated,
                'timestamp': user_country.timestamp,
                'country': user_profile_obj.get_country(),
                'dollar_rate': user_country.dollar_rate
            }]
        }

    def get(self, request):
        """Look up the rate row for the user's country, defaulting to USD."""
        user_profile_obj = Profile.objects.get(user=request.user)
        try:
            user_country = FiatRate.objects.get(country=user_profile_obj.country)
        except Exception:
            # No FiatRate row for this country (or bad profile data): use the
            # USD reference row, matching the original fallback behaviour.
            user_country = FiatRate.objects.get(country='United States Of America')
        return Response(self._payload(user_profile_obj, user_country),
                        status=status.HTTP_200_OK)
class FiatListView(ListAPIView):
# Read-only listing of every FiatRate row, serialized with the list serializer.
authentication_classes = [BasicAuthentication, SessionAuthentication, JSONWebTokenAuthentication]
serializer_class = FiatRateListSerializer
queryset = FiatRate.objects.all()
permission_classes = (IsAuthenticatedOrReadOnly,)
# NOTE(review): DRF generic views paginate via `pagination_class` / settings;
# `paginate_by` looks like it may be ignored here — confirm.
paginate_by = 15
| mathemartins/vendescrow | rates/api/views.py | views.py | py | 2,400 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.generics.RetrieveAPIView",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticatedOrReadOnly",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "rest_framework.authentication.BasicAuthentication",
... |
4824133992 | import pygame
from pygame import font
class Button():
# A clickable filled rectangle with a text label rendered at its centre.
def __init__(self,alien_setting,screen,msg):
# Initialise the button's attributes.
# NOTE(review): alien_setting is accepted but never used here — confirm.
self.screen = screen
self.screenRect = screen.get_rect()
# Set the button's size and other properties.
self.width,self.height = 200,50
self.buttonColor = (0,255,0)
self.textColor = (255,255,255)
self.font = font.SysFont(None,48)
# Create the button's rect object and centre it on the screen.
self.rect = pygame.Rect(0,0,self.width,self.height)
self.rect.center = self.screenRect.center
# The button's label only needs to be rendered once.
self.prepMsg(msg)
def prepMsg(self,msg):
'''Render msg as an image and centre it on the button.'''
self.msgImage = self.font.render(msg,True,self.textColor,self.buttonColor)
self.msgImageRect = self.msgImage.get_rect()
self.msgImageRect.center = self.rect.center
def drawButton(self):
# Draw a blank button, then blit the label on top of it.
self.screen.fill(self.buttonColor,self.rect)
self.screen.blit(self.msgImage,self.msgImageRect)
| hongnn/myRemoteWarehouse | alien_game/button.py | button.py | py | 1,077 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.font.SysFont",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pygame.Rect",
"line_number": 16,
"usage_type": "call"
}
] |
74158129382 | # test_cloud_client.py
import pytest
from datetime import datetime
from testfixtures import LogCapture
from cloud_server import initialize_server, add_database_entry
from database_definitions import Patient
import io
import os
initialize_server()  # bring the server/database up once for this test module

# Absolute path to the image fixture shipped with the test suite.
pathname = os.getcwd()
full_pathname = pathname + '/images/test_image.jpg'
# NOTE(review): this function lacks the `test_` prefix, so pytest never
# collects or runs it; it also shadows the `convert_file_to_b64_string` it
# imports. Renaming it `test_convert_file_to_b64_string` would activate it —
# confirm with the suite owners before changing collection behaviour.
def convert_file_to_b64_string():
from cloud_client import convert_file_to_b64_string
b64str = convert_file_to_b64_string(full_pathname)
assert b64str[0:20] == "/9j/4AAQSkZJRgABAQEA"
def test_b64_to_ndarray():
    """Decoding the b64 fixture must reproduce the known top-left pixels."""
    from cloud_client import convert_file_to_b64_string
    from cloud_client import b64_to_ndarray
    b64str = convert_file_to_b64_string("test_image.jpg")
    nd = b64_to_ndarray(b64str)
    answer = nd[0][0:5]
    expected = [[68, 115, 197],
                [68, 115, 197],
                [68, 115, 197],
                [68, 115, 197],
                [68, 115, 197]]
    # BUG FIX: `.all` is a bound method and is always truthy, so the original
    # assertion could never fail; the method must be called.
    assert (answer == expected).all()
# Fixtures shared by the parametrized cases below.
list1 = ['a', 'b', 'c', 'd']
list2 = [23, 98, 47, 24]


@pytest.mark.parametrize("listvar, val, expected", [(list1, 'c', 2),
                                                    (list2, 98, 1)])
def test_get_index(listvar, val, expected):
    """get_index must return the position of *val* within *listvar*."""
    from cloud_client import get_index
    assert get_index(listvar, val) == expected
def test_resize_image():
    """Resizing the decoded fixture must keep the known top-left pixels."""
    from cloud_client import convert_file_to_b64_string
    from cloud_client import b64_to_ndarray
    from cloud_client import resize_image
    b64str = convert_file_to_b64_string("test_image.jpg")
    nd = b64_to_ndarray(b64str)
    resized_nd = resize_image(nd)
    answer = resized_nd[0][0:5]
    expected = [[68, 115, 197],
                [68, 115, 197],
                [68, 115, 197],
                [68, 115, 197],
                [68, 115, 197]]
    # BUG FIX: `.all` without parentheses is always truthy; call it so the
    # assertion can actually fail.
    assert (answer == expected).all()
def test_b64_string_to_file():
    """Writing the b64 string back out must reproduce the original file."""
    from cloud_client import convert_file_to_b64_string
    from cloud_client import b64_string_to_file
    import filecmp
    import os
    b64str = convert_file_to_b64_string("test_image.jpg")
    # BUG FIX: the original leaked the output handle; close it before the
    # comparison so the data is guaranteed to be flushed to disk.
    with open("test_image_output.jpg", "wb") as out_file:
        b64_string_to_file(b64str, out_file)
    answer = filecmp.cmp("test_image.jpg",
                         "test_image_output.jpg")
    os.remove("test_image_output.jpg")
    assert answer is True
def test_process_b64():
    """Decode-and-resize pipeline check on the fixture image.

    NOTE(review): despite its name this test never imports or calls
    `process_b64` — it is a copy of test_resize_image. TODO: exercise
    `process_b64` directly once its signature is confirmed.
    """
    from cloud_client import convert_file_to_b64_string
    from cloud_client import b64_to_ndarray
    from cloud_client import resize_image
    b64str = convert_file_to_b64_string("test_image.jpg")
    nd = b64_to_ndarray(b64str)
    resized_nd = resize_image(nd)
    answer = resized_nd[0][0:5]
    expected = [[68, 115, 197],
                [68, 115, 197],
                [68, 115, 197],
                [68, 115, 197],
                [68, 115, 197]]
    # BUG FIX: `.all` was never called, making the assertion vacuous.
    assert (answer == expected).all()
| pdijour/Patient_Monitoring_FullStack | test_cloud_client.py | test_cloud_client.py | py | 2,802 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cloud_server.initialize_server",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cloud_client.convert_file_to_b64_string",
"line_number": 19,
"usage_type": "call"
},
{
"... |
16221940900 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
import unittest
from nose.tools import assert_in, assert_raises
from wechatsogou.const import WechatSogouConst
from wechatsogou.request import WechatSogouRequest
class TestBasicGenSearchArticleURL(unittest.TestCase):
    def test_gen_hot_url(self):
        """Every hot-index constant must yield well-formed wapindex URLs."""
        hot_indexes = [name for name in dir(WechatSogouConst.hot_index)
                       if not name.startswith('__')]
        for hot_index in hot_indexes:
            # Default page.
            url = WechatSogouRequest.gen_hot_url(hot_index)
            assert_in('http://weixin.sogou.com/wapindex/wap/0612/wap_', url)
            assert_in('0.html', url)
            # Page numbers are 1-based; 0 is rejected outright.
            with assert_raises(AssertionError):
                WechatSogouRequest.gen_hot_url(hot_index, 0)
            # Page N maps to the (N-1).html resource.
            for page_no in range(1, 5):
                url = WechatSogouRequest.gen_hot_url(hot_index, page_no)
                assert_in('http://weixin.sogou.com/wapindex/wap/0612/wap_', url)
                assert_in('{}.html'.format(page_no - 1), url)
if __name__ == '__main__':
unittest.main()
| chyroc/WechatSogou | test/test_request_gen_hot_url.py | test_request_gen_hot_url.py | py | 1,032 | python | en | code | 5,658 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "wechatsogou.const.WechatSogouConst.hot_index",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "wechatsogou.const.WechatSogouConst",
"line_number": 14,
"usage_t... |
35058044672 | from flask import Flask, request, jsonify
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.engine import Engine
from sqlalchemy import event
import os
import sqlite3
database = SQLAlchemy()
app = Flask(__name__)
# NOTE(review): 'dev' is a hard-coded development secret and the database URI
# is an absolute Windows path — both should come from config in production.
app.config.from_mapping(
SECRET_KEY = 'dev',
SQLALCHEMY_DATABASE_URI = r"sqlite:///C:\\Users\syedr\Documents\databaseFlask\backend\sqlite.db"
)
@event.listens_for(Engine, 'connect')
def activate_foreign_keys(connection, connection_record):
    """Enable SQLite foreign-key enforcement on every new DBAPI connection.

    SQLite ships with foreign keys OFF by default; this listener turns the
    pragma on for raw sqlite3 connections only, leaving other DBAPIs alone.
    """
    # isinstance is the idiomatic type test (and also accepts subclasses).
    if isinstance(connection, sqlite3.Connection):
        cursor = connection.cursor()
        cursor.execute('PRAGMA foreign_keys=ON')
        cursor.close()
database.init_app(app)
with app.app_context():
# Load table metadata reflected from the existing database file.
database.reflect()
print(database.metadata.tables.keys())
# Ensure the instance folder exists; ignore the error if it already does.
try:
os.makedirs(app.instance_path)
except OSError:
pass
from .routes import * | Talha7011235/studentCourseInstructorDatabaseFlaskIndividualAssignment | backend/__init__.py | __init__.py | py | 857 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlite3.Connection",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sqlalc... |
36918913878 | # based on A Plus Coding's tutorial at: https://www.youtube.com/watch?v=GKe1aGQlKDY&list=PLryDJVmh-ww1OZnkZkzlaewDrhHy2Rli2
import pygame
import sys
from game_window_class import *
from button_class import *
FPS = 60 # max frames per second
EVALUATE_DAMPER = 10 # decrease to do evaluations faster (grid evolves every FPS // EVALUATE_DAMPER frames)
# Window layout: overall size and the top-left corner of the grid area.
WIDTH = 1150
HEIGHT = 800
GAME_WIN_X = 25
GAME_WIN_Y = 75
# RGB palette: background plus fill / hover / border colours per button.
BG_COLOUR = (59, 55, 53)
RUN_BUTTON_COLOUR = (72, 107, 79)
RUN_BUTTON_HOVER_COLOUR = (82, 125, 91)
RUN_BUTTON_BORDER_COLOUR = (0,0,0) # (33,33,33)
PAUSE_BUTTON_COLOUR = (130, 113, 77)
PAUSE_BUTTON_HOVER_COLOUR = (150, 130, 87)
PAUSE_BUTTON_BORDER_COLOUR = (0,0,0)
RESET_BUTTON_COLOUR = (110, 69, 69)
RESET_BUTTON_HOVER_COLOUR = (135, 84, 84)
RESET_BUTTON_BORDER_COLOUR = (0,0,0)
#-------------------- SETTING FUNCTIONS --------------------#
def get_events():
# Event pump for the 'setting' state: quit, grid cell toggling, button clicks.
global running
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN:
mouse_pos = pygame.mouse.get_pos()
if mouse_on_grid(mouse_pos):
click_cell(mouse_pos)
else:
# Click landed outside the grid: give every button a chance to fire.
for button in buttons:
button.click()
def update():
# Per-frame update for the 'setting' state (no grid evolution happens here).
game_window.update()
for button in buttons:
button.update(mouse_pos, game_state=state)
def draw():
# Repaint: background first, then buttons, then the grid window.
window.fill(BG_COLOUR)
for button in buttons:
button.draw()
game_window.draw()
#-------------------- RUNNING FUNCTIONS --------------------#
def running_get_events():
    """Event pump for the 'running' state.

    The handling was byte-for-byte identical to get_events() (quit, cell
    toggling, button clicks), so delegate instead of keeping a duplicate
    copy that can drift out of sync.
    """
    get_events()
def running_update():
# Same as update(), plus: evolve the grid every FPS // EVALUATE_DAMPER frames.
game_window.update()
for button in buttons:
button.update(mouse_pos, game_state=state)
if frame_count%(FPS//EVALUATE_DAMPER) == 0:
game_window.evaluate()
def running_draw():
    """Draw for the 'running' state — identical to draw(), so delegate
    rather than duplicate the paint sequence."""
    draw()
#-------------------- PAUSED FUNCTIONS --------------------#
def paused_get_events():
    """Event pump for the 'paused' state — identical to get_events(), so
    delegate rather than keep a third copy of the same handler."""
    get_events()
def paused_update():
    """Per-frame update for the 'paused' state — identical to update()
    (no evolution), so delegate to it."""
    update()
def paused_draw():
    """Draw for the 'paused' state — identical to draw(), so delegate."""
    draw()
def mouse_on_grid(pos):
    """Return True when *pos* (screen pixels) lies inside the grid window."""
    inside_x = GAME_WIN_X < pos[0] < WIDTH - GAME_WIN_X
    inside_y = GAME_WIN_Y < pos[1] < GAME_WIN_Y + WIN_HEIGHT
    return inside_x and inside_y
def click_cell(pos):
    """Toggle the aliveness of the grid cell under screen position *pos*."""
    # Translate from screen pixels to grid (column, row) coordinates.
    col = (pos[0] - GAME_WIN_X) // CELL_SIZE
    row = (pos[1] - GAME_WIN_Y) // CELL_SIZE
    cell = game_window.grid[row][col]
    # One-line toggle replaces the original if/else flip.
    cell.alive = not cell.alive
def make_buttons():
# Build the four UI buttons; each carries the game state in which it is active.
buttons = []
# RUN
buttons.append(Button(window, WIDTH//2-50, 25, 100, 30, text='RUN',
colour=RUN_BUTTON_COLOUR, hover_colour=RUN_BUTTON_HOVER_COLOUR,
border_colour=RUN_BUTTON_BORDER_COLOUR, bold_text=True,
function=run_game, state='setting'))
# PAUSE
buttons.append(Button(window, WIDTH//2-50, 25, 100, 30, text='PAUSE',
colour=PAUSE_BUTTON_COLOUR, hover_colour=PAUSE_BUTTON_HOVER_COLOUR,
border_colour=PAUSE_BUTTON_BORDER_COLOUR, bold_text=True,
function=pause_game, state='running'))
# RESUME
buttons.append(Button(window, WIDTH//5-50, 25, 100, 30, text='RESUME',
colour=RUN_BUTTON_COLOUR, hover_colour=RUN_BUTTON_HOVER_COLOUR,
border_colour=RUN_BUTTON_BORDER_COLOUR, bold_text=True,
function=run_game, state='paused'))
# RESET
buttons.append(Button(window, WIDTH//1.25-50, 25, 100, 30, text='RESET',
colour=RESET_BUTTON_COLOUR, hover_colour=RESET_BUTTON_HOVER_COLOUR,
border_colour=RESET_BUTTON_BORDER_COLOUR, bold_text=True,
function=reset_grid, state='paused'))
return buttons
def run_game():
# Button callback: switch the global state machine to 'running'.
global state
state = 'running'
def pause_game():
# Button callback: switch the global state machine to 'paused'.
global state
state = 'paused'
def reset_grid():
# Button callback: clear the grid and return to the 'setting' state.
global state
state = 'setting'
game_window.reset_grid()
pygame.init()
window = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
game_window = GameWindow(window, GAME_WIN_X, GAME_WIN_Y)
buttons = make_buttons()
# Three-state machine: 'setting' (edit cells), 'running' (evolving), 'paused'.
state = 'setting'
frame_count = 0
running = True
while running:
frame_count += 1
mouse_pos = pygame.mouse.get_pos()
# Dispatch events/update/draw according to the current state.
if state == 'setting':
get_events()
update()
draw()
if state == 'running':
running_get_events()
running_update()
running_draw()
if state == 'paused':
paused_get_events()
paused_update()
paused_draw()
pygame.display.update()
clock.tick(FPS)
pygame.quit()
sys.exit()
| ruankie/game-of-life | main.py | main.py | py | 5,738 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.event.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTO... |
8797239686 |
import json
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import os
import argparse
from pathlib import Path
import tqdm
#import models
from models.backbone import Backbone
from models.classification_head import ClassificationHead
#import dataloaders
from dataloader.cifar10 import CIFAR10
from dataloader.fashion_mnist import FashionMNIST
from dataloader.multi_task_batch_scheduler import BatchSchedulerSampler
# import dataset classes for concatenation purposes
from torch.utils.data.dataset import ConcatDataset
#import progressbar
from utils.utils import progress_bar
from utils.variables import classifier_dict
# trying to figure out how to enumerate over the two dataloaders
# Command-line configuration for the two-task (CIFAR10 + Fashion-MNIST) run.
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument("--batch-size", type=int, default=128,
help="Training Batch size")
parser.add_argument("--n_epochs", type=int, default=700,
help="No of epochs")
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument("--cifar_checkpoint_path", type=str, default="",
help="CIFAR10's checkpoint")
parser.add_argument("--fashion_mnist_checkpoint_path", type=str, default="",
help="FASHION-MNIST's checkpoint")
parser.add_argument("--training_type", type=str, default="conditioned",
help="type of training (conditioned")
parser.add_argument("--num-workers", type=int, default=2,
help="Number of workers for dataloaders")
parser.add_argument("--backbone", type=str, default="resnet18",
help="BACKBONE TO TRAIN WITH:resnet18/resnet50/resnest50")
args = parser.parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#global variables shared by train()/test()/dump_config()
best_cifar_acc = 0 # best test accuracy for cifar
best_fashion_mnist_acc = 0 # best test accuracy for fashion mnist
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# current learning rate, mutated by update_learning_rate()
learning_rate = 0
# returns trainloader and two testloaders
def get_dataloaders():
"""Build the interleaved train loader and the two per-task test loaders."""
# combined trainloader for cifar & fash-mnist;
# BatchSchedulerSampler alternates batches between the two datasets
cifar = CIFAR10(data_root="dataset/cifar10",
transform=None,
mode='train',
)
fashion_mnist = FashionMNIST(data_root="dataset/fashion-mnist",
transform=None,
mode='train',
)
concat_dataset = ConcatDataset([cifar, fashion_mnist])
trainloader = torch.utils.data.DataLoader(dataset=concat_dataset,
sampler=BatchSchedulerSampler(dataset=concat_dataset,
batch_size=args.batch_size),
batch_size=args.batch_size,
)
# Separate test loaders so each head can be evaluated on its own task.
testset_cifar = CIFAR10(data_root="dataset/cifar10",
transform=None,
mode='test')
testloader_cifar = torch.utils.data.DataLoader(
testset_cifar, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
testset_fashion_mnist = FashionMNIST(data_root="dataset/fashion-mnist",
transform=None,
mode='test')
testloader_fashion_mnist = torch.utils.data.DataLoader(
testset_fashion_mnist, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
return trainloader, testloader_cifar, testloader_fashion_mnist
# conditional training
def train(epoch):
"""One conditioned training epoch: route each batch's loss to its task head."""
print('\nEpoch: %d' % epoch)
print('\nLearning Rate: %f'%args.lr)
# print('Total Epochs: %d' % args.n_epochs)
print('Training Type: : %s' % args.training_type)
model.train()
classifier_cifar.train()
classifier_fashion_mnist.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets, meta) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
# channels-last to NCHW for the backbone
inputs = inputs.permute(0, 3, 1, 2)
embedding_labels = meta['conditioning_label'].to(device)
embedding_labels = embedding_labels.unsqueeze(1)
optim_model.zero_grad()
optim_classifier_cifar.zero_grad()
optim_classifier_fashion_mnist.zero_grad()
# fwd pass through common backbone, then both task heads
output = model(inputs, embedding_labels)
output_cifar = classifier_cifar(output)
output_fashion_mnist = classifier_fashion_mnist(output)
loss_cifar = criterion(output_cifar, targets)
loss_fashion_mnist = criterion(output_fashion_mnist, targets)
# print(meta['conditioning_label'][0],"label")
# NOTE(review): the label of element 0 decides for the whole batch —
# presumably BatchSchedulerSampler guarantees homogeneous batches; confirm.
# If a label were neither 0 nor 1, `loss` below would be unbound.
#case: dataset is cifar (only the cifar head and backbone are stepped)
if meta['conditioning_label'][0] == 0:
#print("dataset: cifar")
loss = loss_cifar
outputs = output_cifar
loss.backward()
optim_classifier_cifar.step()
optim_model.step()
#case: dataset is fashion_mnist (only that head and backbone are stepped)
if meta['conditioning_label'][0] == 1:
#print("dataset: fashion_mnist")
loss = loss_fashion_mnist
outputs = output_fashion_mnist
loss.backward()
optim_classifier_fashion_mnist.step()
optim_model.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
# print("predicted",predicted)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
# break for debugging
# code to dump config at the path
def dump_config(epoch, save_dir):
    """Write the current training state to <save_dir>/config.json.

    Reads the module-level globals learning_rate / best_cifar_acc /
    best_fashion_mnist_acc and the parsed args.
    """
    config = {
        'epoch': epoch,  # BUG FIX: key was 'epoch:' (stray colon in the name)
        'learning_rate': learning_rate,
        'cifar_acc': best_cifar_acc,
        'fashion_mnist_acc': best_fashion_mnist_acc,
        'n_epochs': args.n_epochs
    }
    with open(save_dir + '/config.json', 'w') as fp:
        json.dump(config, fp)
def test(epoch):
    """Evaluate both task heads; checkpoint and record whichever improved.

    The original repeated the same ~45-line evaluation loop once per task;
    it is factored into _evaluate()/_save_checkpoint() here. The ordering
    (save checkpoint, update the best-accuracy global, then dump_config)
    is preserved so config.json sees the freshly updated value.
    """
    print("in testing code")
    global best_cifar_acc
    global best_fashion_mnist_acc
    model.eval()
    classifier_cifar.eval()
    classifier_fashion_mnist.eval()

    ########## EVALUATE ON CIFAR TESTLOADER ONCE ############################
    acc = _evaluate(testloader_cifar, classifier_cifar)
    if acc > best_cifar_acc:
        print('Saving..')
        _save_checkpoint(epoch, 'cifar_acc', acc, 'cifar')
        best_cifar_acc = acc
        dump_config(epoch, str(save_dir/'cifar'))

    ########## EVALUATE ON FASHION MNIST TESTLOADER ONCE ####################
    acc = _evaluate(testloader_fashion_mnist, classifier_fashion_mnist)
    if acc > best_fashion_mnist_acc:
        print('Saving..')
        _save_checkpoint(epoch, 'fashion_mnist_acc', acc, 'fashion_mnist')
        best_fashion_mnist_acc = acc
        dump_config(epoch, str(save_dir/'fashion_mnist'))


def _evaluate(loader, classifier):
    """One no-grad pass over *loader* through the shared backbone + *classifier*.

    Prints a running progress bar and returns the accuracy in percent.
    """
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets, meta) in enumerate(loader):
            inputs, targets = inputs.to(device), targets.to(device)
            # channels-last to NCHW for the backbone
            inputs = inputs.permute(0, 3, 1, 2)
            embedding_labels = meta['conditioning_label'].to(device)
            embedding_labels = embedding_labels.unsqueeze(1)
            outputs = classifier(model(inputs, embedding_labels))
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(batch_idx, len(loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    return 100.*correct/total


def _save_checkpoint(epoch, acc_key, acc, subdir):
    """Persist backbone + both heads under save_dir/<subdir>/checkpoint.pth."""
    state = {
        'model': model.state_dict(),
        'classifier_cifar': classifier_cifar.state_dict(),
        'classifier_fashion_mnist': classifier_fashion_mnist.state_dict(),
        acc_key: acc,
        'epoch': epoch,
    }
    torch.save(state, str(save_dir/subdir/'checkpoint.pth'))
###################################### TRAINING STARTS HERE ############################
local_data_path = Path('.').absolute()
# create experiment tree: experiments/<backbone>/<training_type>/{cifar,fashion_mnist}
experiment = args.training_type
save_dir = (local_data_path/'experiments'/args.backbone/experiment)
(save_dir/'cifar').mkdir(exist_ok=True, parents=True)
(save_dir/'fashion_mnist').mkdir(exist_ok=True, parents=True)
# get dataloaders
trainloader, testloader_cifar, testloader_fashion_mnist = get_dataloaders()
# get shared backbone with the dataset-conditioning embedding enabled
model = Backbone(backbone=args.backbone,apply_embedding=True).to(device)
# get two separate classifiers, one head per task
classifier_cifar = ClassificationHead(num_classes=10,in_channels=classifier_dict[args.backbone]).to(device)
classifier_fashion_mnist = ClassificationHead(num_classes=10,in_channels=classifier_dict[args.backbone]).to(device)
# create loss
criterion = nn.CrossEntropyLoss()
# create optimizers (same hyper-parameters for the backbone and both heads)
optim_model = optim.SGD(model.parameters(), lr=args.lr,
momentum=0.9, weight_decay=5e-4)
optim_classifier_cifar = optim.SGD(classifier_cifar.parameters(), lr=args.lr,
momentum=0.9, weight_decay=5e-4)
optim_classifier_fashion_mnist = optim.SGD(classifier_fashion_mnist.parameters(), lr=args.lr,
momentum=0.9, weight_decay=5e-4)
############ CODE FOR RESUMING THE TRAINING ###########################################
# Both checkpoint paths must be supplied together for a resume.
if args.cifar_checkpoint_path != "" and args.fashion_mnist_checkpoint_path!= "":
# Load data from cifar checkpoint.
print('==> Resuming from cifar..')
checkpoint = torch.load(args.cifar_checkpoint_path)
# LOAD THE MODEL FROM CIFAR BEST WEIGHT FOR NOW, TRY LOADING FROM FASHION-MNIST IN ANOTHER EXPERIMENT
model.load_state_dict(checkpoint['model'])
classifier_cifar.load_state_dict(checkpoint['classifier_cifar'])
best_cifar_acc = checkpoint['cifar_acc']
cifar_epoch = checkpoint['epoch']
# Load data from fashion-mnist checkpoint (head only; backbone came from cifar).
print('==> Resuming from fashion mnist..')
checkpoint = torch.load(args.fashion_mnist_checkpoint_path)
#model.load_state_dict(checkpoint['model'])
classifier_fashion_mnist.load_state_dict(checkpoint['classifier_fashion_mnist'])
best_fashion_mnist_acc = checkpoint['fashion_mnist_acc']
fashion_mnist_epoch = checkpoint['epoch']
# Resolve conflicts between the two checkpoints: restart at the older epoch.
start_epoch= min(cifar_epoch, fashion_mnist_epoch)
def update_learning_rate(epoch, n_epochs):
    """Step-decay schedule: 0.1 -> 0.01 -> 0.001 at 40% / 70% of training.

    Mutates the module-level `learning_rate` and pushes the new value into
    all three optimizers (backbone + both task heads).
    """
    global learning_rate
    progress = epoch / n_epochs
    if progress < 0.4:
        learning_rate = 0.1
    elif progress < 0.7:
        learning_rate = 0.01
    else:
        learning_rate = 0.001
    # Apply the new rate to every param group of every optimizer.
    for optimizer in (optim_model, optim_classifier_cifar, optim_classifier_fashion_mnist):
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate
    print("ratio: ", progress, " lr: ", learning_rate)
def main():
"""Run the fixed training schedule, printing the best accuracies per epoch."""
# NOTE(review): the loop runs a hard-coded 400 epochs and the LR-decay call
# is commented out, so --n_epochs and update_learning_rate are effectively
# ignored here — confirm whether that is intentional.
# apply the training schedue
for epoch in range(start_epoch, start_epoch+400):
# call train
#update_learning_rate(epoch, args.n_epochs)
train(epoch)
test(epoch)
print("epoch: ", epoch, "Cifar best accuracy found is: ", best_cifar_acc,
"fashion mnist best accuracy found is: ", best_fashion_mnist_acc)
print("Cifar best accuracy found is: ", best_cifar_acc,
"fashion mnist best accuracy found is: ", best_fashion_mnist_acc)
if __name__ == '__main__':
main()
| rajatmodi62/multi-purpose-networks | train_conditioned.py | train_conditioned.py | py | 14,471 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.cu... |
247176838 | from trytond.model import ModelView, ModelSQL, fields
from trytond.pool import Pool
from sql import Table, Column, Literal, Desc, Asc, Expression, Flavor
from sql.functions import Now, Extract
from sql.operators import Or, And, Concat, ILike, Operator
from sql.conditionals import Coalesce
from sql.aggregate import Count, Max
from nereid import (
request, abort, render_template, login_required, url_for, flash, jsonify,
current_app, route
)
from nereid.contrib.pagination import Pagination, BasePagination
from nereid.ctx import has_request_context
__all__ = ['Artist']
class Artist(ModelSQL, ModelView):
"Artist"
__name__ = "songbook.artist"
_rec_name = 'full_name'
# Stored name parts; full_name/rev_name are computed from them below.
last_name = fields.Char('Last Name', required=True, select=True)
first_name = fields.Char('First Name', select=True)
full_name = fields.Function(
fields.Char('Full Name'), 'get_full_name',
searcher='search_full_name'
)
rev_name = fields.Function(
fields.Char('Reversed Name'), 'get_rev_name'
)
songs = fields.One2Many(
"songbook.song",
'artist',
'Songs by This Artist'
)
def serialize(self):
"""
Serialize the artist object and return a dictionary.
"""
object_json = {
"url": url_for(
'songbook.artist.render_html',
id=self.id,
),
"objectType": self.__name__,
"id": self.id,
"lastName": self.last_name,
"firstName": self.first_name,
"fullName": self.full_name,
}
return object_json
@classmethod
def __setup__(cls):
# Enforce unique (last, first) pairs and order listings by name.
super(Artist, cls).__setup__()
cls._sql_constraints = [
('name_uniq', 'UNIQUE(last_name, first_name)',
'An artist with that name already exists.')
]
cls._order.insert(0, ('last_name', 'ASC'))
cls._order.insert(1, ('first_name', 'ASC'))
@classmethod
def search_full_name(cls, name, clause):
"Search Full Name"
# Build "<first> <last>" in SQL and apply the requested operator to it.
_, operator, value = clause
# NOTE(review): this local deliberately shadows the imported
# sql.operators.Operator within the method.
Operator = fields.SQL_OPERATORS[operator]
table = cls.__table__()
fullname = Concat(
Coalesce(table.first_name, Literal('')),
Concat(
Literal(' '),
Coalesce(table.last_name, Literal(''))
)
)
query = table.select(table.id, where=Operator(fullname, value))
return [('id', 'in', query)]
def get_full_name(self, name):
# "<first> <last>", or just the last name when first_name is unset.
if self.first_name is None:
fullname = self.last_name
else:
fullname = "%s %s" % (self.first_name, self.last_name)
return fullname.strip(" ")
def get_rev_name(self, name):
# "<last>, <first>", or just the last name when first_name is unset.
if self.first_name is None:
revname = self.last_name
else:
revname = "%s, %s" % (self.last_name, self.first_name)
return revname.strip(", ")
@classmethod
@route('/songbook/api/artists', methods=['GET', 'POST'])
def call_api_index(cls):
"""
JSON-formatted REST API to support 3rd party integration, apps
and web page javascript such as search-as-you-type.
"""
name_filter = '%' + request.args.get('namecontains', '') + '%'
domain = [
('full_name', 'ilike', name_filter)
]
artists = cls.search(domain, limit=int(request.args.get('limit', '5')))
return jsonify(
artists=[a.serialize() for a in artists]
)
@classmethod
@route('/songbook/artists/<int:id>', methods=['GET'])
def render_html(cls, id=0):
"""
output details of a selected artist to web client
"""
artist=cls.browse([id])[0]
return render_template(
'songbook_artist-detail.jinja',
artist=artist
)
@classmethod
@route('/songbook/artists', methods=['GET', 'POST'])
def render_html_index(cls):
"""
output artist list to web client
"""
# Optional substring filter plus 25-per-page pagination.
name_filter = '%' + request.args.get('namecontains', '') + '%'
page = request.args.get('page', 1, int)
domain = [
('full_name', 'ilike', name_filter)
]
artists = Pagination(
cls, domain, page, 25
)
return render_template(
'songbook_artist-list.jinja',
artists=artists
)
| coalesco/trytond_songbook | artist.py | artist.py | py | 4,420 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "trytond.model.ModelSQL",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "trytond.model.ModelView",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "trytond.model.fields.Char",
"line_number": 25,
"usage_type": "call"
},
{
"api_name... |
19565236986 | import spacy
# Find, among the plot summaries in movies.txt (one per line), the one
# whose description is semantically closest to the reference plot below,
# using spaCy's word-vector document similarity.
nlp = spacy.load('en_core_web_md')

with open('movies.txt', 'r') as plot_file:
    movie_lines = plot_file.readlines()

compare_movie = ('''Planet Hulk: Will he save
their world or destroy it? When the Hulk becomes too dangerous for the
Earth, the Illuminati trick Hulk into a shuttle and launch him into space to a
planet where the Hulk can live in peace. Unfortunately, Hulk land on the
planet Sakaar where he is sold into slavery and trained as a gladiator.''')

reference_doc = nlp(compare_movie)

# Track the best-scoring candidate seen so far (-1 guarantees the first
# comparison always wins).
best_score = -1
best_movie = ""
for line in movie_lines:
    candidate = nlp(line)
    score = reference_doc.similarity(candidate)
    if score > best_score:
        best_score = score
        best_movie = candidate.text

print("Most similar movie:", best_movie)
print("Similarity score:", best_score)
| vho1988/semantic_similarity | watch_next.py | watch_next.py | py | 921 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "spacy.load",
"line_number": 3,
"usage_type": "call"
}
] |
7706902084 | from bert_models.base_bert_model import BaseBertModel
import joblib
from sklearn.ensemble import GradientBoostingClassifier
import os
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import f1_score
class BaseBertModelWithBoost(BaseBertModel):
    """BERT classifier whose per-class probabilities are re-scored by a
    gradient-boosting meta-classifier (a simple stacking setup).

    The BERT base model produces class probabilities; a sklearn
    GradientBoostingClassifier is then trained on those probabilities --
    optionally the concatenation of "head" and "tail" window predictions
    per document -- and used for the final decision.
    """

    def __init__(self, output_dir, label_list, boost_model = "", gpu_device_num_hub=0, gpu_device_num = 1, batch_size = 16, max_seq_length = 256,
            bert_model_hub = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1", model_folder = "", label_column = "label",
            use_concat_results=False):
        # `boost_model` is a folder holding a previously saved booster;
        # an empty string means "start from a fresh, unfitted booster".
        BaseBertModel.__init__(self, output_dir, label_list, gpu_device_num_hub=gpu_device_num_hub,
            gpu_device_num = gpu_device_num, batch_size = batch_size, max_seq_length = max_seq_length,
            bert_model_hub = bert_model_hub, model_folder = model_folder, label_column = label_column,
            use_concat_results = use_concat_results)
        self.load_boost_model(boost_model)

    def load_boost_model(self, folder):
        # Restore the persisted booster, or create an unfitted one when no
        # folder is given.
        self.sgBoost = joblib.load(os.path.join(folder,'GradientBoost.joblib')) if folder != "" else GradientBoostingClassifier()

    def save_boost_model(self, folder):
        # Persist the booster with joblib, creating the folder if needed.
        if not os.path.exists(folder):
            os.makedirs(folder)
        joblib.dump(self.sgBoost, os.path.join(folder,'GradientBoost.joblib'))

    def prepare_dataset_for_boosting(self, train, use_tail = False):
        """Build the booster's feature matrix for one dataframe: BERT class
        probabilities, optionally concatenated with tail-window
        probabilities (evaluate_model's second positional argument
        presumably selects head vs. tail -- TODO confirm)."""
        es_train_prob_study_type_11, res_train_full_study_type_11, train_y_study_type_11 = self.evaluate_model(train)
        if use_tail:
            es_train_prob_study_type_11_tail, res_train_full_study_type_11_tail, train_y_study_type_11_tail = self.evaluate_model(train, False)
            # Features become [head_probs | tail_probs].
            sg_boost_x = np.concatenate([ es_train_prob_study_type_11, es_train_prob_study_type_11_tail],axis=1)
            return sg_boost_x
        return es_train_prob_study_type_11

    def prepare_datasets_for_boosting(self, train, test, study_df, use_tail = False):
        # Convenience wrapper: feature matrices for all three splits.
        sg_boost_x = self.prepare_dataset_for_boosting(train, use_tail = use_tail)
        sg_boost_test_x = self.prepare_dataset_for_boosting(test, use_tail = use_tail)
        sg_boost_study_x = self.prepare_dataset_for_boosting(study_df, use_tail = use_tail)
        return sg_boost_x, sg_boost_test_x, sg_boost_study_x

    def train_boost_model(self, train, test, study_df, use_tail = False, n_estimators = 60, max_depth = 8, for_train = True):
        """Fit (when `for_train`) the booster and print accuracy scores plus
        a confusion matrix / classification report on the study split.

        NOTE(review): the booster is fitted on the *test* split
        (fit(sg_boost_test_x, test_y)) while `train` and `study_df` are
        only scored -- confirm this split usage is intentional.
        """
        sg_boost_x, sg_boost_test_x, sg_boost_study_x = self.prepare_datasets_for_boosting(train, test, study_df, use_tail = use_tail)
        train_y = list(train[self.label_column].values)
        test_y = list(test[self.label_column].values)
        study_df_y = list(study_df[self.label_column].values)
        if for_train:
            self.sgBoost = GradientBoostingClassifier(n_estimators = n_estimators, max_depth=max_depth)
            self.sgBoost.fit(sg_boost_test_x, test_y)
        print(self.sgBoost.score(sg_boost_test_x, test_y))
        print(self.sgBoost.score(sg_boost_x, train_y))
        print(self.sgBoost.score(sg_boost_study_x, study_df_y))
        print(confusion_matrix(study_df_y, self.sgBoost.predict(sg_boost_study_x)))
        print(classification_report(study_df_y, self.sgBoost.predict(sg_boost_study_x)))

    def evaluate_boost_model(self, test, use_tail = False):
        # Score the (already fitted) booster on one dataframe and print
        # accuracy, confusion matrix and classification report.
        sg_boost_test_x =self.prepare_dataset_for_boosting(test, use_tail = use_tail)
        test_y = list(test[self.label_column].values)
        print(self.sgBoost.score(sg_boost_test_x, test_y))
        print(confusion_matrix(test_y, self.sgBoost.predict(sg_boost_test_x)))
        print(classification_report(test_y, self.sgBoost.predict(sg_boost_test_x)))

    def predict_with_boosting(self, df, with_head_tail = False):
        """Predict labels for `df` through the boosted pipeline.

        Returns (probabilities, labels, y): softmaxed decision-function
        scores from the booster, the booster's predicted labels, and the
        third value passed through from predict_for_df.
        """
        res_prob, res_label, res_y = self.predict_for_df(df)
        if with_head_tail:
            res_prob_tail, res_label_tail, res_y = self.predict_for_df(df, is_head = False)
            res_prob = np.concatenate([ res_prob, res_prob_tail],axis=1)
        # The booster consumes the BERT probabilities; its decision-function
        # scores are squashed with self.softmax (defined on the base class).
        res_prob, res_label = [self.softmax(x) for x in self.sgBoost.decision_function(res_prob)], self.sgBoost.predict(res_prob)
        return res_prob, res_label, res_y
| MariyaIvanina/articles_processing | src/bert_models/base_bert_model_with_boosting.py | base_bert_model_with_boosting.py | py | 4,205 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "bert_models.base_bert_model.BaseBertModel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "bert_models.base_bert_model.BaseBertModel.__init__",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bert_models.base_bert_model.BaseBertModel",
"line... |
42979785156 | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import render
from django.template import RequestContext
from .models import Project
# Create your views here.
def index(request):
    """Show all projects, newest first, paginated 6 per page.

    ``?page=`` selects the page; non-numeric values fall back to page 1
    and out-of-range values clamp to the last page.
    """
    posts_list = Project.objects.all().order_by('-id')
    paginator = Paginator(posts_list, 6)
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric ?page= value: fall back to the first page.
        posts = paginator.page(1)
    except EmptyPage:
        # ?page= beyond the last page: clamp to the last page.
        posts = paginator.page(paginator.num_pages)
    # Removed an unused local dict that shadowed the builtin `vars`.
    context = {
        'posts': posts,
        'paginator': paginator,
    }
    return render(request, 'project/index.tpl', context)
| gulla-k/pwd | project/views.py | views.py | py | 749 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Project.objects.all",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Project.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "models.Project",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "dj... |
71032555624 | """add adventure logs
Revision ID: 36dc23330424
Revises: 0db346b0362b
Create Date: 2022-02-06 21:15:27.347180
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '36dc23330424'
# The migration this one applies on top of.
down_revision = '0db346b0362b'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `adventure_log` table (one per game) linking a game to its
    discovered/discoverable locations and items, plus an index on id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('adventure_log',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('discovered_locations_id', sa.Integer(), nullable=True),
    sa.Column('discoverable_locations_id', sa.Integer(), nullable=True),
    sa.Column('discovered_items_id', sa.Integer(), nullable=True),
    sa.Column('discoverable_items_id', sa.Integer(), nullable=True),
    sa.Column('game_id', sa.Integer(), nullable=True),
    # op.f() applies the configured naming convention to constraint names.
    sa.ForeignKeyConstraint(['discoverable_items_id'], ['item.id'], name=op.f('fk_adventure_log_discoverable_items_id_item')),
    sa.ForeignKeyConstraint(['discoverable_locations_id'], ['location.id'], name=op.f('fk_adventure_log_discoverable_locations_id_location')),
    sa.ForeignKeyConstraint(['discovered_items_id'], ['item.id'], name=op.f('fk_adventure_log_discovered_items_id_item')),
    sa.ForeignKeyConstraint(['discovered_locations_id'], ['location.id'], name=op.f('fk_adventure_log_discovered_locations_id_location')),
    sa.ForeignKeyConstraint(['game_id'], ['game.id'], name=op.f('fk_adventure_log_game_id_game')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_adventure_log'))
    )
    op.create_index(op.f('ix_adventure_log_id'), 'adventure_log', ['id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the `adventure_log` index and table (exact reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_adventure_log_id'), table_name='adventure_log')
    op.drop_table('adventure_log')
    # ### end Alembic commands ###
| lauraturnbull/griddle-earth | engine/migrations/alembic/versions/36dc23330424_add_adventure_logs.py | 36dc23330424_add_adventure_logs.py | py | 1,863 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
34566807465 | import pytorch_lightning as pl
from transformers import AdamW
class DST_Seq2Seq(pl.LightningModule):
    """Lightning wrapper around a seq2seq model for dialogue state tracking.

    The wrapped model is called with encoder input ids, an attention mask
    and target ids, and its built-in loss drives both training and
    validation.
    """

    def __init__(self, args, tokenizer, model):
        super().__init__()
        self.lr = args["lr"]
        self.tokenizer = tokenizer
        self.model = model

    def training_step(self, batch, batch_idx):
        self.model.train()
        loss = self.model(
            input_ids=batch['encoder_input'],
            attention_mask=batch['attention_mask'],
            labels=batch['decoder_output'],
        ).loss
        return {'loss': loss, 'log': {'train_loss': loss}}

    def validation_step(self, batch, batch_idx):
        self.model.eval()
        loss = self.model(
            input_ids=batch['encoder_input'],
            attention_mask=batch['attention_mask'],
            labels=batch['decoder_output'],
        ).loss
        return {'eval_loss': loss, 'log': {'eval_loss': loss}}

    def validation_epoch_end(self, outputs):
        # Average the per-batch validation losses over the epoch.
        mean_loss = sum(step['eval_loss'] for step in outputs) / len(outputs)
        value = mean_loss.item()
        return {'progress_bar': {'eval_loss': value}, 'log': {'eval_loss': value}, 'eval_loss': value}

    def configure_optimizers(self):
        return AdamW(self.parameters(), lr=self.lr, correct_bias=True)
| minson123-github/ADL21-Final-Project | T5DST/model.py | model.py | py | 1,211 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytorch_lightning.LightningModule",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "transformers.AdamW",
"line_number": 35,
"usage_type": "call"
}
] |
35015760399 | import argparse
import numpy as np
import torch
from copy import deepcopy
from gluonts.dataset.multivariate_grouper import MultivariateGrouper
from gluonts.dataset.repository.datasets import get_dataset
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import MultivariateEvaluator
from tsdiff.forecasting.models import (
ScoreEstimator,
TimeGradTrainingNetwork_AutoregressiveOld, TimeGradPredictionNetwork_AutoregressiveOld,
TimeGradTrainingNetwork_Autoregressive, TimeGradPredictionNetwork_Autoregressive,
TimeGradTrainingNetwork_All, TimeGradPredictionNetwork_All,
TimeGradTrainingNetwork_RNN, TimeGradPredictionNetwork_RNN,
TimeGradTrainingNetwork_Transformer, TimeGradPredictionNetwork_Transformer,
TimeGradTrainingNetwork_CNN, TimeGradPredictionNetwork_CNN,
)
from tsdiff.utils import NotSupportedModelNoiseCombination, TrainerForecasting
import warnings
# Silence FutureWarnings (e.g. from gluonts/pandas) that clutter training logs.
warnings.simplefilter(action='ignore', category=FutureWarning)
# Single global device: prefer CUDA when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def energy_score(forecast, target):
    """Sample-based energy score: E||X - y|| - 0.5 * E||X - X'||.

    `forecast` holds samples along the first axis; `target` must broadcast
    against it. Lower is better.
    """
    to_target = np.linalg.norm(forecast - target, axis=-1).mean()
    between_samples = np.linalg.norm(
        forecast[:, np.newaxis, ...] - forecast, axis=-1
    ).mean()
    return to_target - 0.5 * between_samples
def train(
    seed: int,
    dataset: str,
    network: str,
    noise: str,
    diffusion_steps: int,
    epochs: int,
    learning_rate: float,
    batch_size: int,
    num_cells: int,
    hidden_dim: int,
    residual_layers: int,
):
    """Train a diffusion-based forecaster on a GluonTS dataset and return
    evaluation metrics (CRPS, ND, NRMSE, their sum-aggregated variants,
    and the energy score), all as plain floats.

    Args:
        seed: RNG seed applied to both numpy and torch.
        dataset: GluonTS dataset name (e.g. 'electricity_nips').
        network: TimeGrad variant to use (see __main__ for choices).
        noise: diffusion noise process ('normal', 'ou' or 'gp'); the
            autoregressive variants only support 'normal'.
        diffusion_steps: number of diffusion steps (also sets beta_end).
        epochs / learning_rate / batch_size: optimization settings.
        num_cells / hidden_dim / residual_layers: network sizes.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Extra feature dimensions added to the 4*target_dim input size below;
    # exchange_rate uses a negative offset -- presumably tuned per dataset,
    # TODO confirm.
    covariance_dim = 4 if dataset != 'exchange_rate_nips' else -4
    # Load data. Note: `dataset` is rebound from the name string to the
    # dataset object from here on.
    dataset = get_dataset(dataset, regenerate=False)
    target_dim = int(dataset.metadata.feat_static_cat[0].cardinality)
    # Group the univariate series into multivariate arrays (capped at 2000).
    train_grouper = MultivariateGrouper(max_target_dim=min(2000, target_dim))
    test_grouper = MultivariateGrouper(num_test_dates=int(len(dataset.test) / len(dataset.train)), max_target_dim=min(2000, target_dim))
    dataset_train = train_grouper(dataset.train)
    dataset_test = test_grouper(dataset.test)
    # Carve a validation set from the last 20 prediction windows of each
    # training series; those windows are removed from the training targets.
    val_window = 20 * dataset.metadata.prediction_length
    dataset_train = list(dataset_train)
    dataset_val = []
    for i in range(len(dataset_train)):
        x = deepcopy(dataset_train[i])
        x['target'] = x['target'][:,-val_window:]
        dataset_val.append(x)
        dataset_train[i]['target'] = dataset_train[i]['target'][:,:-val_window]
    # Select the training/prediction network pair for the requested variant.
    if network == 'timegrad':
        if noise != 'normal':
            raise NotSupportedModelNoiseCombination
        training_net, prediction_net = TimeGradTrainingNetwork_Autoregressive, TimeGradPredictionNetwork_Autoregressive
    elif network == 'timegrad_old':
        if noise != 'normal':
            raise NotSupportedModelNoiseCombination
        training_net, prediction_net = TimeGradTrainingNetwork_AutoregressiveOld, TimeGradPredictionNetwork_AutoregressiveOld
    elif network == 'timegrad_all':
        training_net, prediction_net = TimeGradTrainingNetwork_All, TimeGradPredictionNetwork_All
    elif network == 'timegrad_rnn':
        training_net, prediction_net = TimeGradTrainingNetwork_RNN, TimeGradPredictionNetwork_RNN
    elif network == 'timegrad_transformer':
        training_net, prediction_net = TimeGradTrainingNetwork_Transformer, TimeGradPredictionNetwork_Transformer
    elif network == 'timegrad_cnn':
        training_net, prediction_net = TimeGradTrainingNetwork_CNN, TimeGradPredictionNetwork_CNN
    estimator = ScoreEstimator(
        training_net=training_net,
        prediction_net=prediction_net,
        noise=noise,
        target_dim=target_dim,
        prediction_length=dataset.metadata.prediction_length,
        context_length=dataset.metadata.prediction_length,
        cell_type='GRU',
        num_cells=num_cells,
        hidden_dim=hidden_dim,
        residual_layers=residual_layers,
        input_size=target_dim * 4 + covariance_dim,
        freq=dataset.metadata.freq,
        loss_type='l2',
        scaling=True,
        diff_steps=diffusion_steps,
        # Keep the total noise schedule roughly constant as steps vary.
        beta_end=20 / diffusion_steps,
        beta_schedule='linear',
        num_parallel_samples=100,
        pick_incomplete=True,
        trainer=TrainerForecasting(
            device=device,
            epochs=epochs,
            learning_rate=learning_rate,
            num_batches_per_epoch=100,
            batch_size=batch_size,
            patience=10,
        ),
    )
    # Training
    predictor = estimator.train(dataset_train, dataset_val, num_workers=8)
    # Evaluation: 100 sample paths per test window.
    forecast_it, ts_it = make_evaluation_predictions(dataset=dataset_test, predictor=predictor, num_samples=100)
    forecasts = list(forecast_it)
    targets = list(ts_it)
    # Energy score over the sample paths vs. the final prediction window.
    score = energy_score(
        forecast=np.array([x.samples for x in forecasts]),
        target=np.array([x[-dataset.metadata.prediction_length:] for x in targets])[:,None,...],
    )
    # CRPS at 19 quantile levels plus sum-aggregated metrics.
    evaluator = MultivariateEvaluator(quantiles=(np.arange(20)/20.0)[1:], target_agg_funcs={'sum': np.sum})
    agg_metric, _ = evaluator(targets, forecasts, num_series=len(dataset_test))
    metrics = dict(
        CRPS=agg_metric['mean_wQuantileLoss'],
        ND=agg_metric['ND'],
        NRMSE=agg_metric['NRMSE'],
        CRPS_sum=agg_metric['m_sum_mean_wQuantileLoss'],
        ND_sum=agg_metric['m_sum_ND'],
        NRMSE_sum=agg_metric['m_sum_NRMSE'],
        energy_score=score,
    )
    metrics = { k: float(v) for k,v in metrics.items() }
    return metrics
if __name__ == '__main__':
    # Command-line entry point: parse hyperparameters, train, print metrics.
    parser = argparse.ArgumentParser(description='Train forecasting model.')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--dataset', type=str)
    parser.add_argument('--network', type=str, choices=[
        'timegrad', 'timegrad_old', 'timegrad_all', 'timegrad_rnn', 'timegrad_transformer', 'timegrad_cnn'
    ])
    parser.add_argument('--noise', type=str, choices=['normal', 'ou', 'gp'])
    parser.add_argument('--diffusion_steps', type=int, default=100)
    parser.add_argument('--epochs', type=int, default=100)
    # BUGFIX: learning rate is fractional; the original `type=int` would
    # reject any value actually passed on the command line (e.g. "1e-3").
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--num_cells', type=int, default=100)
    parser.add_argument('--hidden_dim', type=int, default=100)
    parser.add_argument('--residual_layers', type=int, default=8)
    args = parser.parse_args()

    metrics = train(**args.__dict__)
    for key, value in metrics.items():
        print(f'{key}:\t{value:.4f}')

# Example:
# python -m tsdiff.forecasting.train --seed 1 --dataset electricity_nips --network timegrad_rnn --noise ou --epochs 100
| morganstanley/MSML | papers/Stochastic_Process_Diffusion/tsdiff/forecasting/train.py | train.py | py | 6,723 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "warnings.simplefilter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.cuda... |
73408801703 | from http.server import BaseHTTPRequestHandler, HTTPServer
import os
import cgi
import string
import random
from controller import *
# Bind address for the demo HTTP server.
hostname = "localhost"
serverport = 8000

# Demo stand-in for the authenticated user's id (no auth implemented yet).
# Removed a bare `global userid` statement: at module scope it is a no-op,
# since module-level names are already global.
userid = 1
class server(BaseHTTPRequestHandler):
    """Request router for the demo social app.

    GET routes render HTML pages (home, post detail, search, user profile,
    messages); POST routes consume multipart form data and answer with a
    301 redirect to the resulting page.  Routing uses independent `if`
    tests on `self.path`, so the path patterns must stay mutually
    exclusive.
    """

    def do_GET(self):
        """Serve the HTML page matching the requested path."""
        if self.path == '/':
            self.send_response(200)
            self.send_header('content-type', 'text/html')
            self.end_headers()
            output = HomeViewHandler()
            self.wfile.write(output.encode())
        if self.path == '/post/create/':
            self.send_response(200)
            self.send_header('content-type', 'text/html')
            self.end_headers()
            output = PostCreateForm()
            self.wfile.write(output.encode())
        # /post/<digits>/ -> single post page
        if self.path.startswith('/post/') and self.path[6:-1].isdigit() and self.path.endswith('/'):
            self.send_response(200)
            self.send_header('content-type', 'text/html')
            self.end_headers()
            print(self.path[6:-1])
            postID = self.path[6:-1]
            # need some place holder code to check if the post exists or not
            output = PostViewHandler(postID)
            self.wfile.write(output.encode())
        if self.path == '/search/':
            self.send_response(200)
            self.send_header('content-type', 'text/html')
            self.end_headers()
            print(self.path[6:-1])
            # BUGFIX: the handler must be *called* -- assigning the bare
            # function made the subsequent output.encode() raise
            # AttributeError ('function' object has no attribute 'encode').
            output = SearchViewHandler()
            self.wfile.write(output.encode())
        # /search/results/<query>/ -> search result listing
        if self.path.startswith('/search/results/') and self.path[-1:] == '/':
            self.send_response(200)
            self.send_header('content-type', 'text/html')
            self.end_headers()
            lookup = self.path[16:-1]
            output = SearchResultHandler(lookup)
            self.wfile.write(output.encode())
        # /user/<digits>/ -> user profile placeholder
        if self.path.startswith('/user/') and self.path[6:-1].isdigit() and self.path.endswith('/'):
            self.send_response(200)
            self.send_header('content-type', 'text/html')
            self.end_headers()
            print(self.path[6:-1])
            # need some place holder code to check if the user exists or not
            output = ''' <div>
<h2> This is a User </h2>
</div>
'''
            self.wfile.write(output.encode())
        if self.path == '/messages/':
            self.send_response(200)
            self.send_header('content-type', 'text/html')
            self.end_headers()
            print(self.path[6:-1])
            # need some place holder code to list the user's conversations
            output = ''' <div>
<h2> This page lists all the conversations</h2>
</div>
'''
            self.wfile.write(output.encode())
        # /messages/<digits>/ -> one conversation room with a send form
        if self.path.startswith('/messages/') and self.path[10:-1].isdigit() and self.path.endswith('/'):
            self.send_response(200)
            self.send_header('content-type', 'text/html')
            self.end_headers()
            print(self.path[10:-1])
            roomid = self.path[10:-1]
            # BUGFIX: added the missing space between the name= and type=
            # attributes of the message <input> (invalid HTML otherwise).
            output = ''' <div>
<h2> This is a Specific Conversation room </h2>
<form method="POST" enctype="multipart/form-data" action="/messages/%s/">
<input name="msgContent" type="text" placeholder="Send something awesome for others to view">
<br>
<input type="submit" value="Send">
</form>
</div>
''' % (roomid)
            self.wfile.write(output.encode())

    def do_POST(self):
        """Handle form submissions; each branch answers with a 301 redirect."""
        if self.path == '/post/create/':
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
            # cgi.parse_multipart expects the boundary as bytes.
            pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
            # For demonstration purposes the author id is the module-level
            # `userid` instead of an authenticated user's id.
            if ctype == 'multipart/form-data':
                fields = cgi.parse_multipart(self.rfile, pdict)
                # Field values arrive as lists; str() + [3:-2] slicing strips
                # the surrounding "['...']" decoration.
                new_caption = fields.get('caption')
                new_caption = str(new_caption)
                new_caption = new_caption[3:-2]
                new_title = fields.get('postTitle')
                new_title = str(new_title)
                new_title = new_title[3:-2]
                new_file = fields.get('filename')
                file_type = str(fields.get('type'))
                fileformat = 0
                if file_type[3:-2] == 'on':
                    fileformat = 1
                new_file = str(new_file)
                new_file = new_file[3:-2]
                # Removed a dead `mediaID = 100` assignment that was
                # immediately overwritten by the MediaHandler() result.
                mediaID = MediaHandler(fileformat, new_file, userid)
                # ideally when a user is created, it would also make a repository
                # with the user's unique id and all the media is stored in it
                postID = PostCreateHandler(mediaID, userid, new_title, new_caption)
                redirect = '/post/' + postID + '/'
            # NOTE(review): if the request is not multipart, `redirect` is
            # unbound here -- confirm whether that request shape can occur.
            self.send_response(301)
            self.send_header('content-type', 'text/html')
            self.send_header('Location', redirect)
            self.end_headers()
        if self.path == '/search/':
            redirect = '/search/results/'
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
            pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
            content_len = int(self.headers.get('Content-length'))
            pdict['CONTENT-LENGTH'] = content_len
            if ctype == 'multipart/form-data':
                fields = cgi.parse_multipart(self.rfile, pdict)
                results = fields.get('lookup')
                print(results)
                results = str(results)
                results = results[3:-2]
                # Spaces become dashes so the query can live in the URL path.
                results = results.replace(' ', '-')
                redirect += results + '/'
                # in this line call a function to send data to db
                print(results)
            self.send_response(301)
            self.send_header('content-type', 'text/html')
            self.send_header('Location', redirect)
            self.end_headers()
        if self.path.startswith('/messages/') and self.path[10:-1].isdigit() and self.path.endswith('/'):
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
            pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
            # Redirect back to the same conversation room after posting.
            redirect = self.path
            if ctype == 'multipart/form-data':
                fields = cgi.parse_multipart(self.rfile, pdict)
                new_caption = fields.get('msgContent')
                # in this line call a function to send data to db
                print(new_caption)
            self.send_response(301)
            self.send_header('content-type', 'text/html')
            self.send_header('Location', redirect)
            self.end_headers()
# Start the HTTP server on (hostname, serverport) and block forever.
httpd = HTTPServer((hostname, serverport),server)
httpd.serve_forever()
| crash1604/PurePythonBackend | server.py | server.py | py | 5,714 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "cgi.parse_header",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "cgi.parse_multipart",
"line_number": 110,
"usage_type": "call"
},
{
"api_nam... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.