seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
600932748 | # -*- coding: utf-8 -*-
import unittest
from wosedon.context.document import Document
import corpus2
def leniter(i):
    """Count the items in any iterable by exhausting it."""
    count = 0
    for _ in i:
        count += 1
    return count
class TestSentence(unittest.TestCase):
    """Exercise wosedon Document iteration over the bundled CCL test corpus."""

    def setUp(self):
        tagset = corpus2.get_named_tagset('nkjp')
        cclreader = corpus2.CclRelReader(tagset, 'tests/#Data/wiki_ccl.xml', 'tests/#Data/wiki_ccl.xml')
        ccldoc = cclreader.read()
        self.document = Document(tagset, ccldoc, ccldoc.relations())

    def _count_docs_and_tokens(self):
        """Consume the document stream once; return (doc_count, token_count)."""
        doc_count = 0
        token_count = 0
        doc = self.document.next()
        while doc:
            doc_count += 1
            token_count += leniter(doc.tokens())
            doc = self.document.next()
        return doc_count, token_count

    def test_basics(self):
        # Counts must be identical before and after reset().
        # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
        dc, tc = self._count_docs_and_tokens()
        self.assertEqual(dc, 1)
        self.assertEqual(tc, 411)
        self.document.reset()
        dc, tc = self._count_docs_and_tokens()
        self.assertEqual(dc, 1)
        self.assertEqual(tc, 411)

    def test_wsd(self):
        # Every token must expose a str lemma and a coarse POS in {n, v, a, None}.
        doc = self.document.next()
        while doc:
            for t in doc.tokens():
                self.assertIsInstance(doc.get_token_lemma_str(t), str)
                self.assertIn(doc.get_token_coarse_pos(t), ['n', 'v', 'a', None])
            doc = self.document.next()
| null | eniam_src/tools/wosedon/wosedon/tests/context/test_document.py | test_document.py | py | 1,319 | python | en | code | null | code-starcoder2 | 51 |
261422052 | #####################################################################################################
# LGBIO2050 - TP1 : PCA & ICA
# Helper Functions to plot signals
#####################################################################################################
import matplotlib.pyplot as plt
import numpy as np
import os
root = os.getcwd()
from pathlib import Path
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
plt.rcParams.update({'font.size': 16})
"""--------------------------------------------------------------------------------------------------
PLOT DATA POINTS IN A 2-DIMENSIONAL SPACE
INPUTS:
- x : list of coordinates along the first axis
- y : list of coordinates along the second axis
- title : title of the graph
- x_label : label of the x axis
- y_label : label of the y axis
- show_fig : True if the plot must be displayed on screen, False otherwise
- (file_path) : path where the graph must be saved (if needed)
--------------------------------------------------------------------------------------------------"""
def scatter_plot(x, y, title, x_label, y_label, show_fig, file_path=None):
    """Scatter *x* against *y*; optionally save the figure and/or show it on screen."""
    plt.scatter(x, y, marker='.')
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.tight_layout()
    # Persist to disk first (creating the parent directory on demand), ...
    if file_path:
        target_dir = Path(file_path).parent
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        plt.savefig(file_path)
    # ... then optionally display, and always release the figure.
    if show_fig:
        plt.show()
    plt.close()
"""--------------------------------------------------------------------------------------------------
PLOT ECG SIGNALS DEPENDING ON THE TIME
INPUTS:
- ecg_signals : a matrix of [(n+1)xm] dimensions where n (nb of channels) << m
first row must be the time vector!
- show_fig : True if the plot must be displayed on screen, False otherwise
- (file_path) : path where the graph must be saved (if needed)
--------------------------------------------------------------------------------------------------"""
def ecg_plot(ecg_signals, show_fig, ch_names, file_path=None):
    """Plot every ECG channel on its own figure against a 1 kHz time base.

    ecg_signals: assumed (n_channels, n_samples) sampled at 1000 Hz — TODO confirm.
    ch_names: one title string per channel.
    """
    time = np.linspace(0, (ecg_signals.shape[1]/1000), ecg_signals.shape[1])
    # Same y scale for all channels
    # NOTE(review): slicing rows with shape[1] (the sample count) looks like it was
    # meant to be shape[0]; when n_samples >= n_channels this degrades to the whole
    # array so min/max still cover every channel — confirm intent.
    bottom = np.amin(ecg_signals[0:ecg_signals.shape[1]])
    top = np.amax(ecg_signals[0:ecg_signals.shape[1]])
    # Plot each channel
    for idx in range(1, ecg_signals.shape[0]+1):
        fig = plt.figure(figsize=(16,8))
        if idx == ecg_signals.shape[0]-1:
            # Transparent background for the next-to-last channel — TODO confirm why.
            fig.patch.set_alpha(0)
        plt.plot(time, ecg_signals[idx-1], linewidth=1)
        plt.ylim(bottom, top)
        plt.title(ch_names[idx-1])
    # Save file
    # NOTE(review): savefig runs after the loop, so only the LAST figure is written
    # to file_path — confirm this is intended.
    if file_path:
        if not os.path.exists(Path(file_path).parent):
            os.makedirs(Path(file_path).parent)
        plt.savefig(file_path)
    # Display graph on screen
    if show_fig:
        plt.show()
    plt.close()
"""--------------------------------------------------------------------------------------------------
PLOT ECG SIGNALS DEPENDING ON THE TIME
INPUTS:
- ecg_signals : a matrix of [(n+1)xm] dimensions where n (nb of channels) << m
first row must be the time vector!
- show_fig : True if the plot must be displayed on screen, False otherwise
- (file_path) : path where the graph must be saved (if needed)
--------------------------------------------------------------------------------------------------"""
def ecg_plotbis(ecg_signals, show_fig, ch_names, target, file_path=None):
    """Like ecg_plot, additionally drawing vertical markers at the *target* samples.

    target: iterable of sample indices (at 1000 Hz) marked with vlines — TODO confirm.
    """
    time = np.linspace(0, (ecg_signals.shape[1]/1000), ecg_signals.shape[1])
    # NOTE(review): same suspicious row slice as ecg_plot (shape[1] used to index
    # rows); harmless when n_samples >= n_channels — confirm intent.
    bottom = np.amin(ecg_signals[0:ecg_signals.shape[1]])
    top = np.amax(ecg_signals[0:ecg_signals.shape[1]])
    for idx in range(1, ecg_signals.shape[0]+1):
        fig = plt.figure(figsize=(16,8))
        if idx == ecg_signals.shape[0]-1:
            fig.patch.set_alpha(0)
        plt.plot(time, ecg_signals[idx-1], linewidth=1)
        # Mark each target sample, converted to seconds, across the full y range.
        for i in target:
            plt.vlines(x = i/1000, ymin = bottom, ymax = top)
        plt.ylim(bottom, top)
        plt.title(ch_names[idx-1])
    # Save file
    # NOTE(review): as in ecg_plot, only the last figure is saved — confirm.
    if file_path:
        if not os.path.exists(Path(file_path).parent):
            os.makedirs(Path(file_path).parent)
        plt.savefig(file_path)
    # Display graph on screen
    if show_fig:
        plt.show()
    plt.close()
"""--------------------------------------------------------------------------------------------------
PLOT EEG SIGNALS DEPENDING ON THE TIME
INPUTS:
- eeg_signals : a matrix of [(n+1)xm] dimensions where n (nb of channels) << m
first row must be the time vector!
- label : list of n strings with channel names (do not consider time)
- show_fig : True if the plot must be displayed on screen, False otherwise
- (file_path) : path where the graph must be saved (if needed)
--------------------------------------------------------------------------------------------------"""
def eeg_plot(eeg_signals, label, show_fig, file_path=None):
    """Stacked per-channel EEG plot sharing one time axis.

    eeg_signals: row 0 is the time vector, rows 1..n are channels (per the
    module docstring above this function).
    label: channel names, one per signal row (time excluded).
    """
    # Same y scale for all channels
    bottom = np.amin(eeg_signals[1:eeg_signals.shape[0]])
    top = np.amax(eeg_signals[1:eeg_signals.shape[0]])
    # One big figure to frame the whole
    fig = plt.figure(figsize=(12,8))
    ax0 = fig.add_subplot(111)
    plt.subplots_adjust(hspace=-0.5)  # negative spacing overlaps the stacked axes
    ax0.tick_params(labelcolor='black', top=False, bottom=False, left=False, right=False)
    # Plot each channel
    for idx in range(1, eeg_signals.shape[0]):
        if idx == 1 :
            _ax = fig.add_subplot(eeg_signals.shape[0]-1, 1, idx)
            ax = _ax
        else:
            # All later channels share the first channel's x axis.
            ax = fig.add_subplot(eeg_signals.shape[0]-1, 1, idx, sharex=_ax)
        if idx == eeg_signals.shape[0]-1:
            # Bottom channel keeps a visible, labelled time axis.
            ax.tick_params(labelcolor='black', top=False, bottom=True, left=False, right=False)
            ax.patch.set_alpha(0)
            ax.get_yaxis().set_visible(False)
            ax.spines['top'].set_visible(False)
            ax.set_xlabel('Time (sec)')
        else:
            ax.axis('off')
        ax.plot(eeg_signals[0], eeg_signals[idx], linewidth=0.5)
        ax.set_ylim(bottom, top)
        plt.text(-0.45, 0, label[idx-1])  # channel name to the left of the traces
    ax0.get_yaxis().set_visible(False)
    ax0.get_xaxis().set_visible(False)
    # Save file
    if file_path:
        if not os.path.exists(Path(file_path).parent):
            os.makedirs(Path(file_path).parent)
        plt.savefig(file_path)
    # Display graph on screen
    if show_fig:
        plt.show()
    plt.close()
| null | make_graphs.py | make_graphs.py | py | 6,610 | python | en | code | null | code-starcoder2 | 51 |
388138249 | from datetime import datetime
from cms.utils import get_page_from_request
from annoying.functions import get_config
def page_ancestors(request):
    """Context processor exposing the reverse_ids of the current CMS page's ancestors."""
    current_page = get_page_from_request(request)
    ids = []
    if current_page:
        for ancestor in current_page.get_ancestors():
            # Only ancestors that actually carry a reverse_id are exposed.
            if ancestor.reverse_id:
                ids.append(ancestor.reverse_id)
    return { 'page_ancestors': ids }
def forum_period(request):
    """Context processor with the registration window bounds and the current time.

    Missing settings fall back to *now*, so the window defaults to zero length.
    """
    now = datetime.today()
    context = {'NOW': now}
    context['REG_START'] = get_config('REG_START', now)
    context['REG_END'] = get_config('REG_END', now)
    return context
| null | plugins/context_processors.py | context_processors.py | py | 582 | python | en | code | null | code-starcoder2 | 51 |
438098120 | from __future__ import print_function
import sys
from timeit import default_timer as timer
class Node(object):
    """A DAG node tracking its index, parent/child indices, and level."""

    def __init__(self, idx=None):
        self.idx = idx          # index in the DAG nlist
        self.children = []      # indices of child nodes
        self.parents = []       # indices of parent nodes
        self.level = 0          # depth value, assigned externally (Dag.set_level)
class LinkNode(object):
    """Singly linked node used to record a path between DAG categories."""

    def __init__(self, data=None):
        self.data = data
        self.link = None  # successor on the recorded path

    def __eq__(self, other):
        # Nodes compare equal when they carry the same payload.
        return self.data == other.data

    def __cmp__(self, other):
        # Python 2 ordering hook: order by payload.
        if self.data < other.data:
            return -1
        if self.data > other.data:
            return 1
        return 0

    def __hash__(self):
        # Hash by payload so equal nodes collide in dicts/sets.
        return self.data

    def point(self, node):
        """Make *node* the successor of this node."""
        self.link = node
class Dag(object):
    """A DAG over integer-indexed nodes with level, LCA-path, and ancestor utilities."""

    @staticmethod
    def loads_from(path):
        """Build a Dag from a file whose lines are "father<TAB>child1<TAB>child2...".

        Progress messages go to stderr; levels are computed before returning.
        """
        print("Loading...", file=sys.stderr)
        fin = open(path)
        hlist = fin.readlines()
        fin.close()
        print("Constructing...", file=sys.stderr)
        dag = Dag(len(hlist))
        dag.loads(hlist)
        print("Setting level...", file=sys.stderr)
        dag.set_level()
        return dag

    def __init__(self, length):
        # Pre-allocate `length` disconnected nodes; edges are added by loads().
        self._root = None
        self.nlist = []
        for i in range(length):
            self.nlist.append(Node(i))

    @property
    def root(self):
        """First parentless node, cached on first access.

        NOTE(review): assumes exactly one parentless node exists — confirm input.
        """
        if self._root is None:
            for node in self.nlist:
                if len(node.parents) == 0:
                    self._root = node
                    break
        return self._root

    def set_level(self):
        """Assign each node the length of the longest root path reaching it.

        Children are re-enqueued on every visit (DFS order via list.pop), so
        levels converge to the max over all incoming paths.
        """
        root = self.root
        root.level = 0
        queue = [root]
        while len(queue) > 0:
            head = queue.pop()
            children = [self.nlist[c] for c in head.children]
            for child in children:
                child.level = max(child.level, head.level + 1)
                queue.append(child)

    def loads(self, hlist):
        """Populate parent/child edges from tab-separated adjacency lines."""
        for h in hlist:
            clist = [int(c) for c in h.strip().split('\t')]
            father = clist[0]
            for i in range(1, len(clist)):
                child = clist[i]
                self.nlist[father].children.append(child)
                self.nlist[child].parents.append(father)

    def shortest_path(self, idx1, idx2):
        """Return a node-index path from idx1 to idx2 through a common ancestor.

        Phase 1 collects all ancestors of the shallower node (with back-links);
        phase 2 walks up from the other node until it hits that ancestor set.
        NOTE(review): traversal uses list.pop() (LIFO/DFS), so the ancestor hit
        is a common ancestor but not necessarily the one giving the shortest
        path — confirm whether true shortest paths are required.
        Raises NameError when no common ancestor exists (ValueError would be
        the conventional type).
        """
        node1 = self.nlist[idx1]
        node2 = self.nlist[idx2]
        if node1.level > node2.level:
            node1, node2 = node2, node1
        ancestors = {}
        lca = None
        queue = [LinkNode(node1.idx)]
        while len(queue) > 0:
            head = queue.pop()
            if not head.data in ancestors:
                ancestors[head.data] = head
                parents = self.nlist[head.data].parents
                for parent in parents:
                    node = LinkNode(parent)
                    node.point(head)
                    queue.append(node)
        queue = [LinkNode(node2.idx)]
        while len(queue) > 0:
            head = queue.pop()
            if head.data in ancestors:
                lca = head
                break
            parents = self.nlist[head.data].parents
            for parent in parents:
                node = LinkNode(parent)
                node.point(head)
                queue.append(node)
        path = None
        if lca is None:
            raise NameError("No lowest common ancestor")
        else:
            # Walk idx2's chain down from the meeting point, reverse it, then
            # append idx1's chain so the result runs idx1 -> ancestor -> idx2.
            p = lca
            path = [p.data]
            while p.link is not None:
                p = p.link
                path.append(p.data)
            path.reverse()
            p = ancestors[lca.data]
            while p.link is not None:
                p = p.link
                path.append(p.data)
        return path

    def ancestor_path(self, clist):
        """For every ancestor of the seed indices, return (ancestor, mean path length).

        BFS from the seeds; `link` holds the (1-based) path length to each node.
        Returns dict.items() of ancestor index -> average path length.
        """
        count_stat = {}  # store the number of paths
        sum_stat = {}  # store the sum of length of paths
        queue = []
        for c in clist:
            node = LinkNode(c)
            node.link = 1
            queue.append(node)
            count_stat[node.data] = 1
            sum_stat[node.data] = 1
        while len(queue) > 0:
            head = queue.pop(0)
            parents = self.nlist[head.data].parents
            for p in parents:
                node = LinkNode(p)
                node.link = head.link + 1
                if p in count_stat:
                    count_stat[p] += 1
                    sum_stat[p] += node.link
                else:
                    count_stat[p] = 1
                    sum_stat[p] = node.link
                queue.append(node)
        stat = {}
        for key in count_stat:
            stat[key] = float(sum_stat[key])/count_stat[key]
        return stat.items()

    def level_list(self):
        """Return every node's level, in nlist order."""
        return [n.level for n in self.nlist]
if __name__ == "__main__":
    # Benchmark: load the hierarchy, then time 1000 repeated path queries
    # between two fixed category ids.
    dag = Dag.loads_from('./hierarchy_id.txt')
    print("Calculating...", file=sys.stderr)
    start = timer()
    for i in range(1000):
        dag.shortest_path(504593, 421917)
    end = timer()
    print("Used: {0}secs".format(end - start), file=sys.stderr)
| null | src/dag.py | dag.py | py | 5,082 | python | en | code | null | code-starcoder2 | 51 |
562369743 | import pandas as pd
import numpy as np
import talib as ta
import tushare as ts
from matplotlib import rc
import re
import time
import requests
from bs4 import BeautifulSoup
rc('mathtext', default='regular')
buy_stock_info = []
# import seaborn as sns
# sns.set_style('white')
def getHTMLText(url):
    """Fetch *url* and return its decoded body; return "" on any request failure.

    Fixes the original bare ``except:`` (which swallowed even KeyboardInterrupt)
    by catching only requests' own errors, and adds a timeout so a dead server
    cannot hang the caller forever.
    """
    try:
        r = requests.get(url, timeout=10)
        r.raise_for_status()
        # apparent_encoding sniffs the body, giving better decoding for pages
        # that omit or mis-declare their charset.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Network errors and HTTP error statuses both map to the "" sentinel.
        return ""
def getCiXinList(list, stockURL='http://www.shdjt.com/flsort.asp?lb=993075'):
    """Append sub-new-stock board codes (lb=993075) to *list* via getStockList.

    NOTE(review): the parameter shadows the builtin ``list``; renaming it would
    change the keyword interface for callers, so it is only flagged here.
    """
    getStockList(list, stockURL)
def getStockList(lst, stockURL='http://www.shdjt.com/flsort.asp?lb=399905'):
    """Append every 6-digit stock code found on *stockURL* to *lst* (mutated in place).

    Codes are scraped from the hrefs of anchors styled ared/ablack/ablue.
    Fixes the original bare ``except:`` by catching only the two failures the
    loop body can produce: an anchor without an href (KeyError) and an href
    without a 6-digit run (IndexError).
    """
    html = getHTMLText(stockURL)
    soup = BeautifulSoup(html, 'html.parser')
    a = soup.find_all('a', class_='ared')
    b = soup.find_all('a', class_='ablack')
    c = soup.find_all('a', class_='ablue')
    a.extend(b)
    a.extend(c)
    for item in a:
        try:
            href = item.attrs['href']
            lst.append(re.findall(r"\d{6}", href)[0])
        except (KeyError, IndexError):
            # No href, or no 6-digit code inside it: skip this anchor.
            continue
def getStock():
    """Return the hard-coded watch list of 6-digit stock codes (names dropped).

    The listing interleaves codes and company names; any whitespace token that
    starts with a digit is treated as a code.
    """
    listing = '''300741 华宝股份 603080 新疆火炬 603356 华菱精工
    603711 香飘飘 300732 设研院 300735 光弘科技 603059 倍加洁
    600025 华能水电 002918 蒙娜丽莎 603712 七一二 002927 泰永长征
    002922 伊戈尔 002916 深南电路 002923 润都股份 603056 德邦股份
    603329 上海雅仕 603809 豪能股份 300740 御家汇 603848 好太太 300730
    科创信息 601838 成都银行 603161 科华控股 002929 润建通信 603709
    中源家居 002920 德赛西威 300644 南京聚隆 603680 今创集团 002913
    奥士康 002915 中欣氟材 603895 天永智能 300737 科顺股份 603506
    南都物业 002921 联诚精密 300684 中石科技 002925 盈趣科技 603655
    朗博科技 300624 万兴科技 300733 西菱动力 603871 嘉友国际 300731
    科创新源 600901 江苏租赁 300729 乐歌股份 002928 华夏航空 603477
    振静股份 601828 美凯龙 002926 华西证券 603516
    淳中科技 002917 金奥博 603283 赛腾股份 603890 春秋电子 603156
    养元饮品 002919 名臣健康 300738 奥飞数据 300739 明阳电路'''
    return [token for token in listing.split()
            if re.match(r'(\d)', token, re.M | re.I)]
def analyzeBuyMACD(df, code, days=30):
    """Record/print a buy signal for *code* based on the last two MACD bars.

    Appends [code, date, diff] to the module-level ``buy_stock_info`` list and
    prints the signal. Only bars dated today (wall clock) are recorded.
    """
    # Require enough history beyond the MACD warm-up (~32 bars) — presumably
    # why 32 is subtracted here; TODO confirm the constant.
    if (df.shape[0] - 32 > days):
        stock_df = df.tail(2)
        #print(stock_df)
        date_str = time.strftime("%Y-%m-%d", time.localtime())
        #print(date_str)
        macd_array = stock_df['macd'].tolist()
        date_array = stock_df['date'].tolist()
        dea_array = stock_df['dea'].tolist()
        diff_array = stock_df['diff'].tolist()
        open_array =stock_df['open'].tolist()
        high_array =stock_df['high'].tolist()
        # Signal: histogram just crossed from negative to a small positive value
        # (0 < macd < 0.5) while DIFF is still deeply negative (< -2).
        if macd_array[1] > 0 and macd_array[1] < 0.5 and macd_array[0] < 0 and diff_array[1] < -2:
            if date_str == date_array[1]:
                # Only act on signals whose latest bar is today's date.
                buy_stock_info.append([code, date_array[1], diff_array[1]])
                print('Buy {} on {} when diff :{}'.format(code, date_array[1], diff_array[1]))
    else:
        pass
def analyzeSellMACD(df, code):
    """Print a sell signal for *code* when the MACD histogram shrinks while positive.

    Looks at the last two bars of *df* (needs 'macd' and 'date' columns).
    Fixes a NameError in the original: the condition referenced an undefined
    ``macd_array_slice`` instead of ``macd_array``. Unused column extractions
    (dea/diff/open/high) were removed.
    """
    stock_df = df.tail(2)
    macd_array = stock_df['macd'].tolist()
    date_array = stock_df['date'].tolist()
    # Signal: histogram decreasing between the last two bars but still above zero.
    if macd_array[0] > macd_array[1] and macd_array[1] > 0:
        print('Sell on ' + code + ' ' + date_array[1])
def getMACD(code):
    """Download daily bars for *code* and attach MACD columns (diff/dea/macd)."""
    bars = ts.get_k_data(code)
    close_prices = bars.close.values
    # Standard 12/26/9 MACD parameterization.
    diff_line, dea_line, macd_hist = ta.MACD(
        close_prices, fastperiod=12, slowperiod=26, signalperiod=9)
    bars['diff'] = diff_line
    bars['dea'] = dea_line
    bars['macd'] = macd_hist
    return bars[['date', 'close', 'high', 'open', 'macd', 'diff', 'dea']]
if __name__ == '__main__':
    #for stock in ['601952']:
    # Scan the sub-new-stock board for MACD buy signals, then dump any hits
    # collected in the module-level buy_stock_info list.
    stock_list = []
    getCiXinList(stock_list)
    print(stock_list)
    for stock in stock_list:
        df = getMACD(stock)
        #print(df)
        analyzeBuyMACD(df, stock, days=30)
    for stock_item in buy_stock_info:
        print(stock_item)
| null | Ta_Lib/ta_lib_jenkins.py | ta_lib_jenkins.py | py | 4,346 | python | en | code | null | code-starcoder2 | 51 |
264743949 | N = [1]
# Bucket-count approach to a missing-integer puzzle: one zero-initialised
# bucket per candidate value 1..len(N)+1, tally N, report an empty bucket.
N.sort()
dictA = {}
loc =0
N_values=[]
N_keys=[]
# The missing value must lie in 1..len(N)+1, so pre-create those buckets.
for i in range(1,len(N)+2):
    dictA[i]=0
print(dictA)
# Count occurrences of each element of N.
for i in N:
    dictA[i]+=1
print(dictA)
# Split buckets into parallel key/count lists for positional scanning.
for k,v in dictA.items():
    N_values.append(v)
    N_keys.append(k)
# NOTE(review): this keeps the *last* empty bucket, so with several missing
# values the largest candidate is printed, not the smallest — confirm intent.
for i in range(len(N_values)):
    if N_values[i]== 0:
        loc = i
print(N_keys[loc])
# for i in range(len(N)-1):
#     if N[i+1] != N[i] + 1:
#         print(N[i]+1)
| null | Missing integer.py | Missing integer.py | py | 441 | python | en | code | null | code-starcoder2 | 51 |
60878580 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param {ListNode} head
    # @return {ListNode}
    def deleteDuplicates(self, head):
        """Remove every value that occurs more than once in a sorted linked list."""
        dummy = ListNode(0)
        dummy.next = head
        prev = dummy      # last node known to be unique
        curr = head
        while curr is not None:
            # Skip over a run of equal values, remembering whether one existed.
            duplicated = False
            while curr.next is not None and curr.next.val == curr.val:
                curr = curr.next
                duplicated = True
            if duplicated:
                # Drop the whole run by linking past it.
                prev.next = curr.next
            else:
                prev = curr
            curr = curr.next
        return dummy.next
576440612 | import numpy as np
def softmax(X):
    """Row-wise softmax: normalize exp(X) so each row sums to one."""
    exp_vals = np.exp(X)
    row_sums = exp_vals.sum(axis=1)
    return exp_vals / row_sums[:, np.newaxis]
def cross_entropy(actual, predicted):
    """Base-2 cross entropy between labels and predicted probabilities."""
    total = 0.0
    for i in range(len(actual)):
        total += actual[i] * np.log2(predicted[i])
    return -total
def classify(predicted):
    """Map row-wise class probabilities to 1-based class labels."""
    winners = np.argmax(predicted, axis=1)
    return winners + 1
def gd(x, y):
    """Train a softmax (multinomial logistic) classifier by gradient descent.

    x: (n_samples, 2) feature matrix.
    y: (n_samples, 3) one-hot label matrix.
    Prints the mean cross-entropy loss each iteration and returns the class
    probabilities predicted after the final iteration.
    """
    lr = 0.99  # fixed learning rate, no decay schedule
    w_curr = np.array([(0, 0, 0), (0, 0.0, 0.0)])  # check it again
    for i in range(70):  # fixed iteration budget; no convergence test
        y_predicted = np.matmul(x, w_curr)
        y_p_probability = softmax(y_predicted)
        Loss = sum(cross_entropy(y, y_p_probability)) / x.shape[0]
        print(Loss)
        # = x(predicted-actual) its derivative of entropy loss function
        wd = (np.matmul(x.transpose(), y_p_probability - y))
        w_curr = w_curr - wd * lr / x.shape[0]
    return y_p_probability
# Toy data: 18 two-dimensional points in three clusters of six.
x = np.array([(-1., 2.5), (-2., 5.), (-1.5, 4.), (-1., 2.3), (-2.5, 6.5), (-1.8, 4.),
              (-1.2, -2.5), (-2.3, -3.), (-1.8, -4.), (-1.9, -2.3), (-2.9, -3.5), (-1.7, -4.),
              (1., -4.5), (0.2, 5.), (0.5, -3.), (1.3, 2.3), (2.5, -1.0), (1.8, 3.)])
# One-hot labels: rows 0-5 class 1, rows 6-11 class 2, rows 12-17 class 3.
y = np.array([(1, 0, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0),
              (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0),
              (0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1)])
# 0-based class index recovered from each one-hot row.
labels_actual = [np.where(r == 1)[0][0] for r in y]
# Train, then convert final probabilities to 1-based labels via classify().
y_prob = gd(x, y)
labels = classify(y_prob)
| null | Gradient/Gradient_classifier.py | Gradient_classifier.py | py | 1,431 | python | en | code | null | code-starcoder2 | 51 |
102554322 | import logging
import os
from collections import namedtuple
import numpy as np
import pandas as pd
import fenics as fe
import scipy as sp
from netCDF4 import Dataset, num2date
from metpy.units import units
import metpy.calc.thermo as thermo
from siphon.simplewebservice.wyoming import WyomingUpperAir
from letkf_forecasting import __version__
from letkf_forecasting.optical_flow import optical_flow
from letkf_forecasting.letkf_io import (
return_analysis_ensemble,
return_single_time,
extract_components,
save_netcdf,
read_coords,
)
from letkf_forecasting.random_functions import (
perturb_irradiance,
eig_2d_covariance,
perturb_winds,
)
from letkf_forecasting.advection import (
advect_5min_ensemble,
remove_divergence_ensemble,
noise_fun,
advect_5min_single,
remove_divergence_single
)
from letkf_forecasting.assimilation_accessories import (
ensemble_creator,
assimilation_position_generator
)
from letkf_forecasting.assimilation import (
assimilate_sat_to_wind,
assimilate_wrf,
reduced_enkf,
)
def set_up_param_dict(*, date, io, flags, advect_params, ens_params,
                      pert_params, sat2sat, sat2wind, wrf, opt_flow):
    """Flatten all configuration dicts into one record of run metadata.

    Dicts carrying a 'name' key are merged with every remaining key prefixed
    by that name (e.g. {'name': 'wrf', 'sig': 1} -> {'wrf_sig': 1}). The
    package version is stamped in under 'git_version'.
    """
    params = date.copy()
    for plain_section in (io, advect_params, ens_params, pert_params):
        params.update(plain_section)
    for named_section in (flags, sat2sat, sat2wind, wrf, opt_flow):
        prefix = named_section['name'] + '_'
        params.update({prefix + key: value
                       for key, value in named_section.items()
                       if key != 'name'})
    params['git_version'] = __version__
    return params
def dict2nt(adict, aname):
    """Convert *adict* into a namedtuple called *aname* with the dict's keys as fields."""
    nt_class = namedtuple(aname, adict.keys())
    return nt_class(**adict)
def calc_system_variables(*, coords, advect_params, flags, pert_params):
    """Precompute grid geometry, horizon count, and optional FEM/random-field objects.

    Returns a 'sys_vars' namedtuple. FunctionSpace_wind is present only when
    flags['div'] is set; rf_* fields only under the perturbation flags.
    """
    dx = (coords.we[1] - coords.we[0])*1000
    dy = (coords.sn[1] - coords.sn[0])*1000  # dx, dy in m not km
    max_horizon = pd.Timedelta(advect_params['max_horizon'])
    ci_crop_shape = np.array([coords.sn_crop.size,
                              coords.we_crop.size],
                             dtype='int')
    # U lives on the west-east staggered grid, V on the south-north one.
    U_crop_shape = np.array([coords.sn_crop.size,
                             coords.we_stag_crop.size],
                            dtype='int')
    V_crop_shape = np.array([coords.sn_stag_crop.size,
                             coords.we_crop.size],
                            dtype='int')
    U_crop_size = U_crop_shape[0]*U_crop_shape[1]
    V_crop_size = V_crop_shape[0]*V_crop_shape[1]
    wind_size = U_crop_size + V_crop_size
    # One forecast horizon every 15 minutes out to max_horizon.
    num_of_horizons = int((max_horizon/15).seconds/60)
    sys_vars = {'dx': dx, 'dy': dy,
                'num_of_horizons': num_of_horizons,
                'max_horizon': max_horizon,
                'ci_crop_shape': ci_crop_shape,
                'U_crop_shape': U_crop_shape,
                'V_crop_shape': V_crop_shape,
                'U_crop_size': U_crop_size,
                'V_crop_size': V_crop_size,
                'wind_size': wind_size}
    if flags['div']:
        # FEM mesh/space later used to project winds onto divergence-free fields.
        mesh = fe.RectangleMesh(fe.Point(0, 0),
                                fe.Point(int(V_crop_shape[1] - 1),
                                         int(U_crop_shape[0] - 1)),
                                int(V_crop_shape[1] - 1),
                                int(U_crop_shape[0] - 1))
        FunctionSpace_wind = fe.FunctionSpace(mesh, 'P', 1)
        sys_vars['FunctionSpace_wind'] = FunctionSpace_wind
    if flags['perturbation']:
        # Covariance eigendecomposition driving cloud-index perturbations.
        rf_eig, rf_vectors = eig_2d_covariance(
            x=coords.we_crop, y=coords.sn_crop,
            Lx=pert_params['Lx'],
            Ly=pert_params['Ly'], tol=pert_params['tol'])
        rf_approx_var = (
            rf_vectors * rf_eig[None, :] * rf_vectors).sum(-1).mean()
        sys_vars['rf_eig'] = rf_eig
        sys_vars['rf_vectors'] = rf_vectors
        sys_vars['rf_approx_var'] = rf_approx_var
    if flags['perturb_winds']:
        # Separate decomposition (wind length scales) for wind perturbations;
        # eigenvalues are rescaled by Lx_wind**2 — TODO confirm the scaling.
        rf_eig, rf_vectors = eig_2d_covariance(
            coords.we_crop, coords.sn_crop,
            Lx=pert_params['Lx_wind'],
            Ly=pert_params['Ly_wind'], tol=pert_params['tol_wind'])
        rf_approx_var = (
            rf_vectors * rf_eig[None, :] * rf_vectors).sum(-1).mean()
        rf_eig = rf_eig*pert_params['Lx_wind']**2
        sys_vars['rf_eig_wind'] = rf_eig
        sys_vars['rf_vectors_wind'] = rf_vectors
        sys_vars['rf_approx_var_wind'] = rf_approx_var
    sys_vars = dict2nt(sys_vars, 'sys_vars')
    return sys_vars
def calc_assim_variables(*, sys_vars, advect_params, flags, sat2sat, sat2wind,
                         wrf):
    """Precompute assimilation grid positions for each enabled assimilation type.

    Returns an 'assim_vars' namedtuple whose fields depend on which of
    assim_sat2sat / assim_sat2wind / assim_wrf flags are set.
    """
    assim_vars = {}
    if flags['assim_sat2sat']:
        # Positions on the cloud-index grid at the sat2sat spacing.
        assim_pos, assim_pos_2d, full_pos_2d = (
            assimilation_position_generator(sys_vars.ci_crop_shape,
                                            sat2sat['grid_size']))
        noise_init = noise_fun(sys_vars.ci_crop_shape)
        assim_vars['assim_pos'] = assim_pos
        assim_vars['assim_pos_2d'] = assim_pos_2d
        assim_vars['full_pos_2d'] = full_pos_2d
        assim_vars['noise_init'] = noise_init
    if flags['assim_sat2wind']:
        # Cloud-index observation positions plus U/V grid positions for the
        # satellite-to-wind update.
        assim_pos_sat2wind, assim_pos_2d_sat2wind, full_pos_2d_sat2wind = (
            assimilation_position_generator(sys_vars.ci_crop_shape,
                                            sat2wind['grid_size']))
        assim_vars['assim_pos_sat2wind'] = assim_pos_sat2wind
        assim_vars['assim_pos_2d_sat2wind'] = assim_pos_2d_sat2wind
        assim_vars['full_pos_2d_sat2wind'] = full_pos_2d_sat2wind
        assim_pos_U, assim_pos_2d_U, full_pos_2d_U = (
            assimilation_position_generator(sys_vars.U_crop_shape,
                                            sat2wind['grid_size']))
        assim_pos_V, assim_pos_2d_V, full_pos_2d_V = (
            assimilation_position_generator(sys_vars.V_crop_shape,
                                            sat2wind['grid_size']))
        assim_vars['assim_pos_U'] = assim_pos_U
        assim_vars['assim_pos_2d_U'] = assim_pos_2d_U
        assim_vars['full_pos_2d_U'] = full_pos_2d_U
        assim_vars['assim_pos_V'] = assim_pos_V
        assim_vars['assim_pos_2d_V'] = assim_pos_2d_V
        assim_vars['full_pos_2d_V'] = full_pos_2d_V
    if flags['assim_wrf']:
        # U/V positions at the (typically coarser) WRF assimilation spacing.
        assim_pos_U_wrf, assim_pos_2d_U_wrf, full_pos_2d_U_wrf = (
            assimilation_position_generator(sys_vars.U_crop_shape,
                                            wrf['grid_size']))
        assim_pos_V_wrf, assim_pos_2d_V_wrf, full_pos_2d_V_wrf = (
            assimilation_position_generator(sys_vars.V_crop_shape,
                                            wrf['grid_size']))
        assim_vars['assim_pos_U_wrf'] = assim_pos_U_wrf
        assim_vars['assim_pos_2d_U_wrf'] = assim_pos_2d_U_wrf
        assim_vars['full_pos_2d_U_wrf'] = full_pos_2d_U_wrf
        assim_vars['assim_pos_V_wrf'] = assim_pos_V_wrf
        assim_vars['assim_pos_2d_V_wrf'] = assim_pos_2d_V_wrf
        assim_vars['full_pos_2d_V_wrf'] = full_pos_2d_V_wrf
    assim_vars = dict2nt(assim_vars, 'assim_vars')
    return assim_vars
def return_wind_time(*, sat_time, coords):
    """Return the latest wind timestamp at or before *sat_time*.

    Uses ``Index.get_indexer([...], method='pad')`` instead of the original
    ``get_loc(..., method='pad')`` because the ``method`` keyword of
    ``Index.get_loc`` was deprecated in pandas 1.4 and removed in pandas 2.0;
    ``get_indexer`` is available and equivalent on older pandas as well.
    """
    int_index_wind = coords.wind_times.get_indexer([sat_time], method='pad')[0]
    wind_time = coords.wind_times[int_index_wind]
    return wind_time
def return_ensemble(*, data_file_path, ens_params, coords, flags):
    """Build the initial state: cloud-index field plus U/V winds, optionally an ensemble.

    Wind source is chosen by flags: 'radiosonde' (uniform field from the TUS
    sounding level of max RH), 'opt_flow' (optical-flow derived), else the
    stored WRF winds. Returns a 2-D array (state_size, n_members); a single
    deterministic column when flags['assim'] is False.
    """
    sat_time = coords.sat_times[0]
    wind_time = return_wind_time(sat_time=sat_time, coords=coords)
    q = return_single_time(data_file_path, coords.sat_times_all,
                           sat_time, [coords.sn_slice],
                           [coords.we_slice], ['ci'])[0]
    if flags['radiosonde']:
        # Uniform wind field taken from the sounding level with maximum RH.
        station = 'TUS'
        df = WyomingUpperAir.request_data(sat_time.date(), station)
        T = df['temperature'].values * units(df.units['temperature'])
        Td = df['dewpoint'].values * units(df.units['dewpoint'])
        u_wind = df['u_wind'].values * units(df.units['u_wind'])
        u_wind = u_wind.to(units.meter/units.second)
        v_wind = df['v_wind'].values * units(df.units['v_wind'])
        v_wind = v_wind.to(units.meter/units.second)
        rh = thermo.relative_humidity_from_dewpoint(T, Td)
        max_arg = np.argmax(rh)
        u_size = coords.we_stag_crop.size * coords.sn_crop.size
        v_size = coords.we_crop.size * coords.sn_stag_crop.size
        U = np.ones(u_size)*u_wind[max_arg]
        V = np.ones(v_size)*v_wind[max_arg]
    elif flags['opt_flow']:
        # Winds from pre-computed optical flow between the first two images.
        opt_flow_folder = os.path.split(data_file_path)[0]
        opt_flow_file = os.path.join(opt_flow_folder, 'data_opt_flow.nc')
        of_sat_time = coords.sat_times[1]
        U, V = return_single_time(opt_flow_file, coords.sat_times_all,
                                  of_sat_time,
                                  [coords.sn_slice, coords.sn_stag_slice],
                                  [coords.we_stag_slice, coords.we_slice],
                                  ['U_opt_flow', 'V_opt_flow'])
        time_step = (of_sat_time - sat_time).seconds
        # Convert pixel displacement to m/s; 250 presumably the grid spacing
        # in meters — TODO confirm. Winds clipped to a +/-50 m/s sanity range.
        U = U * (250 / time_step)
        V = V * (250 / time_step)
        U = U.clip(min=-50, max=50)
        V = V.clip(min=-50, max=50)
    else:
        U, V = return_single_time(data_file_path, coords.wind_times, wind_time,
                                  [coords.sn_slice, coords.sn_stag_slice],
                                  [coords.we_stag_slice, coords.we_slice],
                                  ['U', 'V'])
        U, V = smooth_winds(U, V)
    if flags['wrf_mean']:
        # Collapse the wind fields to their spatial means.
        U = np.ones_like(U)*U.mean()
        V = np.ones_like(V)*V.mean()
    if flags['assim']:
        ensemble = ensemble_creator(
            q, U, V, CI_sigma=ens_params['ci_sigma'],
            wind_sigma=ens_params['winds_sigma'],
            ens_size=ens_params['ens_num'])
    else:
        # Deterministic run: one column [U, V, ci].
        ensemble = np.concatenate([U.ravel(), V.ravel(), q.ravel()])[:, None]
    # Drop any mask, keeping the dense values in the same shape.
    shape = ensemble.shape
    ensemble = np.ma.compressed(ensemble).reshape(shape)
    return ensemble
def forecast_setup(*, data_file_path, date, io, advect_params, ens_params,
                   pert_params, flags, sat2sat, sat2wind, wrf, opt_flow,
                   results_file_path):
    """Assemble everything a forecast run needs.

    Returns (param_dict, coords, sys_vars, assim_vars, ensemble); assim_vars
    is None when assimilation is disabled.
    """
    param_dict = set_up_param_dict(
        date=date, io=io, advect_params=advect_params, ens_params=ens_params,
        pert_params=pert_params, flags=flags, sat2sat=sat2sat,
        sat2wind=sat2wind, wrf=wrf, opt_flow=opt_flow)
    coords = read_coords(data_file_path=data_file_path,
                         advect_params=advect_params, flags=flags)
    sys_vars = calc_system_variables(
        coords=coords, advect_params=advect_params, flags=flags,
        pert_params=pert_params)
    # 'analysis_fore' is optional in flags; when set, restart from a previously
    # saved analysis ensemble instead of building a fresh one.
    if 'analysis_fore' in flags:
        if flags['analysis_fore']:
            sat_time = coords.sat_times[0]
            ensemble = return_analysis_ensemble(
                sat_time=sat_time, results_file_path=results_file_path)
        else:
            ensemble = return_ensemble(data_file_path=data_file_path,
                                       ens_params=ens_params,
                                       coords=coords, flags=flags)
    else:
        ensemble = return_ensemble(data_file_path=data_file_path,
                                   ens_params=ens_params,
                                   coords=coords, flags=flags)
    if flags['assim']:
        assim_vars = calc_assim_variables(sys_vars=sys_vars,
                                          advect_params=advect_params,
                                          flags=flags, sat2sat=sat2sat,
                                          sat2wind=sat2wind, wrf=wrf)
    else:
        assim_vars = None
    return param_dict, coords, sys_vars, assim_vars, ensemble
def preprocess(*, ensemble, flags, remove_div_flag, coords, sys_vars):
    """Optionally project the wind portion of *ensemble* onto a divergence-free field.

    The projection only runs when both the caller requests it and the 'div'
    flag enabled the FEM function space at setup time.
    """
    if not (remove_div_flag and flags['div']):
        return ensemble
    logging.debug('remove divergence')
    wind_block = ensemble[:sys_vars.wind_size]
    ensemble[:sys_vars.wind_size] = remove_divergence_ensemble(
        FunctionSpace=sys_vars.FunctionSpace_wind,
        wind_ensemble=wind_block,
        U_crop_shape=sys_vars.U_crop_shape,
        V_crop_shape=sys_vars.V_crop_shape,
        sigma=4)
    return ensemble
def forecast(*, ensemble, flags, coords, time_index, sat_time,
             sys_vars, advect_params, pert_params, assim_vars, workers):
    """Advect the ensemble forward, one 15-minute horizon at a time.

    Returns (ensemble_array, save_times, background): the stacked state at
    each horizon, the matching timestamps, and a copy of the state at the
    next observation time (None when there is no later observation).
    """
    save_times = pd.date_range(sat_time,
                               periods=(sys_vars.num_of_horizons + 1),
                               freq='15min')
    save_times = save_times.tz_convert(None)
    # Number of 15-min advection steps until the next satellite time; the
    # state at that point becomes the background for the next assimilation.
    if time_index + 1 < coords.sat_times.size:
        num_of_advect = int((
            coords.sat_times[time_index + 1] -
            coords.sat_times[time_index]).seconds/(60*15))
    else:
        num_of_advect = 0
    background = None
    logging.debug(f'15min steps to background: {num_of_advect}')
    ensemble_array = ensemble.copy()[None, :, :]
    # CFL-style sub-stepping: pick T_steps so a 5-min step obeys C_max given
    # the largest wind speeds in the current state.
    cx = abs(ensemble[:sys_vars.U_crop_size]).max()
    cy = abs(ensemble[sys_vars.U_crop_size:
                      sys_vars.wind_size]).max()
    T_steps = int(np.ceil((5*60)*(cx/sys_vars.dx
                                  + cy/sys_vars.dy)
                          / advect_params['C_max']))
    dt = (5*60)/T_steps
    for m in range(sys_vars.num_of_horizons):
        logging.info(str(pd.Timedelta('15min')*(m + 1)))
        # Three 5-minute advections make up one 15-minute horizon.
        for n in range(3):
            if flags['perturb_winds']:
                ensemble[:sys_vars.wind_size] = perturb_winds(
                    ensemble[:sys_vars.wind_size], sys_vars, pert_params)
            if flags['assim']:
                ensemble = advect_5min_ensemble(
                    ensemble, dt, sys_vars.dx, sys_vars.dy,
                    T_steps,
                    sys_vars.U_crop_shape, sys_vars.V_crop_shape,
                    sys_vars.ci_crop_shape, workers)
            else:
                # Deterministic run: advect the single member in column 0.
                ensemble[:, 0] = advect_5min_single(
                    ensemble[:, 0], dt, sys_vars.dx, sys_vars.dy,
                    T_steps,
                    sys_vars.U_crop_shape, sys_vars.V_crop_shape,
                    sys_vars.ci_crop_shape)
            # Cloud index is a fraction; clamp to [0, 1] after each step.
            ensemble[sys_vars.wind_size:] = (ensemble[sys_vars.wind_size:]
                                             .clip(min=0, max=1))
            if flags['perturbation']:
                ensemble[sys_vars.wind_size:] = perturb_irradiance(
                    ensemble[sys_vars.wind_size:], sys_vars.ci_crop_shape,
                    pert_params['edge_weight'],
                    pert_params['pert_mean'],
                    pert_params['pert_sigma'],
                    sys_vars.rf_approx_var,
                    sys_vars.rf_eig, sys_vars.rf_vectors)
        # Record the state reached at this 15-minute horizon.
        ensemble_array = np.concatenate(
            [ensemble_array, ensemble[None, :, :]],
            axis=0)
        if num_of_advect == (m + 1):
            background = ensemble.copy()
    return ensemble_array, save_times, background
def save(*, ensemble_array, coords, ens_params, param_dict, sys_vars,
         save_times, results_file_path, flags):
    """Split the stacked ensemble into U/V/ci fields and write them to netCDF."""
    n_members = ens_params['ens_num']
    n_times = sys_vars.num_of_horizons + 1
    U, V, ci = extract_components(
        ensemble_array, n_members, n_times,
        sys_vars.U_crop_shape, sys_vars.V_crop_shape, sys_vars.ci_crop_shape)
    save_netcdf(
        results_file_path, U, V, ci, param_dict,
        coords.we_crop, coords.sn_crop,
        coords.we_stag_crop, coords.sn_stag_crop,
        save_times, n_members, flags)
def maybe_assim_sat2sat(*, ensemble, data_file_path, sat_time,
                        coords, sys_vars, flags):
    """Replace the ensemble's cloud-index block with the current satellite image.

    True sat-to-sat assimilation is not implemented; the observation simply
    overwrites the cloud-index state. Analysis-forecast runs skip the
    overwrite entirely and return the ensemble untouched.
    """
    if flags.get('analysis_fore', False):
        return ensemble
    if flags['assim_sat2sat']:
        raise NotImplementedError
    observed_ci = return_single_time(data_file_path, coords.sat_times_all,
                                     sat_time, [coords.sn_slice],
                                     [coords.we_slice], ['ci'])[0]
    ensemble[sys_vars.wind_size:] = observed_ci.ravel()[:, None]
    return ensemble
def maybe_assim_sat2wind(*, ensemble, data_file_path, sat_time,
                         coords, sys_vars, assim_vars, sat2wind,
                         flags):
    """If enabled, assimilate the satellite cloud-index image into the winds.

    Returns (ensemble, div_sat2wind_flag); the flag tells the caller that a
    divergence-removal pass should follow this update.
    """
    # Analysis-forecast runs skip assimilation altogether.
    if 'analysis_fore' in flags:
        if flags['analysis_fore']:
            return ensemble, False
    if flags['assim_sat2wind']:
        logging.debug('Assim sat2wind')
        q = return_single_time(data_file_path, coords.sat_times_all,
                               sat_time, [coords.sn_slice], [coords.we_slice],
                               ['ci'])[0]
        # Observation error enters via R^-1 = 1/sigma^2.
        ensemble = assimilate_sat_to_wind(
            ensemble=ensemble,
            observations=q.ravel(),
            R_inverse_wind=1/sat2wind['sig']**2,
            wind_inflation=sat2wind['infl'],
            domain_shape=sys_vars.ci_crop_shape,
            U_shape=sys_vars.U_crop_shape,
            V_shape=sys_vars.V_crop_shape,
            localization_length_wind=sat2wind['loc'],
            assimilation_positions=assim_vars.assim_pos_sat2wind,
            assimilation_positions_2d=assim_vars.assim_pos_2d_sat2wind,
            full_positions_2d=assim_vars.full_pos_2d_sat2wind)
        div_sat2wind_flag = True
    else:
        div_sat2wind_flag = False
    return ensemble, div_sat2wind_flag
def maybe_assim_wrf(*, ensemble, data_file_path, sat_time,
                    coords, sys_vars, assim_vars, wrf,
                    ens_params, flags):
    """Assimilate or directly insert WRF winds when a wind time matches.

    Runs only when ``sat_time`` coincides with the nearest WRF wind time and
    radiosonde mode is off.  Depending on ``flags``, the smoothed WRF winds
    are either assimilated into the ensemble (``assim_wrf``) or copied in
    directly (optionally with per-member Gaussian noise).

    Returns:
        (ensemble, div_wrf_flag): the possibly updated ensemble and whether
        the wind field changed (caller then removes divergence).
    """
    if 'analysis_fore' in flags:
        if flags['analysis_fore']:
            # Analysis-forecast passes skip every assimilation step.
            return ensemble, False
    wind_time = return_wind_time(sat_time=sat_time, coords=coords)
    if sat_time == wind_time and not flags['radiosonde']:
        U, V = return_single_time(data_file_path, coords.wind_times,
                                  wind_time,
                                  [coords.sn_slice, coords.sn_stag_slice],
                                  [coords.we_stag_slice, coords.we_slice],
                                  ['U', 'V'])
        # Keep only the large-scale structure of the WRF winds.
        U, V = smooth_winds(U, V)
        if flags['wrf_mean']:
            # Collapse the winds to their domain mean (uniform wind field).
            U = np.ones_like(U)*U.mean()
            V = np.ones_like(V)*V.mean()
        # NOTE(review): the flag is set True even when neither sub-branch
        # below modifies the ensemble (assim_wrf False and opt_flow True) —
        # confirm that is intended.
        div_wrf_flag = True
        if flags['assim_wrf']:
            logging.debug('Assim WRF')
            R_inverse = 1/wrf['sig']**2
            # Localization length converted from physical units to grid cells.
            localization_length_wind = int(round(
                wrf['loc'] / (coords.we[1] - coords.we[0])))
            # U and V components are assimilated independently.
            ensemble[:sys_vars.U_crop_size] = assimilate_wrf(
                ensemble=ensemble[:sys_vars.U_crop_size],
                observations=U.ravel(),
                R_inverse=R_inverse,
                wind_inflation=wrf['infl'],
                wind_shape=sys_vars.U_crop_shape,
                localization_length_wind=localization_length_wind,
                assimilation_positions=assim_vars.assim_pos_U_wrf,
                assimilation_positions_2d=assim_vars.assim_pos_2d_U_wrf,
                full_positions_2d=assim_vars.full_pos_2d_U_wrf)
            ensemble[sys_vars.U_crop_size:sys_vars.wind_size] = assimilate_wrf(
                ensemble=ensemble[sys_vars.U_crop_size:
                                  sys_vars.wind_size],
                observations=V.ravel(),
                R_inverse=R_inverse,
                wind_inflation=wrf['infl'],
                wind_shape=sys_vars.V_crop_shape,
                localization_length_wind=localization_length_wind,
                assimilation_positions=assim_vars.assim_pos_V_wrf,
                assimilation_positions_2d=assim_vars.assim_pos_2d_V_wrf,
                full_positions_2d=assim_vars.full_pos_2d_V_wrf)
        elif not flags['opt_flow']:
            # No assimilation: overwrite the ensemble winds with WRF winds.
            logging.debug('replace WRF')
            if ensemble.shape[1] > 1:
                # Multiple members: add one random offset per member so the
                # ensemble keeps spread in the wind components.
                random_nums = np.random.normal(
                    loc=0,
                    scale=ens_params['winds_sigma'][0],
                    size=ens_params['ens_num'])
                ensemble[:sys_vars.U_crop_size] = (U.ravel()[:, None]
                                                   + random_nums[None, :])
                random_nums = np.random.normal(
                    loc=0,
                    scale=ens_params['winds_sigma'][1],
                    size=ens_params['ens_num'])
                ensemble[sys_vars.U_crop_size:
                         sys_vars.wind_size] = (
                             V.ravel()[:, None]
                             + random_nums[None, :])
            else:
                # Deterministic single-member run: plain replacement.
                ensemble[:sys_vars.U_crop_size] = U.ravel()[:, None]
                ensemble[sys_vars.U_crop_size:
                         sys_vars.wind_size] = V.ravel()[:, None]
    else:
        div_wrf_flag = False
    return ensemble, div_wrf_flag
def smooth_winds(U, V):
    """Gaussian-smooth both wind components with a fixed kernel width.

    The heavy smoothing (sigma=60 grid cells) keeps only the large-scale
    structure of the WRF winds before they are assimilated or inserted.

    Args:
        U, V: 2-D arrays of wind components on the (staggered) model grid.

    Returns:
        Tuple of the smoothed (U, V) arrays (new arrays; inputs unchanged).
    """
    # ``scipy.ndimage.filters`` is a deprecated alias namespace; call
    # ``gaussian_filter`` from ``scipy.ndimage`` directly (same behavior).
    U = sp.ndimage.gaussian_filter(U, sigma=60)
    V = sp.ndimage.gaussian_filter(V, sigma=60)
    return U, V
def return_opt_flow(*, coords, time_index, sat_time, data_file_path, sys_vars):
    """Derive sparse optical-flow wind vectors from two satellite images.

    Uses the images at the previous and current satellite times, plus the
    WRF winds at the matching wind time, and returns only the vectors that
    fall inside the cropped domain.

    Returns:
        (u_opt_flow, v_opt_flow, u_flat_pos, v_flat_pos): vector components
        and their flat indices into the cropped U/V grids.  All four are
        empty arrays when no vectors were found.
    """
    # Retrieve OPT_FLOW vectors
    wind_time = return_wind_time(sat_time=sat_time, coords=coords)
    time0 = coords.sat_times[time_index - 1]
    this_U, this_V = return_single_time(data_file_path, coords.wind_times,
                                        wind_time,
                                        [slice(None), slice(None)],
                                        [slice(None), slice(None)],
                                        ['U', 'V'])
    image0 = return_single_time(data_file_path, coords.sat_times_all,
                                time0, [slice(None)], [slice(None)],
                                ['ci'])[0]
    image1 = return_single_time(data_file_path, coords.sat_times_all,
                                sat_time, [slice(None)], [slice(None)],
                                ['ci'])[0]
    u_opt_flow, v_opt_flow, pos = optical_flow(image0, image1,
                                               time0, sat_time,
                                               this_U, this_V)
    if u_opt_flow.size == 0:
        # No trackable features between the two images.
        nothing = np.array([])
        return nothing, nothing, nothing, nothing
    del this_U, this_V, image0, image1
    pos = pos*4  # optical flow done on coarse grid; scale back to full grid
    # need to select only pos in crop domain; convert to crop
    keep = np.logical_and(
        np.logical_and(pos[:, 0] > coords.we_slice.start,
                       pos[:, 0] < coords.we_slice.stop),
        np.logical_and(pos[:, 1] > coords.sn_slice.start,
                       pos[:, 1] < coords.sn_slice.stop))
    pos = pos[keep]
    u_opt_flow = u_opt_flow[keep]
    v_opt_flow = v_opt_flow[keep]
    # Shift coordinates so they are relative to the crop origin.
    pos[:, 0] -= coords.we_slice.start
    pos[:, 1] -= coords.sn_slice.start
    # Transpose and flip so the order becomes (row, col) as required by
    # np.ravel_multi_index below.
    pos = pos.T
    pos = pos[::-1]
    u_opt_flow_flat_pos = np.ravel_multi_index(pos, sys_vars.U_crop_shape)
    v_opt_flow_flat_pos = np.ravel_multi_index(pos, sys_vars.V_crop_shape)
    return u_opt_flow, v_opt_flow, u_opt_flow_flat_pos, v_opt_flow_flat_pos
def maybe_assim_opt_flow(*, ensemble, data_file_path, sat_time, time_index,
                         coords, sys_vars, flags, opt_flow):
    """Optionally assimilate (or directly insert) optical-flow wind vectors.

    Three modes, selected by ``flags``:
      * ``assim_opt_flow``: derive sparse optical-flow vectors from the two
        most recent satellite images and assimilate them with a reduced EnKF.
      * ``opt_flow``: replace the wind field with precomputed optical-flow
        winds read from ``data_opt_flow.nc``.
      * neither: return the ensemble unchanged.

    Returns:
        ``(ensemble, div_opt_flow_flag)``; in the successful
        ``assim_opt_flow`` case the tuple additionally carries the raw
        optical-flow vectors and their flat grid positions:
        ``(ensemble, flag, u_opt_flow, v_opt_flow, u_pos, v_pos)``.
    """
    if flags['assim_opt_flow']:
        logging.debug('calc opt_flow')
        returned = return_opt_flow(
            coords=coords, time_index=time_index, sat_time=sat_time,
            data_file_path=data_file_path, sys_vars=sys_vars)
        u_opt_flow, v_opt_flow = returned[:2]
        u_opt_flow_flat_pos, v_opt_flow_flat_pos = returned[2:]
        if u_opt_flow.size == 0:
            # No usable optical-flow vectors; nothing to assimilate.
            return ensemble, False
        logging.debug('assim opt_flow')
        # Physical grid coordinates in km for the localization function.
        x_temp = np.arange(sys_vars.U_crop_shape[1])*sys_vars.dx/1000  # in km
        y_temp = np.arange(sys_vars.U_crop_shape[0])*sys_vars.dy/1000
        x_temp, y_temp = np.meshgrid(x_temp, y_temp)
        ensemble[:sys_vars.U_crop_size] = reduced_enkf(
            ensemble=ensemble[:sys_vars.U_crop_size],
            observations=u_opt_flow, R_sig=opt_flow['sig'],
            flat_locations=u_opt_flow_flat_pos,
            inflation=opt_flow['infl'],
            localization=opt_flow['loc'],
            x=x_temp.ravel(), y=y_temp.ravel())
        x_temp = np.arange(sys_vars.V_crop_shape[1])*sys_vars.dx/1000
        y_temp = np.arange(sys_vars.V_crop_shape[0])*sys_vars.dy/1000
        x_temp, y_temp = np.meshgrid(x_temp, y_temp)
        ensemble[sys_vars.U_crop_size:
                 sys_vars.wind_size] = reduced_enkf(
            ensemble=ensemble[sys_vars.U_crop_size:
                              sys_vars.wind_size],
            observations=v_opt_flow, R_sig=opt_flow['sig'],
            flat_locations=v_opt_flow_flat_pos,
            inflation=opt_flow['infl'],
            localization=opt_flow['loc'],
            x=x_temp.ravel(), y=y_temp.ravel())
        return (ensemble, True, u_opt_flow, v_opt_flow,
                u_opt_flow_flat_pos, v_opt_flow_flat_pos)
    if flags['opt_flow']:
        # Insert precomputed optical-flow winds instead of assimilating.
        opt_flow_folder = os.path.split(data_file_path)[0]
        opt_flow_file = os.path.join(opt_flow_folder, 'data_opt_flow.nc')
        U, V = return_single_time(opt_flow_file, coords.sat_times_all,
                                  sat_time,
                                  [coords.sn_slice, coords.sn_stag_slice],
                                  [coords.we_stag_slice, coords.we_slice],
                                  ['U_opt_flow', 'V_opt_flow'])
        # Convert pixel displacements to m/s (250 m grid spacing) and clip
        # to physically plausible speeds.
        time_step = (sat_time - coords.sat_times[time_index - 1]).seconds
        U = U * (250 / time_step)
        V = V * (250 / time_step)
        U = U.clip(min=-50, max=50)
        V = V.clip(min=-50, max=50)
        ensemble[:sys_vars.U_crop_size] = U.ravel()[:, None]
        ensemble[sys_vars.U_crop_size:
                 sys_vars.wind_size] = V.ravel()[:, None]
        # BUG FIX: this branch previously fell through to ``return to_return``
        # without ever assigning ``to_return``, raising UnboundLocalError.
        return ensemble, True
    return ensemble, False
def maybe_load_analysis(*, sat_time, results_file_path, flags,
                        ensemble):
    """Return the stored analysis ensemble when running an analysis-forecast
    pass; otherwise return ``ensemble`` unchanged.

    The original triple-nested if/else collapses to a single condition:
    the analysis is loaded only when ``flags['analysis_fore']`` exists and
    is truthy.
    """
    if flags.get('analysis_fore'):
        return return_analysis_ensemble(
            sat_time=sat_time, results_file_path=results_file_path)
    return ensemble
def forecast_system(*, data_file_path, results_file_path,
                    date, io, flags, advect_params, ens_params, pert_params,
                    sat2sat, sat2wind, wrf, opt_flow, workers):
    """Run the full ensemble forecast/assimilation loop for one day.

    Sets up the system, produces an initial forecast, then for every
    subsequent satellite time performs the optional assimilation steps,
    removes divergence when the winds changed, advances the forecast, and
    saves each horizon's results to ``results_file_path``.
    """
    param_dict, coords, sys_vars, assim_vars, ensemble = forecast_setup(
        data_file_path=data_file_path, date=date, io=io,
        flags=flags, advect_params=advect_params,
        ens_params=ens_params, pert_params=pert_params,
        sat2sat=sat2sat, sat2wind=sat2wind, wrf=wrf,
        opt_flow=opt_flow, results_file_path=results_file_path)
    # Always remove divergence from the freshly initialized winds.
    remove_div_flag = True
    ensemble = preprocess(
        ensemble=ensemble, flags=flags,
        remove_div_flag=remove_div_flag,
        coords=coords, sys_vars=sys_vars)
    # Initial forecast from the first satellite time, before any assimilation.
    time_index = 0
    sat_time = coords.sat_times[time_index]
    ensemble_array, save_times, ensemble = forecast(
        ensemble=ensemble, sat_time=sat_time,
        flags=flags, coords=coords, time_index=time_index,
        sys_vars=sys_vars,
        advect_params=advect_params, pert_params=pert_params,
        assim_vars=assim_vars, workers=workers)
    save(ensemble_array=ensemble_array, coords=coords,
         ens_params=ens_params, param_dict=param_dict,
         sys_vars=sys_vars, save_times=save_times,
         results_file_path=results_file_path,
         flags=flags)
    for time_index in range(1, coords.sat_times.size):
        sat_time = coords.sat_times[time_index]
        logging.info(str(sat_time))
        ensemble = maybe_load_analysis(
            sat_time=sat_time, flags=flags,
            ensemble=ensemble, results_file_path=results_file_path)
        ensemble = maybe_assim_sat2sat(
            ensemble=ensemble, data_file_path=data_file_path,
            sat_time=sat_time, coords=coords, sys_vars=sys_vars,
            flags=flags)
        ensemble, div_sat2wind_flag = maybe_assim_sat2wind(
            ensemble=ensemble, data_file_path=data_file_path,
            sat_time=sat_time, coords=coords, sys_vars=sys_vars,
            assim_vars=assim_vars, sat2wind=sat2wind,
            flags=flags)
        # BUG FIX: the result used to be assigned to a misspelled name
        # (``ensmeble``), silently discarding the returned ensemble.
        ensemble, div_wrf_flag = maybe_assim_wrf(
            ensemble=ensemble, data_file_path=data_file_path,
            sat_time=sat_time, coords=coords, sys_vars=sys_vars,
            assim_vars=assim_vars, wrf=wrf,
            ens_params=ens_params,
            flags=flags)
        # maybe_assim_opt_flow may append extra diagnostics after the first
        # two elements, so unpack defensively instead of a strict 2-tuple.
        opt_flow_result = maybe_assim_opt_flow(
            ensemble=ensemble, data_file_path=data_file_path,
            sat_time=sat_time, time_index=time_index,
            coords=coords, sys_vars=sys_vars,
            flags=flags, opt_flow=opt_flow)
        ensemble, div_opt_flow_flag = opt_flow_result[:2]
        # Divergence removal is needed whenever any step touched the winds.
        remove_div_flag = (div_sat2wind_flag
                           or div_wrf_flag
                           or div_opt_flow_flag)
        ensemble = preprocess(
            ensemble=ensemble, flags=flags,
            remove_div_flag=remove_div_flag,
            coords=coords, sys_vars=sys_vars)
        ensemble_array, save_times, ensemble = forecast(
            ensemble=ensemble, sat_time=sat_time,
            flags=flags, coords=coords, time_index=time_index,
            sys_vars=sys_vars,
            advect_params=advect_params, pert_params=pert_params,
            assim_vars=assim_vars, workers=workers)
        save(ensemble_array=ensemble_array, coords=coords,
             ens_params=ens_params, param_dict=param_dict,
             sys_vars=sys_vars, save_times=save_times,
             results_file_path=results_file_path,
             flags=flags)
| null | letkf_forecasting/letkf_forecasting.py | letkf_forecasting.py | py | 29,803 | python | en | code | null | code-starcoder2 | 51 |
477285979 | #!/usr/bin/python
# -*- coding:utf-8 -*-
from Web import db
import hashlib
from datetime import datetime
from Web.models import *
from decimal import *
import xlrd
if __name__ == '__main__':
    # One-shot seeding script: drops and recreates every table, then fills
    # the menus, admin user, asset categories, manager types, and finally
    # imports departments, suppliers, and assets from an Excel workbook.
    db.drop_all()
    db.create_all()
    # System menus (pid=0 marks a top-level menu, pid=1 a child of menu 1).
    # Example: m = Menu(name='用户管理',link='#',url='',pid=1,published=True,order=0)
    menus = [
        Menu(name='系统管理', link='fa fa-th-larg', url='#', pid=0, published=True, order=0),
        Menu(name='用户管理', link='', url='/user', pid=1, published=True, order=0),
        Menu(name='角色管理', link='', url='#', pid=1, published=True, order=0),
        Menu(name='菜单管理', link='', url='#', pid=1, published=True, order=0),
        Menu(name='权限管理', link='', url='#', pid=1, published=True, order=0),
        Menu(name='会员管理', link='fa', url='#', pid=0, published=True, order=0),
        Menu(name='人力资源', link='', url='#', pid=1, published=True, order=0)
    ]
    for m in menus:
        db.session.add(m)
    # Built-in admin user.
    # NOTE(review): unsalted MD5 password hashing is insecure; consider a
    # proper password hasher (e.g. werkzeug.security) in a follow-up.
    u = User(name='admin', email='@', nickname='管理员', password=hashlib.new("md5", "admin".encode("utf-8")).hexdigest(),
             reg_time=datetime.now(), status=0, deleted=False)
    db.session.add(u)
    # Asset categories (name is misspelled "Categorys" — kept for history).
    Categorys = [
        AssetCategory(name='医疗设备', code='01'),
        AssetCategory(name='医疗家具', code='02'),
        AssetCategory(name='办公家具', code='03'),
        AssetCategory(name='电子及办公设备', code='04'),
        AssetCategory(name='办公车辆', code='05')
    ]
    for c in Categorys:
        db.session.add(c)
    db.session.commit()
    # Asset manager types referenced by the detail sheet below.
    types = [
        ManagerType(name='在帐资产'),
        ManagerType(name='在帐资产(行政)'),
        ManagerType(name='报废资产'),
        ManagerType(name='暂存资产'),
        ManagerType(name='实物资产'),
        ManagerType(name='临时资产'),
        ManagerType(name='实物报废')
    ]
    for t in types:
        db.session.add(t)
    db.session.commit()
    # Import master data from the asset-list workbook (sheets: departments,
    # suppliers, and the fixed-asset detail list).
    excel = xlrd.open_workbook(u"资产清单.xls")
    sheetDepartment = excel.sheet_by_name(u"部门")
    sheetSupplier = excel.sheet_by_name(u"供货商")
    sheetDetail = excel.sheet_by_name(u"固定资产清单")
    for iRow in range(sheetDepartment.nrows):
        d = Department(name=sheetDepartment.cell(iRow, 0).value)
        db.session.add(d)
    db.session.commit()
    # Suppliers are not committed here; the queries below rely on the
    # session's autoflush to see the pending rows.
    for iRow in range(sheetSupplier.nrows):
        c = Supplier(name=sheetSupplier.cell(iRow, 0).value)
        db.session.add(c)
    # Detail rows start at row 4; an empty code cell (ctype 0) ends the list.
    for iRow in range(4, sheetDetail.nrows):
        if sheetDetail.cell(iRow, 1).ctype == 0:
            print(sheetDetail.cell(iRow, 0).value)
            break
        supplier = Supplier.query.filter(Supplier.name == sheetDetail.cell(iRow, 2).value).first()
        print(sheetDetail.cell(iRow, 1).ctype)
        # print None if supplier is None else supplier.id
        print(iRow)
        # Column layout: 1=code, 2=supplier, 3=manager type, 4=category,
        # 5=name, 7=purchase date, 8=original value, 9=depreciation years,
        # 10=department, 11=position, 12=remark.
        asset = Asset(code=sheetDetail.cell(iRow, 1).value,
                      name=sheetDetail.cell(iRow, 5).value,
                      supplier_id=None if supplier is None else supplier.id,
                      manager_type_id=ManagerType.query.filter(
                          ManagerType.name == sheetDetail.cell(iRow, 3).value).first().id,
                      asset_category_id=AssetCategory.query.filter(
                          AssetCategory.name == sheetDetail.cell(iRow, 4).value).first().id,
                      purchase_date=xlrd.xldate.xldate_as_datetime(sheetDetail.cell(iRow, 7).value, 0).date(),
                      original_value=Decimal(sheetDetail.cell(iRow, 8).value),
                      depreciation_year=int(sheetDetail.cell(iRow, 9).value),
                      department_id=Department.query.filter(
                          Department.name == sheetDetail.cell(iRow, 10).value).first().id,
                      position=sheetDetail.cell(iRow, 11).value,
                      remark=sheetDetail.cell(iRow, 12).value
                      )
        db.session.add(asset)
        # print Supplier.query.filter(Supplier.name == sheetDetail.cell(iRow, 2).value).first().id
        # print xlrd.xldate.xldate_as_datetime(sheetDetail.cell(iRow, 7).value, 0).date()
    db.session.commit()
| null | db_init.py | db_init.py | py | 4,262 | python | en | code | null | code-starcoder2 | 51 |
631946645 | # Installation requirements prior to using this rule:
# 1) pip binary: `pip`
# 2) For pypi package with C extensions or system dependecies,
# make sure to build on host with same setup or build in docker
# Binary dependencies needed for pypi repo setup; checked up front by
# _check_dependencies before any package install is attempted.
DEPS = ["pip", "sed", "basename"]
def _execute(ctx, command):
    """Runs ``command`` through ``bash -c`` and returns the exec_result."""
    # ``set -ex`` echoes each command and aborts on the first failure so the
    # captured stdout/stderr pinpoints what went wrong.
    return ctx.execute(["bash", "-c", """
set -ex
%s""" % command])
def _check_dependencies(ctx):
    """Fails fast when any required host binary (see DEPS) is not on PATH."""
    for dep in DEPS:
        if ctx.which(dep) == None:
            fail("%s requires %s as a dependency. Please check your PATH." % (ctx.name, dep))
# Top-level BUILD file content for the generated repository; {pypi_packages}
# receives one filegroup stanza per installed package.
_pypi_repository_template = """
# DO NOT EDIT: automatically generated BUILD file for pypi_repository rule
package(default_visibility = ["//visibility:public"])
{pypi_packages}
"""
# One filegroup per package; {glob} is the glob(...) expression produced by
# _generate_glob_content covering the package's installed files.
_pypi_package_template = """
filegroup(
    name = '{name}',
    srcs = {glob},
)"""
def _install_package(repository_ctx, package_ctx):
    """Installs one pypi package into its target directory via pip.

    Also writes a signature file containing the artifact string so that
    _need_install can skip the install on the next fetch when nothing
    changed.
    """
    # Install package into specific target
    print("install %s..." % package_ctx.artifact)
    command = "pip install --no-deps --ignore-installed --target={target} {artifact}".format(
        target=package_ctx.target,
        artifact=package_ctx.artifact,
    )
    if repository_ctx.attr.conf:
        # BUG FIX: the label lives on ``repository_ctx.attr``; the previous
        # ``repository_ctx.conf`` access failed whenever a conf was supplied.
        conf = repository_ctx.path(repository_ctx.attr.conf)
        command = "PIP_CONFIG_FILE={conf} {pipinstall}".format(
            conf=conf,
            pipinstall=command,
        )
    result = _execute(repository_ctx, command)
    if result.return_code != 0:
        fail("%s\n%s\nFailed to install package %s" % (result.stdout, result.stderr, package_ctx.artifact))
    # Generate signature file, which will be used to decide if install is necessary next time
    result = _execute(repository_ctx, "echo '%s' > %s" % (package_ctx.artifact, package_ctx.signature))
    if result.return_code != 0:
        fail("%s\n%s\nFailed to generate signature for %s" % (result.stdout, result.stderr, package_ctx.artifact))
def _create_symlinks(repository_ctx, package_ctx):
    """Symlinks the package's installed files into the repository root so the
    generated BUILD file can reference them with plain relative paths."""
    # Create symlinks
    result = _execute(repository_ctx, "cd %s && ln -s %s/* ." %(package_ctx.current, package_ctx.target))
    if result.return_code != 0:
        fail("%s\n%s\nFailed to create symlinks for %s" % (result.stdout, result.stderr, package_ctx.artifact))
def _uninstall_package(repository_ctx, package_ctx):
    """Removes a stale install: its symlinks, install dir, and signature."""
    # delete all the symlinks (one per top-level entry of the target dir)
    command = "find %s -maxdepth 1 -mindepth 1 -exec basename {} \;" % package_ctx.target
    files_str = _execute(repository_ctx, command).stdout
    for file_str in files_str.splitlines():
        _execute(repository_ctx, "rm -fr %s/%s" % (package_ctx.current, file_str))
    # delete package_ctx.target dir
    _execute(repository_ctx, "rm -fr %s" % package_ctx.target)
    # delete signature file
    _execute(repository_ctx, "rm -fr %s" % package_ctx.signature)
def _dir_exists(repository_ctx, path):
    """True when ``path`` is an existing directory on the host."""
    return _execute(repository_ctx, "[[ -d %s ]] && exit 0 || exit 1" % path).return_code == 0
def _file_exists(repository_ctx, path):
    """True when ``path`` is an existing regular file on the host."""
    return _execute(repository_ctx, "[[ -f %s ]] && exit 0 || exit 1" % path).return_code == 0
def _need_install(repository_ctx, package_ctx):
    """Decides whether the package must be (re)installed.

    Skips the install when the target dir exists and its signature file
    matches the requested artifact; otherwise wipes any stale install first.
    """
    if not _dir_exists(repository_ctx, package_ctx.target):
        return True
    if _file_exists(repository_ctx, package_ctx.signature):
        signature = _execute(repository_ctx, "cat %s"% package_ctx.signature).stdout
        if signature:
            signature = signature.strip("\n")
            if package_ctx.artifact == signature:
                # signature matched, skip install
                print("signature matched, skip install of %s" % package_ctx.artifact)
                return False
    # Stale or unreadable install: remove it so a fresh install can proceed.
    _uninstall_package(repository_ctx, package_ctx)
    return True
def _generate_glob_content(repository_ctx, package_ctx):
    """Builds a glob(...) expression covering the package's installed files.

    Example result:
    glob(['nose_socket_whitelist-1.0.0.egg-info','socketwhitelist/**',])
    """
    result = "glob(["
    # Top-level files are listed individually by name ...
    command = "find %s -maxdepth 1 -mindepth 1 -type f -exec basename {} \;" % package_ctx.target
    files_str = _execute(repository_ctx, command).stdout
    for file_str in files_str.splitlines():
        result += "'%s'," % file_str
    # ... and top-level directories are globbed recursively.
    command = "find %s -maxdepth 1 -mindepth 1 -type d -exec basename {} \;" % package_ctx.target
    dirs_str = _execute(repository_ctx, command).stdout
    for dir_str in dirs_str.splitlines():
        result += "'%s/**'," % dir_str
    return result + "])"
def _create_package_ctx(repository_ctx, artifact):
    """Derives the per-package names and filesystem paths from a pip
    requirement string and returns them as a struct."""
    # expect artifact in the format of: pytest==2.3.5, nose-socket-whitelist, mock>=1.0.1 etc.
    # convert to bazel rule name such as pytest, nose_socket_whitelist, mock etc.
    name = _execute(repository_ctx, "echo '%s' | sed 's/[=><].*//'" % artifact).stdout
    name = name.strip("\n").replace("-", "_")
    # repo root dir
    repo = repository_ctx.path(repository_ctx.attr.repo)
    repo = _execute(repository_ctx, "dirname %s" % repo).stdout.strip("\n")
    return struct(
        # package name, such as nose_socket_whitelist
        name = name,
        # package artifact, such as pytest==2.3.5
        artifact = artifact,
        repo = repo,
        # package actual install directory, such as thirdparty/python/pypirepo/_mock/
        target = "%s/%s" % (repo, name),
        # package signature file path, such as thirdparty/python/pypirepo/_mock.sig
        signature = "%s/%s.sig" % (repo, name),
        # current directory, such as <BAZEL_EXTERNAL>/pypi/
        current = repository_ctx.path("."),
    )
def _pypi_repository_impl(repository_ctx):
    """Repository rule implementation: installs every requested pypi package
    and emits a BUILD file with one filegroup per package."""
    # Ensure that we have all of the dependencies installed
    _check_dependencies(repository_ctx)
    # Install all the packages and generate all the filegroup rules
    filegroup_rule_contents = []
    for package in repository_ctx.attr.packages:
        package_ctx = _create_package_ctx(repository_ctx, package)
        if _need_install(repository_ctx, package_ctx):
            _install_package(repository_ctx, package_ctx)
            _create_symlinks(repository_ctx, package_ctx)
        # The glob is regenerated even when the install was skipped, so the
        # BUILD file always reflects the on-disk contents.
        glob_content = _generate_glob_content(repository_ctx, package_ctx)
        filegroup_rule_contents.append(_pypi_package_template.format(name=package_ctx.name, glob=glob_content))
    # Create final build file (not executable)
    pypi_repo_build_content = _pypi_repository_template.format(pypi_packages="\n".join(filegroup_rule_contents))
    repository_ctx.file('BUILD', pypi_repo_build_content, False)
# Public repository rule: fetches and installs the listed pypi packages into
# an external repository and exposes each one as a filegroup target.
pypi_repository = repository_rule(
    implementation=_pypi_repository_impl,
    attrs={
        # Provide customized pip.conf as needed, default is pip system defaults.
        "conf": attr.label(default = None),
        # A complete list of pypi packages, including transitive ones, is required in this single file.
        # Packages are in the format of: pytest==2.3.5, nose-socket-whitelist, mock>=1.0.1 etc.
        "packages": attr.string_list(default = []),
        # pypi repo root
        "repo": attr.label(default=Label("//thirdparty/python:.pypirepo/placeholder.txt")),
    },
    local=False,
)
| null | tools/rules/pypi_repository.bzl | pypi_repository.bzl | bzl | 6,929 | python | en | code | null | code-starcoder2 | 51 |
84085395 | # coding=utf-8
# Copyright 2020 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Metric-based learners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin.tf
from meta_dataset.learners import base as learner_base
from meta_dataset.models import functional_backbones
import tensorflow.compat.v1 as tf
def _compute_prototypes(embeddings, labels):
  """Computes class prototypes over the last dimension of embeddings.

  Args:
    embeddings: Tensor of examples of shape [num_examples, embedding_size].
    labels: Tensor of one-hot encoded labels of shape [num_examples,
      num_classes].

  Returns:
    prototypes: Tensor of class prototypes of shape [num_classes,
      embedding_size].
  """
  onehot = tf.cast(labels, tf.float32)
  # [num examples, num classes, 1] membership mask.
  membership = tf.expand_dims(onehot, 2)
  # Mask each example's embedding by its class membership and sum per class:
  # [num classes, embedding size].
  class_sums = tf.reduce_sum(membership * tf.expand_dims(embeddings, 1), 0)
  # Per-class example counts, shape [num classes, 1] (broadcasts below).
  class_counts = tf.reduce_sum(membership, 0)
  # The prototype of each class is the mean embedding of its examples.
  return class_sums / class_counts
def compute_prototypes(embeddings, labels):
  """Computes class prototypes over features.

  Flattens and reshapes the features if they are not already flattened.

  Args:
    embeddings: Tensor of examples of shape [num_examples, embedding_size] or
      [num_examples, spatial_dim, spatial_dim n_features].
    labels: Tensor of one-hot encoded labels of shape [num_examples,
      num_classes].

  Returns:
    prototypes: Tensor of class prototypes of shape [num_classes,
      embedding_size] (or [num_classes, *feature_shape] for spatial input).
  """
  if len(embeddings.shape) <= 2:
    # Already flat; average directly.
    return _compute_prototypes(embeddings, labels)
  # Spatial features: flatten per example, average, then restore the shape.
  feature_shape = embeddings.shape.as_list()[1:]
  num_examples = tf.shape(embeddings)[0]
  num_classes = tf.shape(labels)[-1]
  flat_embeddings = tf.reshape(embeddings, [num_examples, -1])
  flat_prototypes = _compute_prototypes(flat_embeddings, labels)
  return tf.reshape(flat_prototypes, [num_classes] + feature_shape)
class MetricLearner(learner_base.EpisodicLearner):
  """A learner that uses a learned distance metric to make predictions."""
  def __init__(self, **kwargs):
    """Initializes the learner; kwargs are forwarded to `EpisodicLearner`."""
    super(MetricLearner, self).__init__(**kwargs)
    # `MetricLearner` subclasses don't require a pre-specified
    # output dimensionality.
    delattr(self, 'logit_dim')
  def forward_pass(self, data):
    """Embeds all (training and testing) images of the episode.

    Args:
      data: A `meta_dataset.providers.Episode` containing the data for the
        episode.

    Returns:
      The predictions for the query set within the episode.
    """
    # Compute the support set's mean and var and use these as the moments for
    # batch norm on the query set (non-transductive evaluation).
    support_embeddings_dict = self.embedding_fn(
        data.support_images,
        self.is_training,
        keep_spatial_dims=self.keep_spatial_dims)
    support_embeddings = support_embeddings_dict['embeddings']
    support_set_moments = None
    if not self.transductive_batch_norm:
      # NOTE(review): `transductive_batch_norm` / `backprop_through_moments`
      # presumably come from the learner base/config — confirm there.
      support_set_moments = support_embeddings_dict['moments']
    query_embeddings_dict = self.embedding_fn(
        data.query_images,
        self.is_training,
        moments=support_set_moments,
        keep_spatial_dims=self.keep_spatial_dims,
        backprop_through_moments=self.backprop_through_moments)
    query_embeddings = query_embeddings_dict['embeddings']
    # The metric itself is supplied by the subclass.
    query_logits = self.compute_logits(
        support_embeddings,
        query_embeddings,
        data.onehot_support_labels,
    )
    return query_logits
  def compute_logits(self, support_embeddings, query_embeddings,
                     onehot_support_labels):
    # Subclasses implement the actual metric (distances, attention over
    # support examples, relation scores, ...).
    raise NotImplementedError('Abstract method.')
@gin.configurable
class PrototypicalNetworkLearner(MetricLearner):
  """A Prototypical Network."""
  keep_spatial_dims = False
  def compute_logits(self, support_embeddings, query_embeddings,
                     onehot_support_labels):
    """Computes the negative distances of each query point to each prototype."""
    prototypes = compute_prototypes(support_embeddings, onehot_support_labels)
    # Broadcast to [num_query, num_classes, embedding_size] pairwise diffs.
    pairwise_diffs = (tf.expand_dims(query_embeddings, 1)
                      - tf.expand_dims(prototypes, 0))
    # Squared Euclidean distance per (query, class) pair; negate so that the
    # nearest prototype receives the highest logit.
    squared_distances = tf.reduce_sum(tf.square(pairwise_diffs), 2)
    return -squared_distances
@gin.configurable
class MatchingNetworkLearner(MetricLearner):
  """A Matching Network."""
  keep_spatial_dims = False
  def __init__(self, exact_cosine_distance, **kwargs):
    """Initializes the Matching Networks instance.

    Args:
      exact_cosine_distance: If True then the cosine distance is used, otherwise
        the query set embeddings are left unnormalized when computing the dot
        product.
      **kwargs: Keyword arguments common to all `MetricLearner`s.
    """
    self.exact_cosine_distance = exact_cosine_distance
    super(MatchingNetworkLearner, self).__init__(**kwargs)
  def compute_logits(self, support_embeddings, query_embeddings,
                     onehot_support_labels):
    """Computes the class logits.

    Probabilities are computed as a weighted sum of one-hot encoded training
    labels. Weights for individual support/query pairs of examples are
    proportional to the (potentially semi-normalized) cosine distance between
    the embeddings of the two examples.

    Args:
      support_embeddings: A Tensor of size [num_support_images, embedding dim].
      query_embeddings: A Tensor of size [num_query_images, embedding dim].
      onehot_support_labels: A Tensor of size [batch size, way].

    Returns:
      The query set logits as a [num_query_images, way] matrix.
    """
    # Undocumented in the paper, but *very important*: *only* the support set
    # embeddings is L2-normalized, which means that the distance is not exactly
    # a cosine distance. For comparison we also allow for the actual cosine
    # distance to be computed, which is controlled with the
    # `exact_cosine_distance` instance attribute.
    support_embeddings = tf.nn.l2_normalize(support_embeddings, 1, epsilon=1e-3)
    if self.exact_cosine_distance:
      query_embeddings = tf.nn.l2_normalize(query_embeddings, 1, epsilon=1e-3)
    # [num_query_images, num_support_images]
    similarities = tf.matmul(
        query_embeddings, support_embeddings, transpose_b=True)
    # Softmax over the support axis turns similarities into attention weights.
    attention = tf.nn.softmax(similarities)
    # [num_query_images, way]
    probs = tf.matmul(attention, tf.cast(onehot_support_labels, tf.float32))
    # NOTE(review): a class with no support examples would get probability 0
    # and log-probability -inf; presumably episodes guarantee every class has
    # support examples — confirm upstream.
    return tf.log(probs)
@gin.configurable
class RelationNetworkLearner(MetricLearner):
  """A Relation Network."""
  # Relation networks score spatial feature maps, so the embedding keeps its
  # spatial dimensions.
  keep_spatial_dims = True
  def compute_logits(self, support_embeddings, query_embeddings,
                     onehot_support_labels):
    """Computes the relation score of each query example to each prototype."""
    # [n_test, 21, 21, n_features].
    query_embed_shape = query_embeddings.shape.as_list()
    n_feature = query_embed_shape[3]
    out_shape = query_embed_shape[1:3]
    n_test = tf.shape(query_embeddings)[0]
    # [n_test, num_classes, 21, 21, n_feature].
    # It is okay for one of the elements in the list to be a tensor.
    prototypes = compute_prototypes(support_embeddings, onehot_support_labels)
    prototype_extended = tf.tile(
        tf.expand_dims(prototypes, 0), [n_test, 1, 1, 1, 1])
    # [n_test, num_classes, 21, 21, n_feature] (query repeated per class).
    query_f_extended = tf.tile(
        tf.expand_dims(query_embeddings, 1),
        [1, tf.shape(onehot_support_labels)[-1], 1, 1, 1])
    # Concatenate prototype and query feature maps along the channel axis.
    relation_pairs = tf.concat((prototype_extended, query_f_extended), 4)
    # relation_pairs.shape.as_list()[-3:] == [-1] + out_shape + [n_feature*2]
    relation_pairs = tf.reshape(relation_pairs,
                                [-1] + out_shape + [n_feature * 2])
    # The relation module maps each concatenated pair to a scalar score.
    relationnet_dict = functional_backbones.relation_module(relation_pairs)
    way = tf.shape(onehot_support_labels)[-1]
    relations = tf.reshape(relationnet_dict['output'], [-1, way])
    return relations
  def compute_loss(self, onehot_labels, predictions):
    """Computes the MSE loss of `predictions` with respect to `onehot_labels`.

    Args:
      onehot_labels: A `tf.Tensor` containing the the class labels; each vector
        along the class dimension should hold a valid probability distribution.
      predictions: A `tf.Tensor` containing the the class predictions,
        interpreted as unnormalized log probabilities.

    Returns:
      A `tf.Tensor` representing the average loss.
    """
    # Relation networks are trained with MSE against the one-hot targets
    # (not cross-entropy), plus any collected regularization losses.
    mse_loss = tf.losses.mean_squared_error(onehot_labels, predictions)
    regularization = tf.reduce_sum(
        tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    loss = mse_loss + regularization
    return loss
| null | meta_dataset/learners/metric_learners.py | metric_learners.py | py | 9,685 | python | en | code | null | code-starcoder2 | 51 |
from matplotlib.pyplot import *
# numpy was previously reachable only through the pyplot star-import side
# effect; import it explicitly so `np` is guaranteed to exist.
import numpy as np
import scipy.special as sp
def plt_3d(x, y, z):
    """Render z over the (x, y) meshgrid as a 3-D surface and display it."""
    surface_axes = figure().add_subplot(111, projection='3d')
    surface_axes.plot_surface(x, y, z, label='title', cmap='viridis',
                              edgecolor='none')
    show()
def integral(n, step_f, rs_f, ys_f, rs_F):
    """Order-0 discrete Hankel transform of f via the trapezoidal rule.

    Computes F(u_j) = sum_i w_i * J0(u_j * r_i) * r_i * f(r_i), where the
    trapezoid weights are w = step_f * [1/2, 1, ..., 1, 1/2].

    Args:
        n: number of sample points (length of rs_f / ys_f / rs_F).
        step_f: uniform spacing of the r samples.
        rs_f: radii at which f is sampled.
        ys_f: samples f(rs_f).
        rs_F: output frequencies u.

    Returns:
        Array of transform values F(rs_F), length n.
    """
    # Kernel matrix K[i, j] = J0(r_i * u_j) * r_i via an outer product.
    kernel = sp.j0(np.outer(rs_f, rs_F)) * rs_f[:, None]
    # Trapezoidal quadrature weights along the r axis.
    weights = np.full(n, step_f)
    weights[0] *= 0.5
    weights[-1] *= 0.5
    # Weighted sum over r: (w_i * f_i) dotted with the kernel columns.
    return (weights * ys_f) @ kernel
def draw_2d(sp_n, sp_m, sp_c, xs, ys, s):
    """Show |ys| as an image in subplot cell (sp_n, sp_m, sp_c).

    Args:
        sp_n, sp_m, sp_c: subplot grid rows, columns, and cell index.
        xs: 1-D coordinate vector; its endpoints set both axes' extent.
        ys: 2-D (possibly complex) array whose magnitude is displayed.
        s: label embedded in the LaTeX title as |s|.
    """
    extent = [xs[0], xs[-1], xs[0], xs[-1]]
    subplot(sp_n, sp_m, sp_c)
    imshow(np.abs(ys), extent=extent)
    colorbar()
    title(f'$\\left|{s}\\right|$')
def get_2d(F, shape, dtype):
    """Reshape the flat sequence ``F`` (row-major) into a 2-D array.

    Args:
        F: flat sequence of length shape[0] * shape[1].
        shape: (rows, cols) of the output.
        dtype: numpy dtype of the result.

    Returns:
        A new 2-D numpy array of the requested dtype; ``F`` is not modified.
    """
    # The original element-by-element double loop (F[j + i*cols]) is exactly
    # a row-major reshape; let numpy do it in one vectorized step.
    return np.asarray(F).astype(dtype).reshape(shape)
# Demo: build a radially symmetric test image and display it next to its
# hand-rolled Hankel transform.
n = 50
alpha = 6.0
beta = 6.0
# Square grid over [-pi, pi] x [-pi, pi].
x = np.linspace(-np.pi, np.pi, n)
y = np.linspace(-np.pi, np.pi, n)
x2d, y2d = np.meshgrid(x, y)
# Radial test profile: Gaussian envelope times a squared sine.
f = lambda r: np.exp((-r ** 2) / beta) * (np.sin(alpha * r) ** 2)
# NOTE(review): the double sqrt yields (x^2+y^2)^(1/4), i.e. the square root
# of the radius rather than the radius itself — confirm this is intentional.
r2d = np.sqrt(np.sqrt(x2d ** 2 + y2d ** 2))
f2d = f(r2d)
figure(figsize=(8, 6))
#plt_3d(x2d, y2d, np.abs(f2d))
draw_2d(2, 2, 1, x, f2d, 'sourse f')
# NOTE(review): the raveled radii are unsorted and non-uniform, while
# `integral` assumes a uniform step abs(r[1]-r[0]) — verify.
r = r2d.ravel()
F = integral(r.shape[0], abs(r[1] - r[0]), r, f2d.ravel(), r)
F2d = get_2d(F, x2d.shape, np.complex128)
#plt_3d(x2d, y2d, np.abs(F2d))
draw_2d(2, 2, 2, x, F2d, 'my Hankel')
show() | null | lab2package/main3.py | main3.py | py | 1,776 | python | en | code | null | code-starcoder2 | 51 |
305755709 | #!/usr/bin/python3
#Imports
import argparse
import os
import pyvisgraph as vg
import svggen
import minkowski
def ensure_dir(file_path):
    """Create the directory containing ``file_path`` if it does not exist.

    Fixes two defects of the naive exists()/makedirs() pattern:
    * a bare filename has no directory component (makedirs('') would raise),
    * a race between the exists check and the creation (exist_ok=True makes
      the call safe even if the directory appears in between).
    """
    directory = os.path.dirname(file_path)
    if directory:
        os.makedirs(directory, exist_ok=True)
def numtotime(num):
    """Split a (possibly negative or fractional) number of seconds into
    (hours, minutes, seconds), rounding to the nearest whole second."""
    total_seconds = round(abs(num))
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return hours, minutes, seconds
# Parse command-line arguments
parser = argparse.ArgumentParser(description="Lösung zu Lisa rennt, Aufgabe 1, Runde 2, 37. BwInf von Lukas Rost")
parser.add_argument('-i', action="store",dest="input",default="lisarennt1.txt",help="Eingabedatei")
parser.add_argument('-o',action="store",dest="output",default="lisarennt1_output.txt",help="Ausgabedatei")
parser.add_argument('-so', action="store",dest="svg",default="lisarennt1_svg.svg",help="SVG-Ausgabedatei")
parser.add_argument('-d',action="store_true",default=False,dest="debug",help="Debug-Ausgaben aktivieren")
parser.add_argument('-vlisa',action="store",dest="velocity_lisa",default=15,type=float,help="Erweiterung Geschwindigkeiten: Lisa in km/h")
parser.add_argument('-vbus',action="store",dest="velocity_bus",default=30,type=float,help="Erweiterung Geschwindigkeiten: Bus in km/h")
parser.add_argument('-minkowski',action="store",default=None,help="Erweiterung Minkowski-Summe: Eingabedatei (1 Polygon im gleichen Format wie in der normalen Eingabe)")
args = parser.parse_args()
# Convert the speeds from km/h to m/s
real_v_lisa = round(args.velocity_lisa / 3.6 ,3)
real_v_bus = round(args.velocity_bus / 3.6 ,3)
# Maximum x and y coordinates seen so far, used to size the SVG canvas
maxx = 0
maxy = 0
# Read the polygons: first line holds the count, then one polygon per line
# as "<n> x1 y1 ... xn yn"
infile = open(args.input,'r')
numpoly = int(infile.readline())
polylist = []
for i in range(numpoly):
    pointlist = []
    line = infile.readline().split(" ")
    line = [float(x) for x in line]
    index = 1
    for j in range(int(line[0])):
        maxx = max(maxx,line[index])
        maxy = max(maxy,line[index+1])
        pointlist.append(vg.Point(line[index],line[index+1],polygon_id=("P" + str(i+1))))
        index += 2
    polylist.append(pointlist)
# Read Lisa's start position
pos = infile.readline().split(" ")
pos = [float(x) for x in pos]
lisa = vg.Point(pos[0],pos[1],polygon_id="L")
infile.close()
maxx = max(maxx,pos[0])
maxy = max(maxy,pos[1])
# Extension "Minkowski sum": inflate every obstacle by Lisa's (negated) shape
if args.minkowski is not None:
    minfile = open(args.minkowski,'r')
    lisa_poly = []
    line = minfile.readline().split(" ")
    minfile.close()
    line = [float(x) for x in line]
    index = 1
    for j in range(int(line[0])):
        lisa_poly.append(vg.Point(-line[index],-line[index+1]))
        index += 2
    polylist = minkowski.minkowski_sum_list(polylist,lisa_poly)
# Build the visibility graph and run the shortest-path algorithm
graph = vg.VisGraph(real_v_lisa,real_v_bus,lisa)
graph.build(polylist)
path,mintime,min_bus_time, minpoint,dist_minpoint = graph.shortest_path()
# Debug output: dump the full visibility graph as an SVG
if args.debug:
    outpath = os.path.dirname(args.input) + "/out/debug/" + os.path.basename(args.input).split(".")[0]
    ensure_dir(outpath)
    svgfile = open(outpath + "-visgraph.svg","w")
    svgfile.write(svggen.gen_vis_svg(graph.get_visgraph(),polylist,lisa,maxx+200,maxy+500))
    svgfile.close()
# SVG output of the chosen route
svgfile = open(args.svg,"w")
svgfile.write(svggen.gen_output_svg(path,polylist,lisa,maxx+200,maxy+500))
svgfile.close()
# Text output
outtext = ""
hours, minutes, seconds = numtotime(mintime)
# Normal case: start time before 7:30 (mintime is negative)
if mintime < 0:
    hours = 7 - hours
    minutes = 30 - minutes
    if seconds != 0:
        minutes -= 1
        seconds = 60 - seconds
# Start time after 7:30
else:
    hours = 7 + hours
    minutes = 30 + minutes
bhours, bminutes, bseconds = numtotime(min_bus_time)
bhours = 7 + bhours
bminutes = 30 + bminutes
outtext += "Lisa startet um {:02d}:{:02d}:{:02d} und erreicht den Bus um {:02d}:{:02d}:{:02d}.\n".format(int(round(hours)), int(round(minutes)), int(round(seconds)),int(round(bhours)), int(round(bminutes)), int(round(bseconds)))
outtext += "Sie trifft bei der y-Koordinate {} auf den Bus.\n".format(minpoint.y)
outtext += "Die Route dauert {:0.2f} Minuten und ist {:0.2f} Meter lang.\n".format(dist_minpoint/(real_v_lisa*60),dist_minpoint)
outtext += "Die Route besteht aus folgenden Punkten:\n"
for point in path:
    outtext += "{} {} {}\n".format(point.x,point.y,point.real_polygon_id)
outfile = open(args.output,"w")
outfile.write(outtext)
outfile.close() | null | Aufgabe1-Implementierung/main.py | main.py | py | 4,478 | python | en | code | null | code-starcoder2 | 51 |
248284098 | import numpy as np
import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
class Job(object):
    """
    A class used to bundle train/test data together with the model to be fit.

    Attributes
    ----------
    model : torch.nn.Module
        Pytorch model to fit
    loaders : dict
        Contains data loaders for train/test data
    device :
        Checks whether GPUs are available
    verbose : bool
        Controls verbosity of the output

    Methods
    -------
    train_model
        Trains the model over the data yielded by the training loader
    test_model
        Tests the model over the data yielded by the test loader
    get_losses
        Evaluates the loss function over the train and test sets
    """

    def __init__(self, model, loader_train, loader_test, verbose=True):
        """
        Parameters
        ----------
        model : torch.nn.Module
            Pytorch model to optimize
        loader_train : torch.utils.data.DataLoader
            Data loader for training data
        loader_test : torch.utils.data.DataLoader
            Data loader for testing data
        verbose : bool
            Controls verbosity of the output
        """
        assert all([model, loader_train, loader_test]), \
            'Model and loaders must not be None.'
        self.model = model
        self.loaders = {
            'train': loader_train,
            'test': loader_test
        }

        # Check for GPU acceleration
        # NOTE(review): self.device is recorded but the model/tensors are
        # never moved to it in this class — confirm callers handle placement.
        self.device = torch.device("cuda:0"
                                   if torch.cuda.is_available()
                                   else "cpu")

        # Job settings
        self.verbose = verbose

    def get_losses(self, criterion=nn.CrossEntropyLoss()):
        """Evaluate loss function over train and test sets.

        Parameters
        ----------
        criterion : Loss function
            Criterion to use for minimization

        Returns
        -------
        train_loss : float
            Average loss evaluated over the training set
        test_loss : float
            Average loss evaluated over the test set
        """
        # Eval mode + no_grad: no dropout/batch-norm updates, no autograd graph.
        self.model.eval()
        with torch.no_grad():
            # Per-batch losses averaged over batches (not over samples).
            train_loss = np.array([
                criterion(self.model(inputs), labels).item()
                for (inputs, labels) in self.loaders['train']
            ]).mean()
            test_loss = np.array([
                criterion(self.model(inputs), labels).item()
                for (inputs, labels) in self.loaders['test']
            ]).mean()
        return train_loss, test_loss

    def train_model(self,
                    opt=optim.Adam,
                    criterion=nn.CrossEntropyLoss(),
                    epochs=3,
                    lr=0.0001,
                    stride_print=1000,
                    training_curves=False,
                    dir_data=None):
        """Train the model.

        Parameters
        ----------
        opt : Pytorch Optimizer object
            Optimizer to use
        criterion : Loss function
            Criterion to use for minimization
        epochs : int
            Number of epochs for training
        lr : float
            Learning rate to pass to the optimizer
        stride_print : int
            Number of batches between progress printouts
        training_curves : bool
            Whether to generate and save loss curves
        dir_data :
            Path to directory in which to save loss data
        """
        if not (self.loaders['train'] and self.loaders['test']):
            raise AttributeError('Data loaders have not been initialized.')

        # Whether to save loss data
        if training_curves:
            assert dir_data is not None, 'Specify where to save loss data.'
            assert os.path.exists(dir_data), 'Specified directory does not exist.'
            losses = []

        # Instantiate optimizer and set model to train mode
        optimizer = opt(self.model.parameters(), lr=lr)

        # Train and monitor loss
        # Note: Structure mirrors Pytorch tutorial @
        # https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
        for i_epoch in range(epochs):
            # Re-enable train mode each epoch (get_losses switches to eval).
            self.model.train()
            running_loss = 0.0
            for i_data, data in enumerate(self.loaders['train']):
                # Evaluate outputs, perform backprop, and take a step
                inputs, labels = data
                optimizer.zero_grad()
                outputs = self.model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()

                # Monitor progress: print average loss every stride_print batches.
                if i_data % stride_print == stride_print - 1 and self.verbose:
                    print('[%d, %5d] loss: %.3f' %
                          (i_epoch + 1, i_data + 1, running_loss / stride_print))
                    sys.stdout.flush()
                    running_loss = 0.0
            if training_curves:
                # One (train_loss, test_loss) pair per epoch.
                losses.append( self.get_losses(criterion=criterion) )
        if training_curves:
            losses = np.array(losses)
            fn_save = os.path.join(dir_data, 'loss_curves.npy')
            np.save(fn_save, losses)

    def test_model(self):
        """Evaluate the model over the test set and print accuracy."""
        # Set eval mode
        self.model.eval()

        # Accumulate stats
        total, correct = 0, 0
        with torch.no_grad():
            for data in self.loaders['test']:
                inputs, labels = data
                outputs = self.model(inputs)
                # Predicted class = argmax over class scores.
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        # Print accuracy
        # NOTE(review): the message hard-codes "10000 test images" regardless
        # of the actual loader size — confirm intended.
        acc = 100 * correct / total
        print('Accuracy of the network on the 10000 test images: %d %%' % (acc))
| null | aether/job.py | job.py | py | 5,876 | python | en | code | null | code-starcoder2 | 51 |
308379667 | import random
from datetime import datetime
# Stores Music library and returns appropriate songs
# Song-id pools per colour theme.  A "theme" is an index into the LOWS /
# MIDS / HIGHS lists below; indices 0..6 map to Monday..Sunday via
# datetime.weekday() unless SET_THEME overrides the choice.
RED_LOW = range(1000,1004)
RED_MID = range(1,60)
RED_HIGH = range(61,85)

ORANGE_LOW = []
ORANGE_MID = [1, 8, 12]
ORANGE_HIGH = [7, 11]

YELLOW_LOW = []
YELLOW_MID = [5]
YELLOW_HIGH = [6]

GREEN_LOW = []
GREEN_MID = [7]
GREEN_HIGH = [8]

BLUE_LOW = []
BLUE_MID = [9]
BLUE_HIGH = [10]

PURPLE_LOW = []
PURPLE_MID = [11]
PURPLE_HIGH = [12]

WHITE_LOW = []
WHITE_MID = [13]
WHITE_HIGH = [14]

MIDS = [RED_MID, ORANGE_MID, YELLOW_MID, GREEN_MID, BLUE_MID, PURPLE_MID, WHITE_MID]
HIGHS = [RED_HIGH, ORANGE_HIGH, YELLOW_HIGH, GREEN_HIGH, BLUE_HIGH, PURPLE_HIGH, WHITE_HIGH]
LOWS = [RED_LOW, ORANGE_LOW, YELLOW_LOW, GREEN_LOW, BLUE_LOW, PURPLE_LOW, WHITE_LOW]

MEDITATIONS = []

# When >= 0 this value overrides the day-of-week theme everywhere.
SET_THEME = 0


def find_low(theme=None):
    """Return a random low-energy song id for the active theme.

    Bug fix: the old default ``datetime.today().weekday()`` was evaluated
    once at import time; ``None`` now means "today's weekday", resolved at
    call time.  SET_THEME >= 0 still takes precedence over *theme*.
    """
    if SET_THEME >= 0:
        theme = SET_THEME
    elif theme is None:
        theme = datetime.today().weekday()
    return random.choice(LOWS[theme])


def find_mid(theme=None):
    """Return a random mid-energy song id; after 21:00 any theme may play."""
    if SET_THEME >= 0:
        theme = SET_THEME
    elif datetime.now().hour >= 21:
        # Late evening: draw from every theme's mid-energy pool.
        all_mids = [song for sublist in MIDS for song in sublist]
        return random.choice(all_mids)
    elif theme is None:
        theme = datetime.today().weekday()
    return random.choice(MIDS[theme])


def find_high(theme=None):
    """Return a random high-energy song id; after 21:00 any theme may play."""
    if SET_THEME >= 0:
        theme = SET_THEME
    elif datetime.now().hour >= 21:
        # Late evening: draw from every theme's high-energy pool.
        all_highs = [song for sublist in HIGHS for song in sublist]
        return random.choice(all_highs)
    elif theme is None:
        theme = datetime.today().weekday()
    return random.choice(HIGHS[theme])


def get_meditation(this_time=None):
    """Pick the day's meditation: morning slot before noon, evening after.

    Bug fix: the default ``datetime.now()`` used to be frozen at import
    time; it is now taken when the function is called.
    NOTE(review): MEDITATIONS is empty above, so indexing raises until it
    is populated — confirm it is filled elsewhere.
    """
    if this_time is None:
        this_time = datetime.now()
    if this_time.hour < 12:
        return MEDITATIONS[this_time.day * 2]
    return MEDITATIONS[1 + this_time.day * 2]
245101287 | # Given a string containing only digits, restore it by returning all possible valid IP address combinations.
#
# Example:
#
# Input: "25525511135"
# Output: ["255.255.11.135", "255.255.111.35"]
# 这个题可以运用dfs,那么回溯算法的循环和终止条件是什么呢?
#
# IP地址由四部分构成,可以设置一个变量segment,当segment = 4时,可结束循环,将结果添加到列表中;
#
# 每个部分数值均值0---255之间,因此每次回溯最多需要判断3个元素,即当前元素i---i+2这三位数字。
class Solution:
    def restoreIpAddresses(self, s):
        """
        Return every valid dotted-quad IP address that *s*'s digits can form.

        :type s: str
        :rtype: List[str]
        """
        self.result = []

        def backtrack(remaining, parts):
            # All four octets chosen: accept only if every digit was used.
            if len(parts) == 4:
                if not remaining:
                    self.result.append(".".join(parts))
                return
            # An octet is 1-3 digits, value <= 255, no leading zero unless "0".
            for size in range(1, min(3, len(remaining)) + 1):
                chunk = remaining[:size]
                if int(chunk) <= 255:
                    backtrack(remaining[size:], parts + [chunk])
                if chunk == "0":
                    break

        backtrack(s, [])
        return self.result
| null | src/93_Restore_IP_Addresses.py | 93_Restore_IP_Addresses.py | py | 1,167 | python | en | code | null | code-starcoder2 | 51 |
25023949 | import requests
from iNaturalist.Common import convertToObservationResults
def get_all_observations_for_taxon(taxon_id):
    """Fetch every iNaturalist observation for *taxon_id*, 200 per page.

    Pages through the public v1 API, newest first, and stops on the first
    page that returns fewer rows than requested (i.e. the last page).
    """
    observations = []
    page_num = 1
    while True:
        url = 'https://api.inaturalist.org/v1/observations?taxon_id=' + str(taxon_id) + '&per_page=200&order=desc&order_by=created_at&page=' + str(page_num)
        json_results = requests.get(url).json()
        # The API echoes the requested page size back in "per_page".
        per_page = int(json_results["per_page"])
        num_results = len(json_results["results"])
        for observation in convertToObservationResults(json_results["results"]):
            observations.append(observation)
        # A short (or empty) page means there is no more data.
        if (per_page != num_results):
            return observations
        page_num += 1
398243948 | from src.classes.dataProcessor import dataProcessor
from src.classes.kNN import kNN as kNN
# Load the training and test splits of the iris data set.
data = dataProcessor()
learningData = data.processData('./data/iris.data.learning')
testingData = data.processData('./data/iris.data.test')
# Strip the class labels from the test rows before predicting them.
X_test = data.deleteLabels(testingData)
# Fit a k=3 nearest-neighbour classifier and report how many test rows match.
# NOTE(review): this rebinds the name kNN from the imported class to the instance.
kNN = kNN(3,learningData)
unsetLabels = kNN.predict(X_test)
print("Finall score: ",kNN.score(testingData,unsetLabels))
print("Accuracy: ",(kNN.score(testingData,unsetLabels)/len(testingData))*100)
| null | src/data/src/main.py | main.py | py | 470 | python | en | code | null | code-starcoder2 | 51 |
43097129 | #!/usr/bin/env python
# encoding: utf-8
import os
import inspect
import subprocess
import asyncio
import datetime
import json
import re
from functools import partial
from operator import is_not
class XCodeBuildArgs(object):
    """Argument bundle describing what and where to build/run."""

    name = None
    scheme = None
    device = None
    config = 'Debug'
    udid = None
    simulator = None

    # Keyword arguments the constructor recognises; anything else is ignored.
    _KNOWN_KEYS = ('name', 'scheme', 'device', 'config', 'simulator')

    def __init__(self, *args, **kwargs):
        for key, value in kwargs.items():
            if key in self._KNOWN_KEYS:
                setattr(self, key, value)
        # The scheme defaults to the app name when not given explicitly.
        if self.scheme is None:
            self.scheme = self.name
class XCodeProject(object):
    """An Xcode project or workspace bundle discovered on disk."""

    name = None
    isWorkspace = False

    def __init__(self, *args, **kwargs):
        if 'name' in kwargs:
            self.name = kwargs['name']
        if 'isWorkspace' in kwargs:
            self.isWorkspace = kwargs['isWorkspace']

    @property
    def projectType(self):
        """'workspace' for .xcworkspace bundles, otherwise 'project'."""
        return 'workspace' if self.isWorkspace else 'project'
class Device(object):
    """A device entry parsed from `instruments -s` / `simctl list` output."""

    name = None
    udid = None
    version = None

    def __init__(self, args, **kwargs):
        # *args* is a plain dict; copy over only the fields we know about,
        # falling back to the class-level None defaults.
        self.udid = args.get('udid', self.udid)
        self.name = args.get('name', self.name)
        self.version = args.get('version', self.version)
def getBuildPath(configuration, appName, isDevice):
    """Return the path of the built .app bundle inside derived data.

    Bug fix: the platform segment previously hard-coded 'iphonesimulator'
    (the computed *device* variable was unused), so device builds resolved
    to the wrong folder.
    """
    device = 'iphoneos' if isDevice else 'iphonesimulator'
    return 'build/Build/Products/{}-{}/{}.app'.format(configuration or 'Debug', device, appName)
def findMatchingSimulator(simulators, simulatorName=None):
    """Pick an iOS simulator from `simctl list --json devices` output.

    Preference order: an already-booted simulator, then one whose name
    equals *simulatorName*, then (when no name was given) the first
    available one.  Returns a Device or None.
    Bug fix: removed a leftover debug print of every fallback candidate.
    """
    if simulators['devices'] is None:
        return None
    devices = simulators['devices']
    match = None
    for version in devices:
        # Only iOS runtimes are of interest (skip tvOS/watchOS sections).
        if not version.startswith('iOS'):
            continue
        for simulator in devices[version]:
            if simulator['availability'] != '(available)':
                continue
            simulator['version'] = version
            if simulator['state'] == 'Booted':
                # Only one simulator can run at a time, so a booted one wins
                # even over an explicitly requested name.
                if simulatorName != None:
                    print("We couldn't boot your defined simulator due to an already booted simulator. We are limited to one simulator launched at a time.")
                return Device(simulator)
            if simulator['name'] == simulatorName:
                return Device(simulator)
            # Keeps track of the first available simulator for use if we can't find one above.
            if simulatorName == None and match is None:
                match = Device(simulator)
    if match:
        return match
    return None
def findXcodeProject(files):
    """Pick the Xcode bundle to build from a directory listing.

    Reverse-sorted order makes "X.xcworkspace" come before "X.xcodeproj",
    so a workspace is preferred when both exist.  Returns None when the
    listing holds neither.
    """
    for fileName in sorted(files, reverse=True):
        base, ext = os.path.splitext(fileName)
        if ext == '.xcworkspace':
            return XCodeProject(name=fileName, isWorkspace=True)
        if ext == '.xcodeproj':
            return XCodeProject(name=fileName, isWorkspace=False)
    return None
def parseIOSDevicesList(text):
    """Parse `xcrun instruments -s` output into a list of Device objects.

    Hardware lines look like "iPhone (12.1) [UDID]"; simulator lines carry
    a trailing parenthesised token, "iPhone X (12.1) [UDID] (Simulator)".
    Bug fixes: the old simulator check compared the bound method
    ``groups().count`` against 4 (always true), so simulators were never
    filtered out; a debug ``print(text)`` was left in; and the result was a
    one-shot ``filter`` iterator instead of a reusable list.
    """
    device_re = re.compile(r'(.*?) \((.*?)\) \[(.*?)\]')
    simulator_re = re.compile(r'(.*?) \((.*?)\) \[(.*?)\] \((.*?)\)')

    devices = []
    for line in text.split('\n'):
        # A trailing "(...)" token marks a simulator entry — skip those.
        if simulator_re.match(line):
            continue
        match = device_re.match(line)
        if match:
            name, version, udid = match.groups()
            devices.append(Device({
                'name': name,
                'version': version,
                'udid': udid,
            }))
    return devices
def runIOS(args):
    """Entry point: locate the Xcode project and build/run the app.

    Dispatch order: explicit device name, explicit udid, else simulator.
    Bug fixes: a stray bare ``return`` made the device-deployment call
    unreachable; ``raise '<str>'`` is a TypeError in Python 3 (strings are
    not raisable); and StopIteration from driving the awaitless coroutine
    is its normal completion, not a failure.
    """
    files = os.listdir(os.path.dirname(os.path.abspath(__file__)))
    xcodeproject = findXcodeProject(files)
    if xcodeproject is None:
        raise Exception('Could not find Xcode project files in ios folder')

    inferredSchemeName, ext = os.path.splitext(xcodeproject.name)
    scheme = args.scheme or inferredSchemeName
    print('Found Xcode {} {}'.format(xcodeproject.projectType, xcodeproject.name))

    # List connected devices via instruments.
    cmd = "xcrun instruments -s"
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    try:
        output, error = process.communicate()
        devices = parseIOSDevicesList(output.decode('utf8'))
    except Exception as e:
        raise e

    if args.device != None:
        selectedDevice = matchingDevice(devices, args.device)
        return selectedDeviceOperation(selectedDevice, args, scheme, xcodeproject, devices)
    elif args.udid != None:
        return runOnDeviceByUdid(args, scheme, xcodeproject, devices)
    else:
        co = runOnSimulator(xcodeproject, args, inferredSchemeName, scheme)
        try:
            co.send(None)
        except StopIteration:
            # runOnSimulator has no awaits, so send(None) runs it to
            # completion and surfaces StopIteration: the success path.
            pass
def runOnDeviceByUdid(args, scheme, xcodeproject, devices):
    """Resolve *args.udid* against the connected devices and deploy to it."""
    selectedDevice = matchingDeviceByUdid(devices, args.udid)
    selectedDeviceOperation(selectedDevice, args, scheme, xcodeproject, devices)
def selectedDeviceOperation(selectedDevice, args, scheme, xcodeproject, devices):
    """Deploy to *selectedDevice*, or list alternatives when it was not found."""
    if selectedDevice:
        # runOnDevice is a coroutine; drive it on a fresh event loop.
        loop = asyncio.get_event_loop()
        loop.run_until_complete(runOnDevice(selectedDevice, scheme, xcodeproject, args.config))
        loop.close()
    else:
        if devices:
            # NOTE(review): args.device is None when reached via --udid, and
            # concatenating None raises TypeError here — confirm this path.
            print('Could not find device with the name: "' + args.device + '".')
            print('Choose one of the following:')
            printFoundDevices(devices)
        else:
            print('No iOS devices connected.')
async def runOnSimulator(xcodeproject, args, inferredSchemeName, scheme):
    """Boot a matching simulator, build the app, install and launch it."""
    # Ask simctl for the JSON list of simulators.
    try:
        cmd = 'xcrun simctl list --json devices'
        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        simulators = json.loads(output.decode('utf8'))
    except Exception as e:
        print('Could not parse the simulator list output')
        raise e

    selectedSimulator = findMatchingSimulator(simulators, args.simulator)
    # selectedSimulator = findMatchingSimulator(simulators)
    if selectedSimulator == None:
        # NOTE(review): raising a str is a TypeError in Python 3; this
        # should be `raise Exception(...)`.
        raise 'Could not find {} simulator'.format(args.simulator)

    simulatorFulName = formattedDeviceName(selectedSimulator)
    print('launching {}, UDID: {}...'.format(simulatorFulName, selectedSimulator.udid))
    # Boot the simulator.  instruments exits non-zero because it expects
    # more arguments, so failures here are deliberately ignored.
    try:
        cmd = 'xcrun instruments -w {}'.format(selectedSimulator.udid)
        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
        # communicate() returns the (stdout, stderr) tuple.
        error = process.communicate()
    except Exception as e:
        # do nothing:
        # instruments always fail with 255 because it expects more arguments,
        # but we want it to only launch the simulator
        print('')

    appName = buildProject(xcodeproject, selectedSimulator.udid, scheme, args.config)
    if appName is None:
        appName = inferredSchemeName
    appPath = getBuildPath(args.config, appName, False)
    print('Installing {}'.format(appPath))
    # Install the freshly-built bundle into the booted simulator.
    try:
        cmd = 'xcrun simctl install booted {}'.format(appPath)
        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
        error = process.communicate()
    except Exception as e:
        raise e

    # Read the bundle identifier out of the app's Info.plist.
    try:
        cmd = '/usr/libexec/PlistBuddy -c Print:CFBundleIdentifier {}'.format(os.path.join(appPath, 'Info.plist'))
        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        bundleID = output.decode('utf8').rstrip()
    except Exception as e:
        raise e

    print('launching ' + bundleID)
    try:
        cmd = 'xcrun simctl launch booted {}'.format(bundleID)
        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
    except Exception as e:
        raise e
async def runOnDevice(selectedDevice, scheme, xcodeproject, configuration):
    """Build for *selectedDevice* and install/launch it via ios-deploy.

    Bug fixes: the old code referenced an undefined name ``error`` after the
    try block (NameError on every successful install), and a non-zero
    ios-deploy exit raised instead of printing the intended install-failed
    guidance.  Failure is now tracked explicitly.
    """
    appName = buildProject(xcodeproject, selectedDevice.udid, scheme, configuration)
    if appName is None:
        appName = scheme

    iosDeployInstallArgs = [
        'ios-deploy',
        '--bundle', getBuildPath(configuration, appName, True),
        '--id', selectedDevice.udid,
        '--justlaunch'
    ]
    print(' '.join(iosDeployInstallArgs))

    install_failed = False
    try:
        process = subprocess.Popen(iosDeployInstallArgs, stdout=subprocess.PIPE, universal_newlines=True)
        # Stream ios-deploy's output to the console as it arrives.
        for stdout_line in iter(process.stdout.readline, ""):
            print(stdout_line)
        process.stdout.close()
        install_failed = process.wait() != 0
    except Exception:
        # ios-deploy missing from PATH, not executable, etc.
        install_failed = True

    if install_failed:
        print('')
        print('** INSTALLATION FAILED **')
        print('Make sure you have ios-deploy installed globally.')
        print('(e.g "npm install -g ios-deploy")')
    else:
        print('** INSTALLATION SUCCEEDED **')
def buildProject(xcodeproject, udid, scheme, configuration='Debug'):
    """Run xcodebuild for *scheme* targeting the device/simulator *udid*.

    Streams the build log, raises CalledProcessError on a non-zero exit, and
    returns the product name parsed from the log (or None when it cannot be
    found, letting the caller fall back to the scheme name).
    Bug fixes: ``findall(...)[1]`` used JS-style match indexing (wrong
    element, and required two matches); the fallback return referenced an
    undefined ``error``; the ``.`` before "app" was unescaped in the regex.
    """
    xcodebuildArgs = [
        'xcodebuild',
        '-workspace' if xcodeproject.isWorkspace else '-project', xcodeproject.name,
        '-configuration', configuration,
        '-scheme', scheme,
        '-destination', 'id={}'.format(udid),
        '-derivedDataPath', 'build'
    ]
    print('Building using "{}"'.format(' '.join(map(str, xcodebuildArgs))))

    process = subprocess.Popen(xcodebuildArgs, stdout=subprocess.PIPE, universal_newlines=True)
    buildOutput = ''
    for stdout_line in iter(process.stdout.readline, ""):
        print(stdout_line)
        buildOutput += stdout_line
    process.stdout.close()
    return_code = process.wait()
    if return_code:
        raise subprocess.CalledProcessError(return_code, ' '.join(xcodebuildArgs))

    # FULL_PRODUCT_NAME is the actual file name of the app, which comes from
    # the Product Name in the build config and need not match the scheme,
    # e.g.: export FULL_PRODUCT_NAME="Super App Dev.app"
    match = re.search(r'export FULL_PRODUCT_NAME="?(.+)\.app', buildOutput)
    if match:
        return match.group(1)
    return None
def matchingDevice(devices, deviceName):
    """Find the connected device whose name (or formatted name) matches.

    When *deviceName* is the bare flag True and exactly one device is
    connected, that device is used.  Returns None when nothing matches.
    Bug fixes: ``devices.length`` was a JS-ism (AttributeError in Python);
    a per-device debug print was left in; the input may be any iterable,
    so it is materialised once up front.
    """
    devices = list(devices)
    if deviceName == True and len(devices) == 1:
        print('Using first available device {} due to lack of name supplied.'.format(devices[0].name))
        return devices[0]
    for device in devices:
        if device.name == deviceName or formattedDeviceName(device) == deviceName:
            return device
def matchingDeviceByUdid(devices, udid):
    """Return the Device whose udid equals *udid*, or None.

    Bug fixes: devices are objects, not dicts (``device['udid']`` raised
    TypeError); string identity (``is``) was used instead of equality; and
    a list was returned where callers expect a single Device with a
    ``.udid`` attribute.
    """
    for device in devices:
        if device.udid == udid:
            return device
    return None
def formattedDeviceName(simulator):
    """Human-readable "<name> (<version>)" label for a device or simulator."""
    return '{0} ({1})'.format(simulator.name, simulator.version)
def printFoundDevices(devices):
    """List every detected device as "<name> udid: <udid>", one per line."""
    for device in devices:
        print('{} udid: {}'.format(device.name, device.udid))
# Script entry point: build SnowFund and deploy it to the device named
# "iPhone 6s" (pass simulator='...' instead to target a simulator).
# simulator='iPhone 7',
args = XCodeBuildArgs(name = 'SnowFund', device='iPhone 6s')
runIOS(args)
| null | run.py | run.py | py | 11,643 | python | en | code | null | code-starcoder2 | 51 |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
'''
News headline scraper.

NOTE(review): the original docstring said "Sohu" but the script actually
crawls news.sina.com.cn — confirm which site is intended.
'''
import requests
from bs4 import BeautifulSoup

# Fetch the page content and parse it
res = requests.get('http://news.sina.com.cn/china/')
res.encoding = 'utf-8'
# print(res.text.txt)
soup = BeautifulSoup(res.text, 'lxml')
# print(soup)
# Extract the time, headline and link of every news item
for news in soup.select('.news-item'):
    if len(news.select('h2')) > 0:
        # print(news.select('h2'))  # some items have no <h2>, hence the guard
        h2 = news.select('h2')[0].text
        time = news.select('.time')[0].text
        a = news.select('a')[0]['href']
        print(time, h2, a)
| null | A_库的分类/BeautifulSoup_yhz/实例2 - 搜狐网页提取.py | 实例2 - 搜狐网页提取.py | py | 649 | python | en | code | null | code-starcoder2 | 51 |
595917651 |
# -*- coding: utf-8 -*-
from copy import deepcopy
from scipy.stats import norm
import numpy as np
from sklearn.model_selection import StratifiedKFold, cross_val_predict, KFold
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_auc_score
from base import RANDOM_STATE
###############################################################################
# Metric-name constants used as dictionary keys throughout this module.
ACCURACY = 'accuracy'
F1 = 'f1_score'
ROC_AUC = 'roc_auc_score'
CONFUSION_MATRIX = 'confusion_matrix'
VALUES_TRUE = 'y_valid'
VALUES_PRED = 'y_pred'
TEST_PREDICTIONS = 'y_test'
HYPERPARAMS = 'hyperparams'
FEATURES = 'features'
OBJECTS = 'objects'

# Every metric this module knows how to produce.
ALL_METRICS = [
    ACCURACY, F1,
    ROC_AUC,
    CONFUSION_MATRIX,
    VALUES_TRUE,
    VALUES_PRED,
    TEST_PREDICTIONS,
    HYPERPARAMS,
    FEATURES,
    OBJECTS
]

# Metrics computable from (y_true, y_pred) alone.
ALL_Y_TRUE_Y_PRED_BASED_METRICS = [
    ACCURACY, F1,
    ROC_AUC,
    CONFUSION_MATRIX,
    VALUES_TRUE,
    VALUES_PRED
]

# Scalar metrics suitable for plotting over training runs.
PLOT_METRICS = [
    ACCURACY, F1,
    ROC_AUC
]
###############################################################################
class AccuracyLossGetter:
    """Turn a metrics dict into a scalar loss: 1 - ROC AUC."""

    def __call__(self, metrics):
        # Minimizing this loss maximizes ROC AUC.
        auc = metrics[ROC_AUC]
        return 1.0 - auc
class MetricsGetter:
    """Calculate metrics."""

    def __init__(self, metrics, loss_func, n_folds):
        # metrics: iterable of metric-name constants (see ALL_METRICS above)
        # loss_func: callable mapping a metrics dict to a scalar loss
        # n_folds: number of cross-validation folds
        self._metrics = metrics
        self._loss_func = loss_func
        self._n_folds = n_folds

    def __call__(self, model, X, y, features, objects, X_test=None):
        # Work on a deep copy so repeated calls never mutate the caller's model.
        model = deepcopy(model)
        metrics = self.get_cv_metrics(
            model,
            X,
            y,
            features,
            objects,
            self._metrics,
            self._n_folds,
            X_test=X_test,
        )
        loss = self._loss_func(metrics)
        return metrics, loss

    def set_folds_count(self, n_folds):
        # Adjust the number of CV folds used by subsequent calls.
        self._n_folds = n_folds

    def get_cv_metrics(self, model, X, y, features, objects, metrics, n_folds, X_test=None):
        """Calculate metrics for the model on (X, y) dataset using cross-validation."""
        # Out-of-fold predictions: every sample is predicted by a model that
        # did not see it during fitting.
        y_pred = cross_val_predict(
            model,
            X,
            y,
            cv=KFold(
                n_splits=n_folds,
                shuffle=True,
                random_state=RANDOM_STATE
            )
        )
        # get metrics from training set
        result = self.get_y_true_y_pred_based_metrics(y, y_pred, metrics)

        # fit model to get features and predictions
        model.fit(X, y)
        if HYPERPARAMS in metrics:
            result[HYPERPARAMS] = model.get_hyperparams()
        if FEATURES in metrics:
            # Names of the features the fitted model selected.
            result[FEATURES] =features[model.get_support(as_indices=True)]
        if OBJECTS in metrics:
            result[OBJECTS] = objects
        if TEST_PREDICTIONS in metrics:
            # predictions for X_test
            result[TEST_PREDICTIONS] = model.predict(X_test)
        return result

    def get_y_true_y_pred_based_metrics(self, y_true, y_pred, metrics):
        """Calculate metrics for y_pred, y_true arrays."""
        result = dict()
        if ACCURACY in metrics:
            result[ACCURACY] = accuracy_score(y_true, y_pred)
        if F1 in metrics:
            result[F1] = f1_score(y_true, y_pred)
        if ROC_AUC in metrics:
            result[ROC_AUC] = roc_auc_score(y_true, y_pred)
        if CONFUSION_MATRIX in metrics:
            result[CONFUSION_MATRIX] = confusion_matrix(y_true, y_pred)
        if VALUES_TRUE in metrics:
            result[VALUES_TRUE] = y_true
        if VALUES_PRED in metrics:
            result[VALUES_PRED] = y_pred
        return result

    # .. compare predictions ..
    def results_differ_p_value(self, y_true, y1, y2):
        # Paired z-test on per-sample correctness of two prediction vectors;
        # returns the smaller tail probability (two-sided significance / 2).
        y1 = (np.array(y1) == np.array(y_true)).astype(np.float64)
        y2 = (np.array(y2) == np.array(y_true)).astype(np.float64)
        diff = y1 - y2
        norm_stat = diff.mean() / diff.std() * np.sqrt(diff.shape[0])
        quantile = norm.cdf(norm_stat)
        return min(quantile, 1.0 - quantile)
| null | metrics_getter.py | metrics_getter.py | py | 4,000 | python | en | code | null | code-starcoder2 | 51 |
354093144 | import sys
import pickle
try:
TasksFile = open('tasks.txt', 'rb')
# LoadedTasks contains the tasks and their Done status as a boolean
# it's structured like : [['Task Description',True],['Task2 Description',False]]
LoadedTasks = pickle.load(TasksFile)
except EOFError:
LoadedTasks = []
if len(sys.argv) == 1 :
print("""Command Line Todo application
=============================
Command-line arguments:
-l Lists all the tasks
-a Adds a new task
-r Removes a task
-c Completes a task
""")
elif "-a" in sys.argv:
# Check if the task description is provided after the -a parameter
# before adding it to the list and saving to file
try:
TaskDescription = sys.argv[2]
LoadedTasks.append([TaskDescription,False])
pickle.dump(LoadedTasks, open('tasks.txt', 'wb'))
except IndexError:
print("Unable to add: no task provided")
elif "-l" in sys.argv:
# If the LoadedTasks variable is empty provide the
# correct error message
if not LoadedTasks: #If it's empty
print("No todos for today! :)")
else :
for task in LoadedTasks:
LoadedTaskIndex = str(LoadedTasks.index(task)+ 1)
if task[1]: # If the task is done
print(LoadedTaskIndex + " - [X] " + task[0])
else: # the task has not been completed yet
print(LoadedTaskIndex + " - [ ] " + task[0])
elif "-r" in sys.argv:
try:
TaskNumber = sys.argv[2] # Task number to be removed
except IndexError: # Add error catching
print("Unable to remove: no index provided")
exit()
try:
LoadedTasks.pop(int(TaskNumber) - 1) # Remove the task from the LoadedTasks list
pickle.dump(LoadedTasks, open('tasks.txt', 'wb')) # Write again to file
except IndexError:
print("Unable to remove: index is out of bound")
except ValueError:
print("Unable to remove: index is not a number")
elif "-c" in sys.argv:
try:
TaskNumber = sys.argv[2] # Task number to be checked
except IndexError: # Add error catching
print("Unable to remove: no index provided")
exit()
try:
TaskNumber = int(TaskNumber) - 1
LoadedTasks[TaskNumber][1] = True
pickle.dump(LoadedTasks, open('tasks.txt', 'wb'))
except IndexError:
print("Unable to remove: index is out of bound")
except ValueError:
print("Unable to remove: index is not a number")
else: #If none of the above conditions matched than the wrong argument must be provided
print("Unsupported argument") | null | day-3/main.py | main.py | py | 2,641 | python | en | code | null | code-starcoder2 | 51 |
115320370 | #django
from .models import *
#combine statistics
def all_statistics(clean_sur):
    """Collect every per-surname statistic shown on the results page.

    Queries each names_* table for *clean_sur* and funnels the rows through
    the matching *_stats formatter below.  Returns a 12-tuple in the fixed
    order the caller unpacks.
    """
    #statistics -- forename
    forenames_hist = names_forenames_hist.objects.filter(surname=clean_sur).values('surname','forename','sex')
    forenames_cont = names_forenames_cont.objects.filter(surname=clean_sur).values('surname','forename','sex')
    fore_female_hist, fore_male_hist = forenames_stats(forenames_hist)
    fore_female_cont, fore_male_cont = forenames_stats(forenames_cont)
    #statistics -- parish
    parishes = names_freq_parish.objects.filter(surname=clean_sur).values('regcnty','parish','conparid')
    par_top = parish_stats(parishes)
    #statistics -- oa
    oas = names_freq_oa.objects.filter(surname=clean_sur).values('msoa11cd')
    oa_top = oa_stats(oas)
    #statistics -- oac
    oac = names_cat_oa.objects.filter(surname=clean_sur).values('oagroupcd','oagroupnm')
    oac_mod = oac_stats(oac)
    #statistics -- oah
    oah = names_health_oa.objects.filter(surname=clean_sur).values('surname','ahah_dec')
    oah_mod = oah_stats(oah)
    #statistics -- imd
    imd = names_imd_oa.objects.filter(surname=clean_sur).values('surname','imd_dec')
    imd_mod = imd_stats(imd)
    #statistics -- bband
    bband = names_bband_oa.objects.filter(surname=clean_sur).values('surname','bbandcd')
    bband_mod = bband_stats(bband)
    #statistics -- iuc
    iuc = names_iuc_oa.objects.filter(surname=clean_sur).values('surname','iuccd','iucnm')
    iuc_mod = iuc_stats(iuc)
    #statistics -- crvul
    crvul = names_crvul_oa.objects.filter(surname=clean_sur).values('surname','crvulcd','crvulnm')
    crvul_mod = crvul_stats(crvul)
    #return
    return(fore_female_hist,fore_male_hist,fore_female_cont,fore_male_cont,par_top,oa_top,oac_mod,oah_mod,imd_mod,bband_mod,iuc_mod,crvul_mod)
#forenames
def forenames_stats(forenames):
    """Split forename records into (female_names, male_names).

    Each record needs 'forename' and 'sex' keys ('F' means female, anything
    else male).  When no records exist, both lists hold a placeholder.
    """
    if not forenames:
        return ['No forenames found'], ['No forenames found']
    females, males = [], []
    for record in forenames:
        bucket = females if record['sex'] == 'F' else males
        bucket.append(record['forename'])
    return females, males
#parish freq
def parish_stats(parishes):
    """Format parish records as [conparid, "County: Parish"] pairs.

    The parish value '-' is the placeholder used for London parishes.
    Returns a sentinel entry when there are no records.
    """
    if not parishes:
        return [['99', 'No parishes found']]
    formatted = []
    for record in parishes:
        county = record['regcnty'].title()
        parish_id = str(int(record['conparid']))
        parish = record['parish']
        if parish == '-':
            parish = 'London parishes'
        formatted.append([parish_id, '{}: {}'.format(county, parish)])
    return formatted
#oa freq
def oa_stats(oas):
    """Resolve MSOA codes to [msoa_name, "district: msoa_name"] pairs.

    Returns a sentinel entry when the surname had no MSOA rows.
    """
    if not oas:
        msoa_top = [['99','No MSOA\'s found']]
    else:
        msoa_top = []
        for o in oas:
            msoajoin = []
            # Look up the human-readable district and MSOA names for the code.
            ladnm = lookup_oa.objects.filter(msoa11cd=o['msoa11cd']).values('ladnm','msoa11nm')[0]
            msoanm = ladnm['ladnm'] + ': ' + ladnm['msoa11nm']
            msoajoin.append(ladnm['msoa11nm'])
            msoajoin.append(msoanm)
            msoa_top.append(msoajoin)
    return(msoa_top)
#oa classification
def oac_stats(oac):
    """Output-area classification: [supergroup name, group name, group code]."""
    if not oac:
        # NOTE(review): this branch stores lists while the found branch
        # stores plain strings — confirm downstream handles both shapes.
        oac_sn = ['No classification found']
        oac_gn = ['No classification found']
        oac_sg = '99'
    else:
        oac_gn = oac[0]['oagroupnm']
        oac_sg = oac[0]['oagroupcd']
        # Resolve the group's parent supergroup name from the lookup table.
        oac_sn = oa_classification.objects.filter(groupcd=oac_sg).values('supergroupnm')[0]['supergroupnm']
    return([oac_sn,oac_gn,oac_sg])
#oa health
def oah_stats(oah):
    """AHAH health decile of the surname's output area; 99 when unknown."""
    return oah[0]['ahah_dec'] if oah else 99
#oa imd
def imd_stats(imd):
    """Index-of-multiple-deprivation decile; 99 when unknown."""
    return imd[0]['imd_dec'] if imd else 99
#oa broadband
def bband_stats(bband):
    """Broadband speed code of the surname's output area; 99 when unknown."""
    return bband[0]['bbandcd'] if bband else 99
#oa internet users
def iuc_stats(iuc):
    """Internet-user classification as [code, name]; sentinel when unknown."""
    if not iuc:
        return [99, 'No classification found']
    first = iuc[0]
    return [first['iuccd'], first['iucnm']]
#oa consumer vulnerability
def crvul_stats(crvul):
    """Consumer-vulnerability classification as [code, name]; sentinel when unknown."""
    if not crvul:
        return [99, 'No classification found']
    first = crvul[0]
    return [first['crvulcd'], first['crvulnm']]
| null | kde/statistics.py | statistics.py | py | 4,552 | python | en | code | null | code-starcoder2 | 51 |
643970079 | # Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Remote Python Debugger (pdb wrapper)."""
import pdb
import socket
import sys
__author__ = "Bertrand Janin <b@janin.com>"
__version__ = "0.1.3"
class Rpdb(pdb.Pdb):
    """A pdb subclass that serves the debugger session over a TCP socket."""

    def __init__(self, addr="127.0.0.1", port=4444):
        """Initialize the socket and initialize pdb."""

        # Backup stdin and stdout before replacing them by the socket handle
        self.old_stdout = sys.stdout
        self.old_stdin = sys.stdin

        # Open a 'reusable' socket to let the webapp reload on the same port
        self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        self.skt.bind((addr, port))
        self.skt.listen(1)
        # Blocks here until a debugger client connects.
        (clientsocket, address) = self.skt.accept()
        handle = clientsocket.makefile('rw')
        pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
        # Redirect the process's stdio through the client connection.
        sys.stdout = sys.stdin = handle

    def shutdown(self):
        """Revert stdin and stdout, close the socket."""
        sys.stdout = self.old_stdout
        sys.stdin = self.old_stdin
        self.skt.close()
        self.set_continue()

    def do_continue(self, arg):
        """Stop all operation on ``continue``."""
        self.shutdown()
        return 1

    # All session-ending commands tear the socket down the same way.
    do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
| null | hooks/charmhelpers/fetch/python/rpdb.py | rpdb.py | py | 1,910 | python | en | code | null | code-starcoder2 | 51 |
201695972 | ## This code is rewritten based on previous work of
## BRUNEL, Nicolas et WANG, Xiao-Jing.
## "Effects of neuromodulation in a cortical network model of object working memory dominated by recurrent inhibition"
## Journal of computational neuroscience, 2001, vol. 11, no 1, p. 63-85.
## --------------------------------------
## This Python code executes a number of tasks as follows.
# 1. Initializing two neuron groups
# 2. Injecting Poisson input stimuli to two neuron groups
# 3. Ploting the membrane potentials of all neurons
## Latest update: September 4th, 2018
from brian2 import *
import visualization as vis
# populations
# --- Network size: 80/20 split of excitatory/inhibitory neurons -------------
N = 100
N_E = int(N * 0.8) # pyramidal neurons
N_I = int(N * 0.2) # interneurons
# voltage
V_L = -70. * mV
V_thr = -50. * mV
V_reset = -60. * mV
V_E = 0. * mV
V_I = -70. * mV
# membrane capacitance
C_m_E = 0.5 * nF
C_m_I = 0.2 * nF
# membrane leak
g_m_E = 25. * nS
g_m_I = 20. * nS
# refractory period
tau_rp_E = 2. * ms
tau_rp_I = 1. * ms
# external stimuli
rate = 3 * Hz
C_ext = 1000
# synapses
C_E = N_E
C_I = N_I
# AMPA (excitatory)
# NOTE(review): conductances are scaled by 800/N_E (resp. 200/N_I), which
# presumably rescales the original 800-cell model down to this network size.
g_AMPA_ext_E = 2.08 * nS
g_AMPA_rec_E = 0.104 * nS * 800. / N_E
g_AMPA_ext_I = 1.62 * nS
g_AMPA_rec_I = 0.081 * nS * 800. / N_E
tau_AMPA = 2. * ms
# NMDA (excitatory)
g_NMDA_E = 0.327 * nS * 800. / N_E
g_NMDA_I = 0.258 * nS * 800. / N_E
tau_NMDA_rise = 2. * ms
tau_NMDA_decay = 100. * ms
alpha = 0.5 / ms
Mg2 = 1.
# GABAergic (inhibitory)
g_GABA_E = 1.25 * nS * 200. / N_I
g_GABA_I = 0.973 * nS * 200. / N_I
tau_GABA = 10. * ms
# modeling
# Membrane equations for the excitatory population (LIF with AMPA/NMDA/GABA).
eqs_E = '''
dv / dt = (- g_m_E * (v - V_L) - I_syn) / C_m_E : volt (unless refractory)
I_syn = I_AMPA_ext + I_AMPA_rec + I_NMDA_rec + I_GABA_rec : amp
I_AMPA_ext = g_AMPA_ext_E * (v - V_E) * s_AMPA_ext : amp
I_AMPA_rec = g_AMPA_rec_E * (v - V_E) * 1 * s_AMPA : amp
ds_AMPA_ext / dt = - s_AMPA_ext / tau_AMPA : 1
ds_AMPA / dt = - s_AMPA / tau_AMPA : 1
I_NMDA_rec = g_NMDA_E * (v - V_E) / (1 + Mg2 * exp(-0.062 * v / mV) / 3.57) * s_NMDA_tot : amp
s_NMDA_tot : 1
I_GABA_rec = g_GABA_E * (v - V_I) * s_GABA : amp
ds_GABA / dt = - s_GABA / tau_GABA : 1
'''
# Same structure for the inhibitory population, with its own conductances.
eqs_I = '''
dv / dt = (- g_m_I * (v - V_L) - I_syn) / C_m_I : volt (unless refractory)
I_syn = I_AMPA_ext + I_AMPA_rec + I_NMDA_rec + I_GABA_rec : amp
I_AMPA_ext = g_AMPA_ext_I * (v - V_E) * s_AMPA_ext : amp
I_AMPA_rec = g_AMPA_rec_I * (v - V_E) * 1 * s_AMPA : amp
ds_AMPA_ext / dt = - s_AMPA_ext / tau_AMPA : 1
ds_AMPA / dt = - s_AMPA / tau_AMPA : 1
I_NMDA_rec = g_NMDA_I * (v - V_E) / (1 + Mg2 * exp(-0.062 * v / mV) / 3.57) * s_NMDA_tot : amp
s_NMDA_tot : 1
I_GABA_rec = g_GABA_I * (v - V_I) * s_GABA : amp
ds_GABA / dt = - s_GABA / tau_GABA : 1
'''
P_E = NeuronGroup(N_E, eqs_E, threshold='v > V_thr', reset='v = V_reset', refractory=tau_rp_E, method='euler')
P_E.v = V_L
P_I = NeuronGroup(N_I, eqs_I, threshold='v > V_thr', reset='v = V_reset', refractory=tau_rp_I, method='euler')
P_I.v = V_L
# NMDA gating dynamics carried on glutamatergic synapses (summed onto target).
eqs_glut = '''
s_NMDA_tot_post = w * s_NMDA : 1 (summed)
ds_NMDA / dt = - s_NMDA / tau_NMDA_decay + alpha * x * (1 - s_NMDA) : 1 (clock-driven)
dx / dt = - x / tau_NMDA_rise : 1 (clock-driven)
w : 1
'''
eqs_pre_glut = '''
s_AMPA += w
x += 1
'''
eqs_pre_gaba = '''
s_GABA += 1
'''
eqs_pre_ext = '''
s_AMPA_ext += 1
'''
duration = .1*second
## Initialization of neuron connection
# E to E
C_E_E = Synapses(P_E, P_E, model=eqs_glut, on_pre=eqs_pre_glut, method='euler')
C_E_E.connect('i!=j')
C_E_E.w[:] = 1
# E to I
C_E_I = Synapses(P_E, P_I, model=eqs_glut, on_pre=eqs_pre_glut, method='euler')
C_E_I.connect()
C_E_I.w[:] = 1
# I to E_1
# NOTE(review): I->E uses sparse connectivity (p=0.2) while the other
# pathways are all-to-all — confirm this asymmetry is intentional.
C_I_E = Synapses(P_I, P_E, on_pre=eqs_pre_gaba, method='euler')
C_I_E.connect(p=0.2)
# I to I
C_I_I = Synapses(P_I, P_I, on_pre=eqs_pre_gaba, method='euler')
C_I_I.connect('i != j')
# external noise
C_P_E = PoissonInput(P_E, 's_AMPA_ext', C_ext, rate, '1')
C_P_I = PoissonInput(P_I, 's_AMPA_ext', C_ext, rate, '1')
## Monitoring
E_mon = SpikeMonitor(P_E)
I_mon = SpikeMonitor(P_I)
E_sta = StateMonitor(P_E,'v',record=True)
I_sta = StateMonitor(P_I,'v',record=True)
LFP_E = PopulationRateMonitor(P_E)
LFP_I = PopulationRateMonitor(P_I)
# ##############################################################################
# # Simulation run
# ##############################################################################
run(duration)
################################################################################
# Analysis and plotting
################################################################################
# Figure 1: spike raster per population.
figure(1)
subplot(211)
plot(E_mon.t/ms, E_mon.i,'.',c='C1',label='Excitatory synapses group')
xlabel('Time (ms)')
ylabel('Neuron index')
legend()
subplot(212)
plot(I_mon.t/ms, I_mon.i,'.',c='C2',label='Inhibitory synapses group')
xlabel('Time (ms)')
ylabel('Neuron index')
legend()
suptitle('Raster plot neurons')
# Figure 2: membrane-potential traces of every recorded neuron.
figure(2)
subplot(211)
for idx in range(N_E):
    plot(E_sta.t/ms, E_sta.v[idx])
xlabel('Time (ms)')
ylabel('Membrane potential (mV)')
subplot(212)
for idx in range(N_I):
    plot(I_sta.t/ms, I_sta.v[idx])
xlabel('Time (ms)')
ylabel('Membrane potential (mV)')
suptitle('Spike traces of excitatory synapses group and inhibitory synapses group')
# Figure 3: smoothed population firing rates (2 ms flat window).
figure(3)
subplot(211)
plot(LFP_E.t/ms, LFP_E.smooth_rate(window='flat',width=.002*second)/Hz,label='Excitatory synapses group')
xlabel('Time (ms)')
ylabel('Firing rate (Hz)')
legend()
subplot(212)
plot(LFP_I.t/ms, LFP_I.smooth_rate(window='flat',width=.002*second)/Hz,label='Inhibitory synapses neuron')
xlabel('Time (ms)')
ylabel('Firing rate (Hz)')
legend()
show() | null | LIF/Excitatory_inhibitory_model.py | Excitatory_inhibitory_model.py | py | 5,484 | python | en | code | null | code-starcoder2 | 51 |
185476884 | # Studentnumber : 1716390
# Class : V2C
class TrieWord:
    """Value object pairing a word with its occurrence count in a Trie."""

    def __init__(self, word, frequency):
        # The word itself and the number of times it was inserted.
        self.word = word
        self.frequency = frequency
class TrieNode:
    """One node of a trie.

    ``value`` holds a single character (None for the root), ``frequency``
    counts how many inserted words terminate at this node, and ``childs``
    holds the child nodes.

    Bug fixes relative to the original:
    * mutable default arguments (``childs=[]`` in ``__init__`` and
      ``word=[]`` / ``words=[]`` in ``getWords``) made every node built
      with defaults share one child list, and made repeated ``getWords``
      calls accumulate results across calls;
    * ``getWords`` popped from ``word`` unconditionally, which raised
      IndexError when called on the root (whose value is None and which
      therefore never appended anything);
    * the recursive ``getWords`` call now passes ``words`` explicitly
      instead of relying on the shared default list.
    """

    def __init__(self, value=None, frequency=0, childs=None):
        """Create a node; a fresh child list is allocated per node."""
        self.value = value
        self.frequency = frequency
        self.childs = [] if childs is None else childs

    def getChildUsingValue(self, value, valueIndex):
        """Return the child whose value equals ``value[valueIndex]``, else None."""
        if valueIndex < len(value):
            for child in self.childs:
                if child.value == value[valueIndex]:
                    return child
        return None

    def addChild(self, child):
        """Append *child* to this node's children."""
        self.childs.append(child)

    def insert(self, value, valueIndex=0):
        """Insert the suffix ``value[valueIndex:]`` below this node.

        Increments ``frequency`` on the terminal node; returns True there
        (kept for compatibility with the original signature).
        """
        if valueIndex == len(value):
            self.frequency += 1
            return True
        child = self.getChildUsingValue(value, valueIndex)
        if child is None:
            child = TrieNode(value[valueIndex], 0, [])
            self.addChild(child)
        child.insert(value, valueIndex + 1)

    def search(self, value, valueIndex=0):
        """Return the node reached by following *value*, or None if absent."""
        if valueIndex == len(value):
            return self
        child = self.getChildUsingValue(value, valueIndex)
        if child is None:
            return None
        return child.search(value, valueIndex + 1)

    def getWords(self, word=None, words=None):
        """Collect every stored word below this node as TrieWord objects.

        ``word`` is the character path accumulated so far; ``words`` is the
        shared result list. Both default to fresh lists per top-level call.
        """
        word = [] if word is None else word
        words = [] if words is None else words
        appended = self.value is not None
        if appended:
            word.append(self.value)
        if self.frequency:
            words.append(TrieWord(''.join(word), self.frequency))
        for child in self.childs:
            child.getWords(word, words)
        # Undo only what this node appended (the root appends nothing).
        if appended:
            word.pop()
        return words
class Trie:
    """Case-insensitive prefix tree tracking word frequencies."""

    def __init__(self):
        # Explicit empty child list so the root never shares state with
        # other nodes (defensive against TrieNode's historical defaults).
        self.root = TrieNode(None, 0, [])

    def insert(self, value):
        """Insert *value* (lower-cased) into the trie."""
        self.root.insert(value.lower())

    def search(self, value):
        """Return the TrieNode for *value* (case-insensitive), or None."""
        return self.root.search(value.lower())

    def getWords(self):
        """Return every stored word with its frequency as TrieWord objects."""
        return self.root.getWords()

    def addWordsFromFile(self, inputFile):
        """Read *inputFile* and insert every alphabetic word it contains.

        Bug fixes relative to the original:
        * a word running to the end of the file is no longer dropped;
        * empty "words" (e.g. leading punctuation) are no longer inserted,
          which previously inflated the root's frequency;
        * the file is closed even when reading fails (with-statement).
        """
        with open(inputFile, "r") as handle:
            text = handle.read()
        length = len(text)
        wordStart = 0
        index = 0
        while index < length:
            if not text[index].isalpha():
                word = text[wordStart:index]
                if word:
                    self.insert(word)
                # Skip the run of non-letter separators.
                while index < length and not text[index].isalpha():
                    index += 1
                wordStart = index
            index += 1
        # Flush a trailing word that ends exactly at end-of-file.
        if wordStart < length:
            self.insert(text[wordStart:])

    def exportTreeToFile(self, outputFile):
        """Write every word and its frequency to *outputFile*, one per line."""
        with open(outputFile, "w") as handle:
            for trieWord in self.getWords():
                handle.write(str(trieWord.word) + " " + str(trieWord.frequency) + "\n")
# Manual smoke test: builds a small trie when the module is run directly.
if __name__ == "__main__":
    boompie = Trie()
    boompie.insert("Test")
    boompie.insert("test2")
boompie.insert("appelaaneenstok") | null | Week_3/trie.py | trie.py | py | 6,131 | python | en | code | null | code-starcoder2 | 51 |
190995763 | import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import pandas as pd
# Load the gapminder-style dataset once at import time.
df = pd.read_csv('data.csv')
app = dash.Dash()
# One dropdown option per distinct year found in the data.
year_options = []
for year in df['year'].unique():
    year_options.append({'label':str(year), 'value':year})
# Layout: a graph driven by a year picker (defaults to the earliest year).
app.layout = html.Div(
    [
        dcc.Graph(
            id = 'graph'
        ),
        dcc.Dropdown(
            id='year-picker',
            options=year_options,
            value=df['year'].min()
        )
    ]
)
@app.callback(
    Output('graph','figure'),
    [Input('year-picker','value')]
)
def update_figure(selected_year):
    """Return a fresh figure dict for *selected_year*.

    One marker trace per continent: GDP per capita (log x-axis) against
    life expectancy, with country names as hover text.
    """
    year_rows = df[df['year'] == selected_year]

    def continent_trace(continent_name):
        # Rows of the selected year belonging to one continent.
        rows = year_rows[year_rows['continent'] == continent_name]
        return go.Scatter(
            x=rows['gdpPercap'],
            y=rows['lifeExp'],
            text=rows['country'],
            mode='markers',
            marker={'size': 15},
            opacity=0.7,
        )

    layout = go.Layout(
        title='Scatter Plot',
        xaxis={'title': 'GDP Per Capita', 'type': 'log'},
        yaxis={'title': 'Life Expectency'},
    )
    return {
        'data': [continent_trace(name) for name in year_rows['continent'].unique()],
        'layout': layout,
    }
}
# Start the Dash development server (debug mode enables hot reloading).
if __name__ == '__main__':
    app.run_server(debug=True)
| null | dash_basic/4_dash_with_realdata.py | 4_dash_with_realdata.py | py | 1,638 | python | en | code | null | code-starcoder2 | 51 |
67070939 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
from bdinterface import BDInterface
class UserManagerDB(object):
    """User repository backed by the BDInterface database gateway."""
    def __init__(self):
        # Single shared gateway; each operation connects/disconnects itself.
        self.bd = BDInterface()
    def get_user(self, email):
        """Fetch the user row for *email* and build a User from it.

        Returns None implicitly when no row matches.
        NOTE(review): the guard checks ``len(u) > 0`` but unpacks ``u[1:]``
        (the first column is presumably the DB id) — confirm the row layout
        of BDInterface.getUsuario matches User's constructor arguments.
        """
        try:
            self.bd.connect()
            u = self.bd.getUsuario(email)
            if len(u) > 0:
                return User(*u[1:])
        finally:
            # Always release the connection, even on lookup failure.
            self.bd.disconnect()
    def insert_user(self, user):
        """Persist *user* through the gateway; always disconnects afterwards."""
        try:
            self.bd.connect()
            self.bd.insertarUsuario(user.email,user.cigarsPerDay,user.cigarsPerPacket,user.pricePerPacket,user.stopSmokingDate,user.totalUnsmokedCigars,
                user.totalMoneySaved,user.totalTimeSaved,user.totalDaysClean)
        finally:
            self.bd.disconnect()
class UserManager(object):
    """In-memory registry of users, keyed by email.

    Bug fix: ``insert_user`` was defined without ``self``, so calling it as
    a bound method (``manager.insert_user(u)``) raised a TypeError.
    """
    def __init__(self):
        # Backing store; lookup is a linear scan (fine for small user counts).
        self.user_list = []
    def get_user(self, email):
        """Return the first registered user whose email matches, or None."""
        for user in self.user_list:
            if user.email == email:
                return user
        return None
    def insert_user(self, user):
        """Add *user* to the registry."""
        self.user_list.append(user)
class User(object):
    """Value object describing a user of the stop-smoking tracker.

    ``to_json`` hand-builds the JSON string; the field layout (including
    the historical ``pricerPerPacket`` key spelling) is preserved for
    compatibility with existing consumers of the API.

    Bug fix: ``__eq__`` used a bare ``except:`` that swallowed every
    exception (including KeyboardInterrupt); it now handles only the
    AttributeError raised when the other operand has no ``email``.
    """
    def __init__(self, email, cigarsPerDay, cigarsPerPacket, pricePerPacket, stopSmokingDate, totalUnsmokedCigars=20, totalMoneySaved=44.90, totalTimeSaved=20, totalDaysClean=20):
        # userId is a placeholder until persistence assigns a real id.
        self.userId = 0
        self.email = email
        self.cigarsPerDay = cigarsPerDay
        self.cigarsPerPacket = cigarsPerPacket
        self.pricePerPacket = pricePerPacket
        self.stopSmokingDate = stopSmokingDate
        self.totalUnsmokedCigars = totalUnsmokedCigars
        self.totalMoneySaved = totalMoneySaved
        self.totalTimeSaved = totalTimeSaved
        self.totalDaysClean = totalDaysClean
    def __userget_to_json (self,userId,cigarsPerday,cigarsPerPacket,pricerPerPacket,stopSmokingDate,totalUnsmokedCigars,totalMoneySaved,totalTimeSaved,totalDaysClean):
        # Legacy hand-rolled serialization; kept byte-compatible on purpose.
        return '\
{\
    "userId": '+str(userId)+',\
    "cigarsPerDay": '+str(cigarsPerday)+',\
    "cigarsPerPacket": '+str(cigarsPerPacket)+',\
    "pricerPerPacket": '+str(pricerPerPacket)+',\
    "stopSmokingDate": "'+str(stopSmokingDate)+'",\
    "totalUnsmokedCigars": '+str(totalUnsmokedCigars)+',\
    "totalMoneySaved": '+str(totalMoneySaved)+',\
    "totalTimeSaved": '+str(totalTimeSaved)+',\
    "totalDaysClean": '+str(totalDaysClean)+'\
} '
    def to_json(self):
        """Serialize this user as a JSON object string."""
        return self.__userget_to_json(self.userId, self.cigarsPerDay, self.cigarsPerPacket, self.pricePerPacket, self.stopSmokingDate, self.totalUnsmokedCigars, self.totalMoneySaved, self.totalTimeSaved, self.totalDaysClean)
    def __eq__(self, other):
        """Users are equal when their email addresses match."""
        try:
            return self.email == other.email
        except AttributeError:
            # Comparison against anything without an email is simply unequal.
            return False
    def log(self):
        # Placeholder; no logging behavior defined yet.
        pass
def tips_to_json(tipId, text):
    '''Converts tip to json.

    Args:
        tipId: numeric identifier of the tip.
        text: tip body text.

    Returns:
        Json formated string.

    Bug fix: the original concatenated *text* straight into the output, so
    a quote or backslash in the tip produced invalid JSON; json.dumps now
    escapes the value correctly.
    '''
    import json  # local import keeps this fix self-contained
    return json.dumps({"tipId": tipId, "text": text})
def get_tips():
    '''Get tip from database.

    Returns:
        Tuple (tipId, text).
    '''
    # Hard-coded placeholder until a real database lookup exists.
    tip_id = 148
    tip_text = "Quita cigarrillos y ceniceros del automovil"
    return (tip_id, tip_text)
def log_to_json(_id, userId, day, cigarsSmoked, ok):
    '''Serialize one smoking-log entry as a JSON object string.

    Args:
        _id: log entry identifier.
        userId: owning user's identifier.
        day: ISO-8601 timestamp string.
        cigarsSmoked: number of cigars smoked that day.
        ok: whether the day met the goal; emitted under "is_ok" as a boolean.

    Returns:
        Json formated string.

    Bug fix: values were concatenated into the string without escaping;
    json.dumps now guarantees well-formed output.
    '''
    import json  # local import keeps this fix self-contained
    return json.dumps({
        "id": _id,
        "userId": userId,
        "is_ok": bool(ok),
        "day": day,
        "smokedCigars": cigarsSmoked,
    })
def get_log():
    '''Get log from database.

    Returns:
        Tuple (id, userId, day, cigarsSmoked, ok)
    '''
    # Hard-coded sample record until a real database lookup exists.
    record = (123465, 33002, "2015-02-20T18:25:43+01:00", 12, True)
    return record
users = UserManagerDB() | null | wsgi/model.py | model.py | py | 3,774 | python | en | code | null | code-starcoder2 | 51 |
146222736 | # Faça um programa que leia uma quantidade indeterminada de números positivos e conte quantos deles estão nos seguintes intervalos: [0-25], [26-50], [51-75] e [76-100]. A entrada de dados deverá terminar quando for lido um número negativo.
a, b, c, d = 0, 0, 0, 0
n = 0
while (n > -1):
n = int(input('Informe um valor de 0-100 ou um número negativo para sair: '))
if (n>-1 and n<26):
a+=1
elif (n>25 and n<51):
b+=1
elif (n>50 and n<76):
c+=1
elif (n>75 and n<101):
d+=1
else:
print('Valor maior que 100. Valor não contabilizado.')
print('[0-25] = {}'.format(a))
print('[26-50] = {}'.format(b))
print('[51-75] = {}'.format(c))
print('[76-100] = {}'.format(d)) | null | Repetition/42.py | 42.py | py | 738 | python | en | code | null | code-starcoder2 | 51 |
def unCaesar(huruf,n):
    """Shift character *huruf* back by *n* code points (no wrap-around).

    Wrap-around for the A-Z range is handled by the caller.
    """
    return chr(ord(huruf) - n)
# Interactive menu loop (Indonesian UI): A = pick an encrypted file,
# B = decrypt it with a Caesar shift, C = quit.
while True:
    print("="*50)
    print("A. Masukan file terenskripsi sandi caesar")
    print("B. Ubah menjadi normal")
    print("C. Selesai")
    first = str(input("Pilihan : "))
    print("-"*50)
    try:
        if (first == "A") or (first == "a"):
            # Only validates that the file can be opened; content is read in B.
            cari = str(input("Masukan file: (contoh: e:\\__.txt) : "))
            myfile = open(cari,"r")
            myfile.close()
            print("\nFile ditemukan, silahkan lanjutkan ke opsi B\n")
        elif (first == "B") or (first == "b"):
            # NOTE(review): choosing B before A leaves `cari` undefined; the
            # NameError handler below covers that deliberately.
            print("\n!!! Jika bilangan di enskripsikan 2, maka masukan 2")
            print(" Jangan tambahkan negatif. !!!\n")
            geser = int(input("Enskripsikan sejauh? (Bil. Bulat) : "))
            n = geser % 26
            save = str(input("File akan disimpan di? Contoh : e:\\___.txt : "))
            myfile = open(cari,"r")
            output = open(save,"w")
            char = myfile.read()
            # Decryption operates on upper-case A-Z (codes 65..90) only.
            charUp= char.upper()
            listChar = list(charUp)
            for teks in listChar:
                angka = ord(teks)
                if (teks.isalpha()) and (angka - n >= 65):
                    # Plain shift stays inside A-Z.
                    output.write(unCaesar(teks,n))
                elif (teks.isalpha()) and (angka - n < 65):
                    # Wrap around past 'A' back to the end of the alphabet.
                    x = angka - n
                    y = 65 - x
                    new = 91 - y
                    output.write(chr(new))
                else:
                    # Non-letters are replaced by spaces in the output.
                    output.write(" ")
            myfile.close()
            output.close()
            print("\nSukses! Silahkan buka di",save,"untuk melihat hasilnya.\n")
        elif (first == "C") or (first == "c"):
            print("TERIMA KASIH")
            break
        else:
            print("Pilihlah sesuai opsi yang tersedia.")
    except NameError:
        print("\nAnda harus mencari file text pada opsi A terlebih dahulu")
    except IndexError:
        print("\nAngka yang anda masukan pada opsi A tidak sesuai format. Pilih SELESAI, kemudian mulai ulang program.")
    except PermissionError:
        print("\nMasukan lokasi penyimpanan sesuai lokal disk yang tersedia dan jangan masukkan pada disk system.")
    except ValueError:
print("\nBilangan yang anda masukkan tidak valid") | null | Praktikum 10/7.py | 7.py | py | 2,410 | python | en | code | null | code-starcoder2 | 51 |
46435043 | # Given N numbers: the first number in the input is N, after that N integers are given.
# Count the number of zeros among the given integers and print it.
# You need to count the number of numbers that are equal to zero, not the number of zero digits.
# NOTE(review): `sum` shadows the builtin and the loop index `i` is
# immediately overwritten by the value read in — works, but worth renaming.
sum=0
# First input is the count N; then read N integers and count those equal to 0.
for i in range (int(input())):
    i=int(input())
    if i==0:
        sum+=1
print(sum) | null | Simple Examples/The number of zeros.py | The number of zeros.py | py | 348 | python | en | code | null | code-starcoder2 | 51 |
549900055 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The BBMQ server is required to accept 2 connections. one from the producer and one from the consumer.
Each topic will have one queue. Topic is an abstraction basically for a queue. The name 'Topic' is inspired from apache kafka
Producer: Publisher of the messages
Consumer: Subscriber of the messages
Communication between the connection thread and the main thread. It uses a simple queue for communication purposes. Whenever a new connection is established, its details will be stored in the queue, for whatever number of new connections
"""
import socket
import logging, logging.config
import threading
import sys, os
import ast
import traceback
import Queue
import datetime
import signal
# --------------------------Custom imports------------------------------------------
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import settings
from bbmq import BBMQ
from partition_messages import Message
from message import BaseMessage
USE_DB = settings.USE_DB
if USE_DB:
import models
from models import ModelManager
from models import Queue as QueueModel
from models import Message as MessageModel
LOG_FILEPATH = settings.LOG_FILEPATH
LOG_LEVEL = settings.LOG_LEVEL
SERVER_MAX_QUEUED_CON = settings.SERVER_MAX_QUEUED_CON
TOPICS = settings.TOPICS
CLIENT_PUBLISHER = settings.CLIENT_PUBLISHER
CLIENT_SUBSCRIBER = settings.CLIENT_SUBSCRIBER
MAX_MESSAGE_SIZE = settings.MAX_MESSAGE_SIZE
SERVER_ACKNOWLEDGEMENT = settings.SERVER_ACKNOWLEDGEMENT
CLIENT_SHUTDOWN_SIGNAL = settings.CLIENT_SHUTDOWN_SIGNAL
CONSUMER_REQUEST_WORD = settings.CONSUMER_REQUEST_WORD
INVALID_PROTOCOL = settings.INVALID_PROTOCOL
EMPTY_QUEUE_MESSAGE = settings.EMPTY_QUEUE_MESSAGE
PRODUCER_ACK_MESSAGE = settings.PRODUCER_ACK_MESSAGE
CLOSE_CONNECTION_SIGNAL = settings.CLOSE_CONNECTION_SIGNAL
HEAD = settings.HEAD
TAIL = settings.TAIL
PARTITION_SIZE = settings.PARTITION_SIZE
logging.config.dictConfig(settings.LOGGING)
logger = logging.getLogger("bbmq_server_module")
class ProducerThread(threading.Thread):
    """
    Per-connection worker for a producer client: receives framed messages
    from the socket and publishes their payloads onto the topic's queue.
    """
    def __init__(self, producer_socket, inbound_socket_address, queue, topic_name):
        """
        initialize the thread. During initialization of this thread, it must confirm to the
        producer that the producer can now start communication
        :param producer_socket: connected socket for this producer client
        :param inbound_socket_address: remote (host, port) pair, used for logging
        :param queue: BBMQ queue instance backing the topic
        :param topic_name: name of the topic this producer publishes to
        """
        threading.Thread.__init__(self)
        self.logger = logging.getLogger("ProducerThread")
        self.logger.debug("Initializing Producer Thread for socket adddress: {}".format(
            inbound_socket_address))
        self.socket = producer_socket
        self.queue = queue
        self.topic_name = topic_name
        # Handshake: tell the producer it may start sending.
        self.socket.send(SERVER_ACKNOWLEDGEMENT)
        if USE_DB:
            # One DB session per producer connection; closed in run()'s finally.
            self.session = ModelManager.create_session(models.engine)
            self.queue_object = self.session.query(QueueModel).filter(QueueModel.name == topic_name).first()
    def run(self):
        """
        run the thread. called when the start() method of Thread super class is called
        :return:
        """
        msg = None
        msg_body = None
        try:
            while True:
                try:
                    # The Queue will only store the message and thats all.
                    msg = BaseMessage(message="")
                    msg_body = BaseMessage(message="")
                    # Accumulate PARTITION_SIZE chunks until the frame TAIL arrives.
                    while True:
                        part = self.socket.recv(PARTITION_SIZE)
                        msg.append(part)
                        self.logger.debug("message now: ")
                        self.logger.debug(msg)
                        has_tail, message_tail = msg.has_message_tail()
                        has_head, message_head = msg.has_message_head()
                        if has_tail:
                            self.logger.debug("TAIL received for message")
                            # presumably message_tail is the payload up to the TAIL
                            # marker — confirm against partition_messages.Message.
                            msg_body.append(message_tail)
                            break
                        elif has_head:
                            self.logger.debug("HEAD received for message")
                    if msg_body.equals(CLIENT_SHUTDOWN_SIGNAL):
                        # NOTE(review): these two calls use the module-level
                        # `logger` rather than self.logger — probably unintended.
                        logger.info("CLIENT_SHUTDOWN_SIGNAL recieved")
                        logger.info("Closing the connection with the producer")
                        self.logger.debug("Packetizing CLOSE_CONNECTION_SIGNAL")
                        close_con_signal = Message(CLOSE_CONNECTION_SIGNAL)
                        for packet in close_con_signal:
                            self.socket.send(packet)
                        del(close_con_signal)
                        # Leave the outer receive loop; cleanup happens in finally.
                        break
                    else:
                        self.logger.debug("Received payload")
                        if USE_DB:
                            self.queue_object.message.append(MessageModel(is_fetched=False, content=msg_body,
                                                         publish_timestamp=datetime.datetime.utcnow(),
                                                         consumed_timestamp=datetime.datetime.utcnow()))
                            ModelManager.commit_session(self.session)
                            self.logger.info("Written to database")
                        self.logger.debug("Publishing to queue")
                        # The message is simply added to the queue
                        self.logger.debug("Enqueuing message: " )
                        self.logger.debug(msg_body)
                        self.queue.add_message(msg_body)
                        self.logger.info("Sending producer acknowledgement")
                        self.logger.debug("Packetizing PRODUCER_ACK_MESSAGE")
                        producer_ack_message = Message(PRODUCER_ACK_MESSAGE)
                        for packet in producer_ack_message:
                            self.socket.send(packet)
                except Exception:
                    stack = traceback.format_exc()
                    self.logger.error(stack)
                    raise Exception
        except Exception:
            self.logger.error("Socket Error. Check the logs to know more")
            exc_type, exc_val, exc_tb = sys.exc_info()
            stack = traceback.format_exc()
            self.logger.error(stack)
            traceback.print_exception(exc_type, exc_val, exc_tb)
        finally:
            # Always release per-connection resources, whatever the exit path.
            self.logger.debug("Deleting msg_body and msg if exists")
            if msg:
                del(msg)
            if msg_body:
                del(msg_body)
            if USE_DB:
                self.logger.info("Closing database session")
                ModelManager.close_session(self.session)
            self.logger.info("Closing socket: {} for queue: {}".format(self.socket,
                                                                       self.topic_name))
            self.socket.close()
            self.logger.info("Killing Producer Thread for socket: {} and queue: {}".format(
                self.socket, self.topic_name))
class ConsumerThread(threading.Thread):
    """
    Per-connection worker for a consumer client: serves queued messages in
    response to the consumer's fetch requests.
    """
    def __init__(self, consumer_socket, inbound_socket_address, queue, topic_name):
        """
        initialize the thread
        :param consumer_socket: connected socket for this consumer client
        :param inbound_socket_address: remote (host, port) pair, used for logging
        :param queue: BBMQ queue instance backing the topic
        :param topic_name: name of the topic this consumer subscribes to
        """
        threading.Thread.__init__(self)
        self.logger = logging.getLogger("ConsumerThread")
        self.logger.debug("Initializing Consumer Thread for socket address: {}".format(
            inbound_socket_address))
        self.socket = consumer_socket
        self.queue = queue
        self.topic_name = topic_name
        # Handshake: tell the consumer it may start sending requests.
        self.socket.send(SERVER_ACKNOWLEDGEMENT)
    def run(self):
        """
        run the thread. called when the start() method of Thread super class is called
        :return:
        """
        msg = None
        msg_body = None
        try:
            while True:
                try:
                    msg = BaseMessage(message="")
                    msg_body = BaseMessage(message="")
                    # Accumulate chunks until the frame TAIL arrives.
                    while True:
                        part = self.socket.recv(PARTITION_SIZE)
                        msg.append(part)
                        has_tail, msg_tail = msg.has_message_tail()
                        has_head, msg_head = msg.has_message_head()
                        if has_tail:
                            self.logger.debug("TAIL received for message")
                            msg_body.append(msg_tail)
                            break
                        elif has_head:
                            self.logger.debug("HEAD received for message")
                    if msg_body.equals(CLIENT_SHUTDOWN_SIGNAL):
                        self.logger.info("CLIENT_SHUTDOWN_SIGNAL recieved")
                        # the close connection signal has to be sent using packets
                        packets = Message(CLOSE_CONNECTION_SIGNAL)
                        self.logger.info("Sending CLOSE_CONNECTION_SIGNAL")
                        self.logger.debug("Packetizing CLOSE_CONNECTION_SIGNAL")
                        for packet in packets:
                            self.socket.send(packet)
                        break
                    if msg_body.equals(CONSUMER_REQUEST_WORD):
                        self.logger.debug("Received request for new message")
                        self.logger.debug("Fetching from queue")
                        # Blocks until a message becomes available on the queue.
                        queue_message = self.queue.fetch_message(block=True)
                        queue_message = Message(message=str(queue_message))
                        self.logger.info("Dequeued message: " + str(queue_message))
                        self.logger.debug("Packetizing message from queue")
                        for packet in queue_message:
                            self.socket.send(packet)
                        # TODO: Add response from client after receiving message
                        # TODO: Store the message id of the message in the queue for proper replacement
                        if USE_DB:
                            self.logger.info("Updating database")
                            self.logger.info("Starting session")
                            self.session = ModelManager.create_session(models.engine)
                            self.queue_object = self.session.query(QueueModel).filter(QueueModel.name ==
                                                                                      self.topic_name).first()
                            message_objs = self.session.query(MessageModel).filter(MessageModel.content.ilike(str(
                                queue_message))).all()
                            # Mark the first still-unfetched copy of this content as consumed.
                            for message_obj in message_objs:
                                if not message_obj.is_fetched:
                                    message_obj.is_fetched = True
                                    break
                            ModelManager.commit_session(self.session)
                            self.logger.info("Database updated")
                            self.logger.info("Closing database session")
                            ModelManager.close_session(self.session)
                    else:
                        # Anything other than a request word violates the protocol.
                        self.socket.send(HEAD)
                        self.socket.send(INVALID_PROTOCOL)
                        self.socket.send(TAIL)
                except Exception:
                    stack = traceback.format_exc()
                    self.logger.error(stack)
                    raise Exception
        except Exception:
            self.logger.error("Socket Error. Check the logs to know more")
            exc_type, exc_val, exc_tb = sys.exc_info()
            stack = traceback.format_exc()
            self.logger.error(stack)
            traceback.print_exception(exc_type, exc_val, exc_tb)
        finally:
            self.logger.debug("Deleting msg and msg_body if exists")
            if msg:
                del(msg)
            if msg_body:
                del(msg_body)
            if USE_DB:
                # NOTE(review): self.session is only assigned after the first
                # fetched message; this raises AttributeError if the consumer
                # disconnects before ever requesting one — confirm and guard.
                if self.session:
                    self.logger.info("Closing database session")
                    ModelManager.close_session(self.session)
            self.logger.info("Closing socket: {} for queue: {}".format(self.socket,
                                                                       self.topic_name))
            self.socket.close()
            self.logger.info("Killing Consumer Thread for socket: {} and queue: {}".format(
                self.socket, self.topic_name))
class ConnectionThread(threading.Thread):
    """
    Accept-loop thread: waits for inbound client connections on the server
    socket, validates each client's metadata handshake and hands accepted
    connections to the main thread through the shared connection queue.

    Bug fix: ``join`` previously called ``threading.Thread.join()`` unbound
    (without ``self``) and dropped *timeout*, raising a TypeError at
    shutdown; it now delegates correctly.
    """
    def __init__(self, server_socket, connection_queue, topics):
        """
        initialize the thread
        :param server_socket: listening server socket to accept() on
        :param connection_queue: queue used to pass accepted connections to the main thread
        :param topics: list of available topics that clients can publish/subscribe to
        """
        threading.Thread.__init__(self)
        self.logger = logging.getLogger("ConnectionThread")
        self.sock = server_socket
        self.connection_queue = connection_queue
        self.topics = topics
    def run(self):
        """
        Accept clients forever. Each client first sends its metadata: a string
        representation of a dict with two fields, "type" (producer/consumer)
        and "topic". Invalid clients are disconnected; valid ones are queued.
        """
        while True:
            client_metadata, socket_connection, inbound_socket_address = self.connect()
            try:
                client_type = ast.literal_eval(client_metadata)["type"]
                client_topic = ast.literal_eval(client_metadata)["topic"]
                if client_topic not in self.topics:
                    self.logger.info("Client '{}' has subscribed to a non-existing"
                                     " topic {}".format(inbound_socket_address, client_topic))
                    socket_connection.close()
                    continue
                if client_type == CLIENT_PUBLISHER:
                    self.logger.info("Client is a producer and will publish to queue:"
                                     " {}".format(client_topic))
                elif client_type == CLIENT_SUBSCRIBER:
                    self.logger.info("Client is a consumer and will subscribe to queue:"
                                     " {}".format(client_topic))
                else:
                    self.logger.info("Client type not defined. Closing the connection")
                    socket_connection.close()
                    continue
                self.logger.debug("Client data pushed to connection queue")
                # Hand-off to the main thread, which spawns the worker thread.
                self.connection_queue.put({
                    "client_type": client_type,
                    "client_topic": client_topic,
                    "socket": socket_connection,
                    "inbound_socket_address": inbound_socket_address
                })
            except Exception:
                # Malformed metadata (literal_eval/KeyError) lands here; the
                # loop keeps serving subsequent clients.
                self.logger.error("Error in Connection Thread. Check the logs for the"
                                  " Traceback")
                exc_type, exc_val, exc_tb = sys.exc_info()
                stack = traceback.format_exc()
                self.logger.error(stack)
                traceback.print_exception(exc_type, exc_val, exc_tb)
    def join(self, timeout=None):
        """
        Close the server socket, then join this thread.
        :param timeout: optional join timeout, forwarded to Thread.join
        """
        self.logger.info("Closing Server socket")
        self.sock.close()
        threading.Thread.join(self, timeout)
    def connect(self):
        """
        Block on accept() and read the client's metadata handshake.
        :return: (client_metadata, socket_connection, inbound_socket_address)
        """
        # the return value of accept() is a tuple c, addr where c is a new socket
        # object usable to send and receive data on the connection and addr is
        # the address bound to the socket on the other end.
        self.logger.info("Waiting for connection from clients")
        socket_connection, inbound_socket_address = self.sock.accept()
        # client_type can be a producer or a consumer
        client_metadata = socket_connection.recv(1024)
        self.logger.info("Connection received from client: {}".format(inbound_socket_address))
        return client_metadata, socket_connection, inbound_socket_address
class BBMQServer(object):
    """
    BBMQ server.

    Owns the listening socket, the registry of topics (each with its queue,
    producers and consumers) and the helper threads that service clients.
    """
    def __init__(self):
        """
        Initialize the server: create the listening socket, bind the
        hostname and configured port to it, and listen for connections.
        """
        self.logger = logging.getLogger("bbmq_server_module")
        self.sock = socket.socket()
        self.hostname = socket.gethostname()
        self.port = settings.PORT
        self.sock.bind((self.hostname, self.port))
        self.sock.listen(SERVER_MAX_QUEUED_CON)
        # topic name -> {"queue": BBMQ|None, "producers": [...], "consumers": [...]}
        self.topics = {}
        self.connection_thread = None
        self.connection_queue = Queue.Queue()
        # store the instances of all the threads.
        self.all_client_threads = {
            "connection_threads": [],
            "producer_threads": [],
            "consumer_threads": []
        }

    def create_topic(self, topic_name):
        """
        Register a new topic.

        :param topic_name: name of the topic to create
        :return: 0 on success, -1 if the topic already exists
        """
        # Idiom fix: membership test directly on the dict instead of .keys().
        if topic_name in self.topics:
            return -1
        self.logger.info("creating topic: {}".format(topic_name))
        self.topics[topic_name] = {
            "queue": None,
            "producers": [],
            "consumers": []
        }
        return 0

    def get_topic_queue(self, topic_name):
        """
        Get the queue instance for a topic.

        :param topic_name: name of the topic to look up
        :return: the topic's queue (may be None if not yet created), or -1
            if the topic does not exist
        """
        if topic_name not in self.topics:
            return -1
        return self.topics[topic_name]["queue"]

    def update_topic(self, topic_name, producer, consumer):
        """
        Attach a new producer or consumer to an existing topic.

        Exactly one of ``producer``/``consumer`` is expected per call: when
        ``producer`` is None the consumer is registered, otherwise the
        producer is registered.

        :param topic_name: name of an existing topic
        :param producer: producer to register, or None
        :param consumer: consumer to register (used only when producer is None)
        """
        # Idiom fix: compare against None with ``is`` rather than ``==``.
        if producer is None:
            self.topics[topic_name]["consumers"].append(consumer)
        else:
            self.topics[topic_name]["producers"].append(producer)

    def create_queue(self):
        """
        Create a custom queue instance and return it.

        :return: a fresh BBMQ object
        """
        queue = BBMQ()
        return queue

    def spawn_connection_thread(self):
        """
        Spawn the thread that listens for new connections from new
        producers or consumers.
        """
        self.logger.debug("Starting connection thread")
        self.connection_thread = ConnectionThread(self.sock, self.connection_queue,
                                                  self.topics.keys())
        self.all_client_threads["connection_threads"].append(self.connection_thread)
        self.connection_thread.start()

    def spawn_producer_thread(self, producer_socket, inbound_socket_address, queue,
                              topic_name):
        """
        Spawn a producer thread to publish to the queue.

        :param producer_socket: connected socket of the producer client
        :param inbound_socket_address: remote address of the client
        :param queue: topic queue the producer publishes to
        :param topic_name: name of the topic
        """
        producer_thread = ProducerThread(producer_socket, inbound_socket_address, queue,
                                         topic_name)
        self.logger.debug("Starting producer thread for socket: {} and queue: {}".format(
            inbound_socket_address, queue))
        self.all_client_threads["producer_threads"].append(producer_thread)
        producer_thread.start()

    def spawn_consumer_thread(self, consumer_socket, inbound_socket_address, queue,
                              topic_name):
        """
        Spawn a consumer thread to subscribe to the queue.

        :param consumer_socket: connected socket of the consumer client
        :param inbound_socket_address: remote address of the client
        :param queue: topic queue the consumer subscribes to
        :param topic_name: name of the topic
        """
        consumer_thread = ConsumerThread(consumer_socket, inbound_socket_address, queue,
                                         topic_name)
        self.logger.debug("Starting consumer thread for socket: {} and queue: {}".format(
            inbound_socket_address, queue))
        self.all_client_threads["consumer_threads"].append(consumer_thread)
        consumer_thread.start()

    def join_connection_thread(self):
        """
        Join the connection thread (its join() also closes the server socket).
        """
        self.logger.debug("Joining Connection thread")
        self.connection_thread.join()
| null | bbmq/server/bbmq_server.py | bbmq_server.py | py | 21,218 | python | en | code | null | code-starcoder2 | 51 |
70092208 | import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
mpl.rcParams['text.usetex'] = True
# NOTE(review): newer matplotlib expects 'text.latex.preamble' to be a single
# string, not a list -- confirm against the installed matplotlib version.
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}',r'\usepackage{siunitx}'] #
from scipy.optimize import curve_fit
# Measured data; axis labels below suggest x holds periods T (seconds) and
# y the magnetic field B (tesla) -- confirm with the lab notes.
x = np.array([17.869,15.306,13.840,12.707,11.889,11.181,10.575,10.041,9.598,9.190])
y = np.array([1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0])
# Linearize: plot B against 1/T^2 so a straight-line fit applies.
x = 1/x**2
plt.xlabel(r'$\frac{1}{T^2}\:/\:\si{\per\square\second}$')
plt.ylabel(r'$B\:/\:\si{\tesla}$')
plt.plot(x, y,"b.",label="Messdaten")
def g(x,m,b):
    # Straight-line model for curve_fit: slope m, intercept b.
    return b+m*x
popt, pcov = curve_fit(g, x, y)
print("Steigung =",popt[0],"Abschnitt =", popt[1])
# 1-sigma parameter uncertainties from the covariance diagonal.
perr = np.sqrt(np.diag(pcov))
print("Fehler =", perr)
def f(x):
    # Fitted line evaluated with the optimal parameters.
    return popt[0]*x+popt[1]
plt.plot(x, f(x), 'r-', label=r'Fit')
plt.legend(loc='best')
plt.tight_layout()
plt.savefig('plot.pdf')
| null | 102/Werte/plot.py | plot.py | py | 845 | python | en | code | null | code-starcoder2 | 51 |
580470912 | import unittest
import gold as target
class Tester(unittest.TestCase):
    """Tests for the gold module."""

    def test_ex(self):
        # Adjacency list of the example graph; calc() is expected to
        # report 2 for it.
        graph = {node: neighbours for node, neighbours in [
            (0, [2]),
            (1, [1]),
            (2, [0, 3, 4]),
            (3, [2, 4]),
            (4, [2, 3, 6]),
            (5, [6]),
            (6, [4, 5]),
        ]}
        self.assertEqual(target.calc(graph), 2)


if __name__ == '__main__':
    unittest.main()
| null | 12/test_gold.py | test_gold.py | py | 384 | python | en | code | null | code-starcoder2 | 51 |
359484083 | # coding=utf-8
from __future__ import absolute_import
from .routes import urlpatterns
from utils.verify import verify_token
from errors.base_errors import APIError
from flask import Blueprint, request, current_app
from utils.base_utils import make_json_response, route_inject
# Blueprint name; also the prefix of every endpoint listed below.
bp_name = "user"
# Endpoints that require a valid token before the request is processed.
user_api_endpoints = [
    "{}.delete_token".format(bp_name),
    "{}.set_alias".format(bp_name),
    "{}.get_alias".format(bp_name)
]
blueprint = Blueprint(bp_name, __name__)
# Attach the URL patterns declared in routes.py to this blueprint.
route_inject(blueprint, urlpatterns)
@blueprint.before_request
def before_request():
    """Verify the auth token for the protected endpoints of this blueprint."""
    if request.endpoint in user_api_endpoints:
        verify_token(current_app.config.get("DEBUG"))
@blueprint.errorhandler(APIError)
def blueprint_api_err(err):
    """Render any APIError raised by a view as a JSON response."""
    return make_json_response(err)
| null | server/blueprints/user/main.py | main.py | py | 762 | python | en | code | null | code-starcoder2 | 51 |
5341423 | import numpy
import matplotlib.pyplot as plt
import torch
def plot_head_map(mma, target_labels, source_labels):
    """Show an attention matrix *mma* as a heatmap.

    Rows are labelled with *target_labels*, columns with *source_labels*;
    mma is presumably shaped (len(target_labels), len(source_labels)) --
    the tick placement below assumes it.
    """
    fig, ax = plt.subplots()
    heatmap = ax.pcolor(mma, cmap=plt.cm.Blues)
    # put the major ticks at the middle of each cell
    ax.set_xticks(numpy.arange(mma.shape[1]) + 0.5, minor=False)
    ax.set_yticks(numpy.arange(mma.shape[0]) + 0.5, minor=False)
    # without this I get some extra columns rows
    # http://stackoverflow.com/questions/31601351/why-does-this-matplotlib-heatmap-have-an-extra-blank-column
    ax.set_xlim(0, int(mma.shape[1]))
    ax.set_ylim(0, int(mma.shape[0]))
    # want a more natural, table-like display
    ax.invert_yaxis()
    ax.xaxis.tick_top()
    # source words -> column labels
    ax.set_xticklabels(source_labels, minor=False)
    # target words -> row labels
    ax.set_yticklabels(target_labels, minor=False)
    plt.xticks(rotation=45)
    # plt.tight_layout()
    plt.show()
# column labels -> target words
# row labels -> source words
# Load the saved per-sentence attention matrices and visualize the first
# few alongside their source/target token sequences.
attns = torch.load('tools/alignment_train.pkl')
with open('data/rotowire/roto-sent-data.train.src', encoding='utf-8') as src_f, \
        open('data/rotowire/roto-sent-data.train.tgt', encoding='utf-8') as tgt_f:
    for idx, (line_src, line_tgt, attn) in enumerate(zip(src_f, tgt_f, attns)):
        srcs = line_src.strip().split()
        # Append the end-of-sentence marker as an extra target token.
        tgts = line_tgt.strip().split() + ['</s>']
        plot_head_map(attn.cpu().numpy(), tgts, srcs)
        # Only show the first 6 sentences.
        if idx >= 5:
            break
| null | tools/visualize_attention.py | visualize_attention.py | py | 1,488 | python | en | code | null | code-starcoder2 | 51 |
550881477 |
import pytest
from swarm64_tpc_toolkit import stats
@pytest.fixture
def stats_fixture():
    """Build a Stats instance pointed at a fake netdata endpoint."""
    netdata_url = 'http://fake-netdata:19999'
    disk = 'some_disk'
    return stats.Stats(netdata_url, disk)
def test_make_columns():
    """make_columns should emit the base columns plus one stats column per metric."""
    metrics = ['foo', 'bar']
    expected = list(stats.BASE_COLUMNS)
    for metric_name in metrics:
        expected.extend(f'{stat}_{metric_name}' for stat in stats.STATS_METRICS)
    result = stats.Stats.make_columns(metrics)
    # Order is not part of the contract, so compare sorted.
    assert sorted(result) == sorted(expected)
assert sorted(columns) == sorted(columns_expected)
# def query_netdata(self, start, end):
def test_query_netdata(mocker, stats_fixture):
    """query_netdata should issue one GET per chart and map chart id -> JSON body."""
    start = 123
    end = 456
    response_value = 'some fancy response'
    def get_return_json():
        return response_value
    # Patch requests.get so every chart request returns the canned JSON.
    requests_get_mock = mocker.patch('requests.get')
    requests_get_mock.return_value.json = get_return_json
    netdata_data = stats_fixture.query_netdata(start, end)
    for idx, chart_key in enumerate(stats_fixture.charts.keys()):
        # Each GET must have requested the matching chart id ...
        _, _, kwargs = requests_get_mock.mock_calls[idx]
        assert chart_key == kwargs['params']['chart']
        # ... and its payload must be stored under that chart id.
        assert chart_key in netdata_data
        assert netdata_data[chart_key] == response_value
def test_transform(stats_fixture):
    """transform should flatten per-chart tables into a header row plus data rows."""
    raw = {}
    for chart_id in stats_fixture.chart_ids:
        raw[chart_id] = {'labels': ['foo', 'bar'], 'data': [[1, 2]]}
    transformed = stats_fixture.transform(raw)
    # Header: one '<chart>.<label>' column per chart per label.
    expected_header = [f'{chart_id}.{label}'
                      for chart_id in stats_fixture.chart_ids
                      for label in ('foo', 'bar')]
    assert transformed[0] == expected_header
    assert transformed[1] == [1, 2] * 4
| null | tests/test_stats.py | test_stats.py | py | 1,694 | python | en | code | null | code-starcoder2 | 50 |
318817187 | #!/bin/python3
# https://www.hackerrank.com/challenges/alternating-characters/problem
import sys
def alternatingCharacters(s):
    """
    Return the minimum number of deletions needed so that no two adjacent
    characters of ``s`` are equal.

    Each position where a character repeats its predecessor costs exactly
    one deletion, so the answer is the count of equal adjacent pairs.

    :param s: input string (may be empty)
    :return: number of characters to delete
    """
    # zip(s, s[1:]) walks over all adjacent pairs; an empty or one-char
    # string has no pairs and correctly yields 0 (the original indexed
    # s[0] unconditionally and crashed on "").
    return sum(1 for prev, cur in zip(s, s[1:]) if prev == cur)
# Read the number of queries, then answer each query string on its own line.
q = int(input().strip())
for a0 in range(q):
    s = input().strip()
    result = alternatingCharacters(s)
    print(result)
| null | hackerrank/algorithms/strings/alternating_characters.py | alternating_characters.py | py | 435 | python | en | code | null | code-starcoder2 | 50 |
394416571 | from django.conf.urls import include, url
from myapp.api.views import User1View,User1DetailView,User1LoginView
# Namespace used when reversing these URLs (e.g. 'myapp:login').
app_name ='myapp'
# NOTE(review): django.conf.urls.url is removed in Django 4+; path/re_path is
# the modern spelling -- confirm the project's Django version.
urlpatterns=[
    url(r'^$',User1View.as_view(),name='user'),
    # url(r'^upload/',views.upload,name='upload'),
    url(r'^login/',User1LoginView.as_view(), name='login'),
    # url(r'^logout/$',views.logout1, name='logout'),
    # url(r'^registeration/', core_views.signup, name='registeration'),
    # url(r'^(?P<job>.+)$',User1DetailView.as_view(), name='download'),
]
| null | myapp/api/urls.py | urls.py | py | 508 | python | en | code | null | code-starcoder2 | 50 |
564267520 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import division
import os
import pytest
from astropy.table import Table
import numpy as np
from mica.archive import aca_l0, asp_l1
from Ska.Numpy import interpolate
# Only run the 2012 regression test when the local L0 archive is present.
has_l0_2012_archive = os.path.exists(os.path.join(aca_l0.CONFIG['data_root'], '2012'))
@pytest.mark.skipif('not has_l0_2012_archive', reason='Test requires 2012 L0 archive')
def test_l0_images_meta():
    """
    Confirm meta values match reference/regress values
    """
    imgs = aca_l0.get_l0_images(467055635, 467055639, slot=7)
    # Reference/regression values captured from a known-good run.
    assert imgs[0].meta == {'BGDAVG': 253,
                            'IMGCOL0': 7,
                            'IMGFUNC1': 2,
                            'IMGROW0': -12,
                            'IMGSIZE': 8,
                            'IMGSTAT': 0,
                            'IMGSCALE': 1025,
                            'INTEG': np.float32(1.696),
                            'TIME': np.float64(467055637.49031752)}
# Only run against checkouts that have both the 2007 L0 archive and the
# aspect L1 products on disk.
has_l0_2007_archive = os.path.exists(os.path.join(aca_l0.CONFIG['data_root'], '2007'))
has_asp_l1 = os.path.exists(os.path.join(asp_l1.CONFIG['data_root']))
@pytest.mark.skipif('not has_l0_2007_archive or not has_asp_l1', reason='Test requires 2007 L0 archive')
def test_get_l0_images():
    """
    Do a validation test of get_l0_images:
    - Get 20 mins of image data for slot 6 of obsid 8008 (very nice clean stars)
    - Do first moment centroids in row and col
    - Compare to aspect pipeline FM centroids for same slot data
    This is a deep test that all the signs are right. If not then everything
    breaks badly because the star image doesn't move in sync with row0, col0.
    """
    start = '2007:002:06:00:00'
    stop = '2007:002:06:20:00'
    imgs = aca_l0.get_l0_images(start, stop, slot=6)
    files = asp_l1.get_files(8008, content=['ACACENT'])
    acen = Table.read(files[0])
    # Pick FM centroids for slot 6
    ok = (acen['alg'] == 1) & (acen['slot'] == 6)
    acen = acen[ok]
    # Row and col centroids
    rcs = []
    ccs = []
    times = [img.TIME for img in imgs]
    # Easy way to do FM centroids with mgrid
    rw, cw = np.mgrid[0:6, 0:6]
    # rw = [[0, 0, 0, 0, 0, 0],
    #       [1, 1, 1, 1, 1, 1],
    #       [2, 2, 2, 2, 2, 2],
    #       [3, 3, 3, 3, 3, 3],
    #       [4, 4, 4, 4, 4, 4],
    #       [5, 5, 5, 5, 5, 5]]
    for img in imgs:
        # First-moment centroid: intensity-weighted mean pixel index,
        # shifted into absolute coordinates by row0/col0.
        norm = np.sum(img)
        rcs.append(np.sum(img * rw) / norm + img.row0)
        ccs.append(np.sum(img * cw) / norm + img.col0)
    # Interpolate pipeline centroids onto the image timestamps and compare.
    rcen = interpolate(acen['cent_i'], acen['time'], times)
    ccen = interpolate(acen['cent_j'], acen['time'], times)
    assert np.all(np.abs(rcen - rcs) < 0.05)
    assert np.all(np.abs(ccen - ccs) < 0.05)
@pytest.mark.skipif('not has_l0_2007_archive or not has_asp_l1', reason='Test requires 2007 L0 archive')
def test_get_slot_data_8x8():
    """
    Do a validation test of get_slot_data with centered 8x8 readouts:
    - Get 20 mins of image data for slot 6 of obsid 8008 (very nice clean stars)
    - Do first moment centroids in row and col
    - Compare to aspect pipeline FM centroids for same slot data
    This is a deep test that all the signs are right. If not then everything
    breaks badly because the star image doesn't move in sync with row0, col0.
    """
    start = '2007:002:06:00:00'
    stop = '2007:002:06:20:00'
    slot_data = aca_l0.get_slot_data(start, stop, slot=6, centered_8x8=True)
    files = asp_l1.get_files(8008, content=['ACACENT'])
    acen = Table.read(files[0])
    # Pick FM centroids for slot 6
    ok = (acen['alg'] == 1) & (acen['slot'] == 6)
    acen = acen[ok]
    # Row and col centroids
    times = slot_data['TIME']
    # Easy way to do FM centroids with mgrid
    rw, cw = np.mgrid[0:8, 0:8]
    img_raw = slot_data['IMGRAW']  # np.round(slot_data['IMGRAW']).astype(int)
    # Vectorized first-moment centroids over the whole stack; the -1 shifts
    # from the centered-8x8 frame back to absolute row/col coordinates.
    norm = np.sum(img_raw, axis=(1, 2))
    rcs = np.sum(img_raw * rw, axis=(1, 2)) / norm + slot_data['IMGROW0'] - 1
    ccs = np.sum(img_raw * cw, axis=(1, 2)) / norm + slot_data['IMGCOL0'] - 1
    # Interpolate pipeline centroids onto the data timestamps and compare.
    rcen = interpolate(acen['cent_i'], acen['time'], times)
    ccen = interpolate(acen['cent_j'], acen['time'], times)
    assert np.all(np.abs(rcen - rcs) < 0.05)
    assert np.all(np.abs(ccen - ccs) < 0.05)
| null | mica/archive/tests/test_aca_l0.py | test_aca_l0.py | py | 4,259 | python | en | code | null | code-starcoder2 | 50 |
142022268 | '''Module used to communicate and interact with player.
import as c'''
import pygame
from pygame.locals import *
import globvar as g
import utilities as u
class Interaction:
    '''Class representing an interaction with the player.

    An interaction is either a yes/no question, a free-text prompt, or a
    plain notification, rendered as a modal panel over the current screen.
    '''
    def __init__(self, interaction_type, question_str, text_str, min_char_count=0):
        # interaction_type: 'text' for a typed answer, anything else is
        # treated as a yes/no dialogue by run().
        self.type = interaction_type
        # Heading shown at the top of the panel.
        self.question_str = question_str
        # Longer body text shown below the heading.
        self.text_str = text_str
        # Minimum number of characters required for a 'text' answer.
        self.min_char_count = min_char_count
    def run(self, target, background, notification=False, insist_answer=False):
        """Display this interaction on *target* and block until it resolves.

        Returns the typed string for 'text' interactions, True/False for
        yes/no interactions, or None when the player dismisses the panel
        (clicking outside it). *insist_answer* disables dismiss-by-click
        for yes/no dialogues; *notification* turns the panel into a
        click/key-to-dismiss notice.
        """
        # --- layout constants (pixel positions/sizes of the panel parts) ---
        inside_rect = (10, 10, 680, 355)
        line_pos = (130, 170)
        question_pos = (90, 75)
        question_size = (500, 100)
        if notification is False:
            text_pos = (110, 270)
            text_size = (500, 80)
        else:
            text_pos = (110, 150)
            text_size = (500, 200)
        yes_pos = (175, 170)
        no_pos = (435, 170)
        yes_rect = Rect((300, 340), (100, 60))
        no_rect = Rect((550, 340), (80, 60))
        enter_pos = (192, 210)
        esc_pos = (430, 210)
        char_count_pos = (353, 220)
        surface_size = (700, 375)
        surface_pos = (130, 172)
        surface = pygame.Surface(surface_size)
        # Darken the current screen once and keep it as the backdrop.
        if background is None:
            background = target.copy()
            overlay = pygame.Surface((g.WINDOWWIDTH, g.WINDOWHEIGHT))
            overlay.set_alpha(180)
            overlay.fill((0, 0, 0))
            background.blit(overlay, (0, 0))
            target.blit(background, (0, 0))
        # inside
        surface.fill((0, 0, 0))
        pygame.draw.rect(surface, (255, 255, 255), inside_rect)
        # question (horizontally centered inside the panel)
        question = u.Text(self.question_str, g.BLACK,
                          g.communications_option_font, question_size)
        question_surf = g.communications_option_font.render(
            self.question_str, True, g.BLACK)
        question_size = question_surf.get_size()
        question_len = question_size[0]
        question_x = (inside_rect[2] - question_len) // 2
        question.render(surface, (question_x, question_pos[1]))
        # text
        text = u.Text(self.text_str, g.BLACK, g.another, text_size)
        text.render(surface, text_pos)
        default_cursor, pointer_cursor = u.cursors()
        if notification is False:
            # Optional hint about the minimum answer length.
            if self.min_char_count != 0:
                min_char_warning = u.Text('(nejméně ' + str(
                    self.min_char_count) + ' znaky / znaků)', g.BLACK, g.communications_hint_font)
                min_char_warning.render(surface, char_count_pos)
            if self.type == 'text':
                # line
                line = u.Text('_ _ _ _ _ _ _ _ _ _ _',
                              g.BLACK, g.communications_option_font)
                line.render(surface, line_pos)
            else:
                # options
                yes = u.Text('ANO', g.BLACK, g.communications_option_font)
                yes.render(surface, yes_pos)
                no = u.Text('NE', g.BLACK, g.communications_option_font)
                no.render(surface, no_pos)
                # hints
                enter = u.Text('(ENTER)', g.BLACK, g.communications_hint_font)
                esc = u.Text('(ESCAPE)', g.BLACK, g.communications_hint_font)
                enter.render(surface, enter_pos)
                esc.render(surface, esc_pos)
        target.blit(surface, surface_pos)
        # --- event loops: one branch per interaction mode ---
        if notification is True:
            # Notification: any Enter/Escape press, or a click outside the
            # panel, dismisses it (returns None).
            while True:
                u.checkForQuit()
                for event in pygame.event.get():
                    if event.type == MOUSEBUTTONUP:
                        x, y = event.pos
                        if not Rect(surface_pos, surface_size).collidepoint((x, y)):
                            return
                    if event.type == KEYUP:
                        if event.key == K_RETURN or event.key == K_ESCAPE:
                            return
                pygame.display.update()
                g.FPSCLOCK.tick(g.FPS)
                pygame.event.pump()
        elif self.type == 'text':
            # Free-text input: collect keystrokes until Enter (with enough
            # characters) or a click outside the panel.
            exit_dialogue = False  # NOTE(review): appears unused
            user_input = ''
            last_chr = -1  # index of the last typed character
            text_input = u.Text(user_input, g.BLACK,
                                g.communications_option_font)
            while True:
                changed = False
                u.checkForQuit()
                for event in pygame.event.get():
                    if event.type == KEYUP:
                        changed = True
                        # uppercase
                        if pygame.key.get_mods() & KMOD_SHIFT or pygame.key.get_mods() & KMOD_CAPS:
                            if pygame.key.name(event.key) in g.ALPHABET:
                                user_input = user_input + \
                                    chr(event.key).upper()
                                last_chr += 1
                        # lowercase
                        elif pygame.key.name(event.key) in g.ALPHABET:
                            user_input = user_input + chr(event.key)
                            last_chr += 1
                        # other
                        elif event.key == K_SPACE:
                            user_input = user_input + ' '
                            last_chr += 1
                        elif event.key == K_BACKSPACE:
                            if last_chr != -1:
                                user_input = user_input[:last_chr]
                                last_chr -= 1
                        elif event.key == K_RETURN:
                            # Accept only once the minimum length is met.
                            if len(user_input) >= self.min_char_count:
                                return user_input
                    if event.type == MOUSEBUTTONUP:
                        x, y = event.pos
                        if not Rect(surface_pos, surface_size).collidepoint((x, y)):
                            return
                if changed is True:
                    # Repaint the answer line with the current input.
                    pygame.draw.rect(surface, (255, 255, 255),
                                     (line_pos, (400, 50)))
                    line.render(surface, line_pos)
                    text_input = u.Text(user_input, g.BLACK,
                                        g.communications_option_font)
                    text_input.render(surface, line_pos)
                    target.blit(surface, (130, 172))
                pygame.display.update()
                g.FPSCLOCK.tick(g.FPS)
                pygame.event.pump()
        else:
            # Yes/no dialogue: Enter/click-ANO -> True, Escape/click-NE ->
            # False; a click outside dismisses unless insist_answer is set.
            while True:
                u.checkForQuit()
                for event in pygame.event.get():
                    if event.type == KEYUP:
                        if event.key == K_RETURN:
                            return True
                        elif event.key == K_ESCAPE:
                            return False
                    if event.type == MOUSEBUTTONUP:
                        x, y = event.pos
                        if insist_answer is False:
                            if not Rect(surface_pos, surface_size).collidepoint((x, y)):
                                return
                        if yes_rect.collidepoint((x, y)):
                            u.change_cursor(
                                default_cursor, pointer_cursor, 'default')
                            return True
                        elif no_rect.collidepoint((x, y)):
                            u.change_cursor(
                                default_cursor, pointer_cursor, 'default')
                            return False
                    elif event.type == MOUSEMOTION:
                        # Switch to a pointer cursor while hovering a button.
                        pointer = False
                        x, y = event.pos
                        if yes_rect.collidepoint((x, y)):
                            pointer = True
                            u.change_cursor(
                                default_cursor, pointer_cursor, 'pointer')
                        elif no_rect.collidepoint((x, y)):
                            pointer = True
                            u.change_cursor(
                                default_cursor, pointer_cursor, 'pointer')
                        if pointer is False:
                            u.change_cursor(
                                default_cursor, pointer_cursor, 'default')
                pygame.display.update()
                g.FPSCLOCK.tick(g.FPS)
        # NOTE(review): unreachable -- both loops above only exit via return.
        return user_input
def session(target, background, return_value, conditioned, dialogue_list, insist_answer=False):
    '''Runs several Interactions back to back on the same darkened backdrop.

    target -- surface to draw on; background -- prepared backdrop or None
    (a darkened copy of target is created); return_value -- None for
    notifications (nothing returned), 'all' for the list of every answer,
    or an integer index selecting one answer; conditioned -- when True,
    abort the whole session if the first dialogue answered False;
    dialogue_list -- Interaction objects to run in order.
    The session also aborts when any dialogue returns None (dismissed).
    '''
    if background is None:
        background = target.copy()
        overlay = pygame.Surface((g.WINDOWWIDTH, g.WINDOWHEIGHT))
        overlay.set_alpha(180)
        overlay.fill((0, 0, 0))
        background.blit(overlay, (0, 0))
        target.blit(background, (0, 0))
    returned_values = []
    # A session with no return value is a notification-style session.
    notification = True if return_value is None else False
    for dialogue in dialogue_list:
        returned_values.append(dialogue.run(
            target, background, notification, insist_answer))
        # Abort if the (first) gating dialogue was answered "no".
        if conditioned is True and returned_values[0] is False:
            return
        # Abort if the latest dialogue was dismissed.
        if len(returned_values) > 0 and returned_values[-1] is None:
            return
    if return_value is None:
        return
    elif return_value == 'all':
        return returned_values
    else:
        return returned_values[return_value]
# Tests -- manual demo run of the dialogue system.
if __name__ == '__main__':
    # NOTE(review): 'global' at module level is a no-op; DISPLAYSURF is
    # already a module-level name here.
    global DISPLAYSURF
    DISPLAYSURF = pygame.display.set_mode(
        (g.WINDOWWIDTH, g.WINDOWHEIGHT))
    DISPLAYSURF.fill((255, 255, 255))
    # A few sample dialogues (yes/no confirmations and a name prompt).
    close = Interaction('yes_no', 'Odejít do hlavního menu?',
                        'Veškerý neuložený obsah bude smazán!\nHerní postup se uloží po ukončení kapitoly.')
    start = Interaction('yes_no', 'Spustit novou hru',
                        'Váš dosavadní postup bude přepsán. Přejete si pokračovat?')
    name = Interaction('text', 'Pojmenuj svého hrdinu:', '', 4)
    # Run three dialogues, gated on the first answer, and print answer #2.
    print(session(DISPLAYSURF, None, 2, True, [start, name, close]))
    notif = Interaction(
        'text', 'Konec hry', "Dokončil jsi příběhovou kampaň. Pokud chceš začít novou hru, klikni na tlačítko 'Nová hra'.")
    # Notification-style session: no return value.
    session(DISPLAYSURF, None, None, False, [notif])
| null | Game/communication.py | communication.py | py | 10,199 | python | en | code | null | code-starcoder2 | 50 |
20541919 | #!/usr/bin/env python3
"""
The MIT License (MIT)
Copyright (c) 2017 Erik Perillo <erik.perillo@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import tensorflow as tf
import sys
import random
import shutil
import time
import itertools
import numpy as np
import os
import util
import model
from config import infer as conf
import config
from dproc import infer_load as load
from dproc import infer_pre_proc as pre_proc
from dproc import infer_save_x as save_x
from dproc import infer_save_y_pred as save_y_pred
from dproc import infer_save_y_true as save_y_true
def predict(x, fn):
    """Run *fn* on a single sample *x*, hiding the batch dimension.

    The sample is wrapped into a batch of size one before calling *fn*,
    and the two leading axes of the result are stripped away.
    """
    batched = x.reshape((1, ) + x.shape)
    prediction = fn(batched)
    return prediction.reshape(prediction.shape[2:])
def mk_preds_dir(base_dir, pattern="train"):
    """
    Creates dir to store predictions.

    A unique directory name matching *pattern* is generated under
    *base_dir* (via util.uniq_path), created on disk, and returned.
    """
    #creating dir
    out_dir = util.uniq_path(base_dir, pattern)
    os.makedirs(out_dir)
    return out_dir
def main():
    """Run inference over the configured inputs and save predictions."""
    if conf["rand_seed"] is not None:
        random.seed(conf["rand_seed"])
    # Input paths come either from the config or from the command line.
    if conf["input_fps"] is None:
        if len(sys.argv) < 2:
            print("usage: {} <filepath_or_dir_of_inputs>".format(sys.argv[0]))
            exit()
        else:
            input_fps = sys.argv[1]
    else:
        input_fps = conf["input_fps"]
    if isinstance(input_fps, str):
        input_fps = [input_fps]
    if conf["shuffle_input_fps"]:
        random.shuffle(input_fps)
    # Accumulators for the optional predictions table (built row by row).
    preds = None
    trues = None
    #creating base dir if needed
    if not os.path.isdir(conf["preds_save_dir_basedir"]):
        os.makedirs(conf["preds_save_dir_basedir"])
    #creating preds dir
    preds_dir = mk_preds_dir(conf["preds_save_dir_basedir"], "preds")
    #copying model dir
    util.mk_model_dir(preds_dir)
    #meta-model
    meta_model = model.MetaModel()
    with tf.Session(graph=tf.Graph()) as sess:
        #loading model weights
        print("loading model", flush=True)
        model.load(sess, conf["model_path"])
        print("setting params", flush=True)
        meta_model.set_params_from_colls()
        #building functions
        print("getting pred fn", flush=True)
        _pred_fn = meta_model.get_pred_fn(sess)
        pred_fn = lambda x: predict(x, _pred_fn)
        print("iterating", flush=True)
        # Indexes of sampled prediction points, chosen once on first image.
        indexes = None
        #iterating over images doing predictions
        for i, fp in enumerate(input_fps):
            print("on image '{}'".format(fp))
            x, y_true = load(fp)
            print("\tpredicting...")
            print("\tx shape:", x.shape)
            x_ = pre_proc(x.copy())
            start_time = time.time()
            y_pred = pred_fn(x_)
            pred_time = time.time() - start_time
            print("\tdone predicting. took {:.6f} seconds".format(pred_time))
            print("\ty_pred shape:", y_pred.shape)
            if conf["save_tables"]:
                #getting indexes: sample at most max_pred_points points
                #spread evenly across all input images
                if indexes is None:
                    pts_per_img = y_pred.size
                    if conf["max_pred_points"] is not None:
                        pts_per_img = min(
                            conf["max_pred_points"]//len(input_fps),
                            pts_per_img)
                    indexes = list(range(pts_per_img))
                    if len(indexes) < y_pred.size:
                        random.shuffle(indexes)
                if preds is None:
                    preds = y_pred.flatten()[indexes]
                else:
                    preds = np.vstack((preds, y_pred.flatten()[indexes]))
                if conf["with_trues"]:
                    if trues is None:
                        trues = y_true.flatten()[indexes]
                    else:
                        trues = np.vstack((trues, y_true.flatten()[indexes]))
            # Save per-image artifacts for at most max_n_preds_save images.
            if conf["max_n_preds_save"] is None or i < conf["max_n_preds_save"]:
                fn = os.path.basename(fp)
                name = fn.split(".")[0]
                # NOTE(review): ext is computed but never used.
                ext = ("." + fn.split(".")[-1]) if "." in fn else ""
                #saving x
                if save_x is not None:
                    save_x(x, preds_dir, name)
                #saving prediction
                if save_y_pred is not None:
                    save_y_pred(y_pred, preds_dir, name)
                #saving ground-truth
                if save_y_true is not None and conf["with_trues"]:
                    save_y_true(y_true, preds_dir, name)
    #saving predictions
    if conf["save_tables"] and preds is not None:
        fp = os.path.join(preds_dir, "table.npz")
        # NOTE(review): flush=True is passed to str.format (ignored) rather
        # than to print -- presumably intended as print(..., flush=True).
        print("saving table to '{}'...".format(fp, flush=True))
        if conf["with_trues"]:
            np.savez(fp, y_pred=preds, y_true=trues, x_fp=input_fps)
        else:
            np.savez(fp, y_pred=preds, x_fp=input_fps)
    print("saved everything to", preds_dir)
if __name__ == "__main__":
    main()
| null | att/upeek/upeek/infer.py | infer.py | py | 5,854 | python | en | code | null | code-starcoder2 | 51 |
114639253 | from ghetto_manim import *
import time
# Suppress scientific notation
np.set_printoptions(suppress=True)
# Special Shape Classes
class Arrow(ParamShapeGroup):
    """An arrow: a (possibly curved) shaft with a triangular head at its tip."""

    def __init__(self, x0, y0, x1, y1, color, fill_p=0., curve_place=0.5, curve_amount=0, start=0, stop=1):
        # The shaft runs from (x0, y0) to (x1, y1).
        shaft = CurvedLine(x0, y0, x1, y1, color, -1, curve_place, curve_amount, stop=1)
        # The head sits at the end point, rotated to match the shaft's angle.
        head = Triangle(x1, y1, color, fill_p=fill_p, stop=1,
                        rot_theta=shaft.curve_angle(), scale_x=10, scale_y=10)
        super().__init__([shaft, head], start=start, stop=stop)
# Combined Animation Functions
# ...
# Time Step
dt = 0.001
def scene():
    """Build the demo scene's shapes and animation tree, then loop forever."""
    # Object Construction
    r = 40
    k = 1.05
    l = 0.83
    original_color = apple_colors['lightindigo']
    rect = Circle(0, 0, 2*r, color=original_color, fill_p=-1, stop=0)
    rect2 = Circle(0, 0, 2*k*r, color=apple_colors['lightyellow'], stop=0)
    rect_0 = Circle(0, 0, 2*l*r, color=original_color, fill_p=0, stop=0)
    circle = Circle(-400, 0, 2*r, color=apple_colors['lightpurple'], stop=1)
    oliver = Arrow(-400, 0, 0, 0, apple_colors['lightpurple'], curve_amount=0, fill_p=0.15, stop=0)
    queen = Arrow(0, 100, 35, 100, apple_colors['lightyellow'], curve_amount=400, fill_p=0.15, stop=0)
    thea = Arrow(-400, 2*r, -2*r, 0, apple_colors['lightteal'], curve_amount=200, fill_p=0.15, stop=0)
    objects = [oliver, queen, thea, rect, rect2, rect_0, circle]
    # Animation Tree Construction: each animation is parented to another and
    # starts `delay` frames after its parent.
    animator = Animator()
    empty_anim = animator.get_root()
    draw = animator.add_animation(oliver.draw_step, [1, smooth], duration=30, parent_animation=empty_anim, delay=0)
    draw2 = animator.add_animation(queen.draw_step, [1, smooth], duration=30, parent_animation=empty_anim, delay=0)
    draw3 = animator.add_animation(rect.draw_step, [1, smooth], duration=30, parent_animation=empty_anim, delay=0)
    draw4 = animator.add_animation(rect_0.draw_step, [1, smooth], duration=30, parent_animation=empty_anim, delay=0)
    trace0 = animator.add_animation(thea.draw_step, [1, smooth], duration=20, parent_animation=draw4, delay=15)
    # Recognizing Vertex Reached
    trace = animator.add_animation(rect2.draw_step, [1, smooth], duration=30, parent_animation=trace0, delay=0)
    untrace = animator.add_animation(rect2.undraw, [1, smooth], duration=20, parent_animation=trace0, delay=15)
    remove_trace = animator.add_animation(rect2.abrupt_removal, [], duration=1, parent_animation=untrace, delay=0)
    scale_up = animator.add_animation(rect.scale_step, [1.1, 1.1, smooth], duration=20, parent_animation=trace0, delay=25)
    scale_up2 = animator.add_animation(rect2.scale_step, [1.1, 1.1, smooth], duration=20, parent_animation=trace0, delay=25)
    scale_up3 = animator.add_animation(rect_0.scale_step, [1.2, 1.2, smooth], duration=20, parent_animation=trace0, delay=25)
    fill_in = animator.add_animation(rect_0.fade_fill_step, [0.05, smooth], duration=20, parent_animation=trace0, delay=30)
    change_color = animator.add_animation(rect.fade_color_step, [apple_colors['lightyellow'], smooth], duration=20, parent_animation=trace0, delay=30)
    change_color = animator.add_animation(rect_0.fade_color_step, [apple_colors['lightyellow'], smooth], duration=20, parent_animation=trace0, delay=30)
    # Recognizing Vertex Left
    invisible_flip = animator.add_animation(rect2.scale_step, [-1, 1, smooth], duration=1, parent_animation=change_color, delay=0)
    invisible_change_color = animator.add_animation(rect2.fade_color_step, [(255,255,255), smooth], duration=1, parent_animation=change_color, delay=0)
    pause = 20
    scale_down = animator.add_animation(rect.scale_step, [1, 1, smooth], duration=20, parent_animation=change_color, delay=pause+10)
    scale_down2 = animator.add_animation(rect2.scale_step, [0.9, 0.9, smooth], duration=20, parent_animation=change_color, delay=pause+10)
    scale_down3 = animator.add_animation(rect_0.scale_step, [1, 1, smooth], duration=20, parent_animation=change_color, delay=pause+10)
    fill_out = animator.add_animation(rect_0.fade_fill_step, [0, smooth], duration=20, parent_animation=change_color, delay=pause+10)
    change_color_back = animator.add_animation(rect.fade_color_step, [original_color, smooth], duration=20, parent_animation=change_color, delay=pause+10)
    change_color_back2 = animator.add_animation(rect_0.fade_color_step, [original_color, smooth], duration=20, parent_animation=change_color, delay=pause+10)
    remove_trace = animator.add_animation(rect2.abrupt_removal, [], duration=1, parent_animation=untrace, delay=0)
    # Scene Parameters
    picasso = Painter(w, objects)
    # Play em'! (runs until the window/process is killed)
    while True:
        animator.update_step()
        picasso.paint_step()
        time.sleep(dt)
# Main Function
if __name__ == '__main__':
    scene()
# Necessary line for Tkinter
mainloop()
import re

import psycopg2
from flask import render_template, redirect, url_for, request, Blueprint, flash

from app import *
personal_b = Blueprint('personal_b', __name__, template_folder="templates")
@personal_b.route("/personal/add", methods=["GET", "POST"])
def personal_add():
    """Render the add form (GET) or insert a new personal record (POST).

    On POST, every submitted form field becomes a column of the INSERT;
    the new row's id is used to redirect to the one-shot form for it.
    """
    if request.method == "GET":
        return render_template("personal/add.html")
    elif request.method == "POST":
        columns = []
        values = []
        for field in request.form:
            # Security: field names are interpolated into the SQL text, so
            # only plain identifiers are allowed -- this blocks SQL
            # injection via crafted form field names.  Field values stay
            # parameterized as before.
            if not re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", field):
                flash("Invalid field name")
                return redirect("/personal/add")
            columns.append(field)
            values.append(request.form.get(field))
        query = ("INSERT INTO personal(" + ",".join(columns) + ") VALUES(" +
                 ",".join(["%s"] * len(columns)) + ") RETURNING id;")
        conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)
        try:
            cursor = conn.cursor()
            cursor.execute(query, tuple(values))
            new_id = cursor.fetchone()[0]
            conn.commit()
        finally:
            # Always release the connection, even if the INSERT fails.
            conn.close()
        return redirect("/oneshot/add/" + str(new_id))
@personal_b.route("/personal/search", methods=["GET", "POST"])
def personal_search():
    """Render the search form (GET) or run a prefix search (POST).

    Each non-empty form field adds a case-insensitive prefix condition
    (column ILIKE value||'%') on the personal table.
    """
    if request.method == "GET":
        return render_template("personal/search.html")
    elif request.method == "POST":
        conditions = []
        values = []
        for field in request.form:
            if request.form.get(field) != "":
                # Security: field names end up in the SQL text, so restrict
                # them to plain identifiers to block SQL injection through
                # crafted field names.  Values remain parameterized.
                if not re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", field):
                    flash("Invalid field name")
                    return redirect(url_for("personal_b.personal_search"))
                conditions.append(field + " ILIKE %s||'%%'")
                values.append(request.form.get(field))
        conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)
        try:
            cursor = conn.cursor()
            if not conditions:
                cursor.execute("SELECT * FROM personal;")
            else:
                cursor.execute("SELECT * FROM personal WHERE " +
                               " AND ".join(conditions) + ";", tuple(values))
            # Keep only the first 12 columns of each row for display.
            results = [x[:12] for x in cursor.fetchall()]
        finally:
            # Always release the connection, even if the query fails.
            conn.close()
        if len(results) == 0:
            flash("No matches found")
            # NOTE(review): 'personal_b.details_search' is not defined in the
            # visible code of this blueprint -- confirm the endpoint name.
            return redirect(url_for("personal_b.details_search"))
        else:
            return render_template("/personal/results.html", results=results)
# @app.route("/search", methods=["GET", "POST"])
# def search():
# if request.method == "GET":
# return render_template("search.html")
# elif request.method == "POST":
# conditions = ""
# values = ()
# for a in request.form:
# if request.form.get(a) != "":
# conditions += a + " ILIKE %s||'%%' AND "
# values += (request.form.get(a), )
# conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)
# cursor = conn.cursor()
# if (conditions == ""):
# cursor.execute("SELECT * FROM students;")
# else:
# cursor.execute("SELECT * FROM students WHERE " + conditions[:-5] + ";", values)
# results = cursor.fetchall()
# results = [x[:15] for x in results]
# if (results):
# conn.close()
# return render_template("results.html", results=results)
# else:
# flash("No matches found")
# return redirect(url_for("search"))
# @app.route("/edit/<int:id>", methods=["GET", "POST"])
# def edit(id):
# if request.method == "GET":
# conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)
# cursor = conn.cursor()
# cursor.execute("SELECT * FROM students WHERE id=%s", (id, ))
# student = cursor.fetchall()[0]
# print(student)
# conn.close()
# return render_template("edit.html", student=student)
# else:
# columns = ""
# values = ()
# for a in request.form:
# columns += a + "=%s,"
# values += (request.form.get(a), )
# conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)
# cursor = conn.cursor()
# cursor.execute("UPDATE students SET " + columns[:-1] + " WHERE id=%s;", values + (id, ))
# conn.commit()
# conn.close()
# return redirect(url_for("search"))
# @app.route("/delete/<int:id>", methods=["GET"])
# def delete(id):
# conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)
# cursor = conn.cursor()
# cursor.execute("DELETE FROM students WHERE id=%s", (id, ))
# conn.commit()
# conn.close()
# return redirect(url_for("search")) | null | blueprints/personal.py | personal.py | py | 3,857 | python | en | code | null | code-starcoder2 | 51 |
417646417 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from typing import Callable, Any
from context import in3120
def data_path(filename: str):
return "../data/" + filename
def simple_repl(prompt: str, evaluator: Callable[[str], Any]):
from timeit import default_timer as timer
import pprint
printer = pprint.PrettyPrinter()
escape = "!"
print(f"Enter '{escape}' to exit.")
while True:
print(f"{prompt}>", end="")
query = input()
if query == escape:
break
start = timer()
matches = evaluator(query)
end = timer()
printer.pprint(matches)
print(f"Evaluation took {end - start} seconds.")
def repl_a():
print("Building inverted index from Cranfield corpus...")
normalizer = in3120.BrainDeadNormalizer()
tokenizer = in3120.BrainDeadTokenizer()
corpus = in3120.InMemoryCorpus(data_path("cran.xml"))
index = in3120.InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
print("Enter one or more index terms and inspect their posting lists.")
simple_repl("terms", lambda ts: {t: list(index.get_postings_iterator(t)) for t in index.get_terms(ts)})
def repl_b_1():
print("Building suffix array from Cranfield corpus...")
normalizer = in3120.BrainDeadNormalizer()
tokenizer = in3120.BrainDeadTokenizer()
corpus = in3120.InMemoryCorpus(data_path("cran.xml"))
engine = in3120.SuffixArray(corpus, ["body"], normalizer, tokenizer)
options = {"debug": False, "hit_count": 5}
print("Enter a prefix phrase query and find matching documents.")
print(f"Lookup options are {options}.")
print("Returned scores are occurrence counts.")
simple_repl("query", lambda q: list(engine.evaluate(q, options)))
def repl_b_2():
print("Building trie from MeSH corpus...")
normalizer = in3120.BrainDeadNormalizer()
tokenizer = in3120.BrainDeadTokenizer()
corpus = in3120.InMemoryCorpus(data_path("mesh.txt"))
dictionary = in3120.Trie()
dictionary.add((normalizer.normalize(normalizer.canonicalize(d["body"])) for d in corpus), tokenizer)
engine = in3120.StringFinder(dictionary, tokenizer)
print("Enter some text and locate words and phrases that are MeSH terms.")
simple_repl("text", lambda t: list(engine.scan(normalizer.normalize(normalizer.canonicalize(t)))))
def repl_c():
print("Indexing English news corpus...")
normalizer = in3120.BrainDeadNormalizer()
tokenizer = in3120.BrainDeadTokenizer()
corpus = in3120.InMemoryCorpus(data_path("en.txt"))
index = in3120.InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
ranker = in3120.BrainDeadRanker()
engine = in3120.SimpleSearchEngine(corpus, index)
options = {"debug": False, "hit_count": 5, "match_threshold": 0.5}
print("Enter a query and find matching documents.")
print(f"Lookup options are {options}.")
print(f"Tokenizer is {tokenizer.__class__.__name__}.")
print(f"Ranker is {ranker.__class__.__name__}.")
simple_repl("query", lambda q: list(engine.evaluate(q, options, ranker)))
def repl_d_1():
print("Indexing MeSH corpus...")
normalizer = in3120.BrainDeadNormalizer()
tokenizer = in3120.ShingleGenerator(3)
corpus = in3120.InMemoryCorpus(data_path("mesh.txt"))
index = in3120.InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
ranker = in3120.BrainDeadRanker()
engine = in3120.SimpleSearchEngine(corpus, index)
options = {"debug": False, "hit_count": 5, "match_threshold": 0.5}
print("Enter a query and find matching documents.")
print(f"Lookup options are {options}.")
print(f"Tokenizer is {tokenizer.__class__.__name__}.")
print(f"Ranker is {ranker.__class__.__name__}.")
simple_repl("query", lambda q: list(engine.evaluate(q, options, ranker)))
def repl_d_2():
print("Indexing English news corpus...")
normalizer = in3120.BrainDeadNormalizer()
tokenizer = in3120.BrainDeadTokenizer()
corpus = in3120.InMemoryCorpus(data_path("en.txt"))
index = in3120.InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
ranker = in3120.BetterRanker(corpus, index)
engine = in3120.SimpleSearchEngine(corpus, index)
options = {"debug": False, "hit_count": 5, "match_threshold": 0.5}
print("Enter a query and find matching documents.")
print(f"Lookup options are {options}.")
print(f"Tokenizer is {tokenizer.__class__.__name__}.")
print(f"Ranker is {ranker.__class__.__name__}.")
simple_repl("query", lambda q: list(engine.evaluate(q, options, ranker)))
def repl_e():
print("Initializing naive Bayes classifier from news corpora...")
normalizer = in3120.BrainDeadNormalizer()
tokenizer = in3120.BrainDeadTokenizer()
languages = ["en", "no", "da", "de"]
training_set = {language: in3120.InMemoryCorpus(data_path(f"{language}.txt")) for language in languages}
classifier = in3120.NaiveBayesClassifier(training_set, ["body"], normalizer, tokenizer)
print(f"Enter some text and classify it into {languages}.")
print(f"Returned scores are log-probabilities.")
simple_repl("text", lambda t: list(classifier.classify(t)))
def main():
repls = {"a": repl_a,
"b-1": repl_b_1,
"b-2": repl_b_2,
"c": repl_c,
"d-1": repl_d_1,
"d-2": repl_d_2,
"e": repl_e}
targets = sys.argv[1:]
if not targets:
print(f"{sys.argv[0]} [{'|'.join(key for key in repls.keys())}]")
else:
for target in targets:
if target in repls:
repls[target.lower()]()
if __name__ == "__main__":
main()
| null | tests/repl.py | repl.py | py | 5,674 | python | en | code | null | code-starcoder2 | 51 |
568310842 | # -*- coding: utf-8 -*-
""" Import Best data """
import MySQLdb
import MySQLdb.cursors
from datetime import date
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from catalog.models import Rental, RentalPrice, CatalogImage, RentalImagePage, Region, RegionPage, Mode
from wagtail.core.models import Page, Site
class Command(BaseCommand):
""" Import management command """
help = "Import rental data"
def handle(self, *args, **options):
model_map = {
'field_1033_1': 'title',
'field_900_1': 'notes',
'field_466_1': 'description',
'field_1063_1': 'mode_of_drive',
'field_465_1': 'category',
'field_442_1': 'included_services',
'field_443_1': 'not_included_services',
'rentalcars_vehicle.oid': 'travel_id',
}
Rental.objects.all().delete()
db_connection = MySQLdb.connect(
host=settings.MYSQL_IMPORT_HOST,
port=settings.MYSQL_IMPORT_PORT,
user=settings.MYSQL_IMPORT_USER,
passwd=settings.MYSQL_IMPORT_PASSWD,
db=settings.MYSQL_IMPORT_DB,
charset='utf8',
cursorclass=MySQLdb.cursors.DictCursor)
cursor = db_connection.cursor()
site = Site.objects.all()[0]
root_page = site.root_page
try:
link_page = Page.objects.get(title='BOT_Import')
except:
link_page = Page()
setattr(link_page, 'title', 'BOT_Import')
root_page.add_child(instance=link_page)
query = """SELECT %(db)s.rentalcars.*, %(db)s.rentalcars_vehicle.*
FROM %(db)s.rentalcars
INNER JOIN %(db)s.rentalcars_vehicle
ON %(db)s.rentalcars_vehicle.parentid = %(db)s.rentalcars.oid
WHERE %(db)s.rentalcars.language='D'
AND %(db)s.rentalcars_vehicle.language='D'
%(ic)s""" % {'db':settings.MYSQL_IMPORT_DB, 'ic': settings.IMPORT_CAP}
cursor.execute(query)
for result in cursor:
page = Rental()
for k in model_map:
setattr(page, model_map[k], result[k])
setattr(page, 'show_in_menus', True)
# Import region if it doesnt exist
region = Region.objects.get_or_create(import_name=result['field_431_1'])
setattr(page, 'country_relation', region[0])
# Import mode if it doesnt exist
mode = Mode.objects.get_or_create(import_name='Mietwagen')
setattr(page, 'travel_mode_relation', mode[0])
# Create region pages if they dont exist
try:
region_page = RegionPage.objects.get(title=result['field_431_1'])
except RegionPage.MultipleObjectsReturned:
region_page = RegionPage.objects.filter(title=result['field_431_1'])[0]
except RegionPage.DoesNotExist:
region_page = RegionPage(title=result['field_431_1'], region=region[0], show_in_menus=True)
root_page.add_child(instance=region_page)
try:
mode_page = RegionPage.objects.descendant_of(region_page).get(title='Mietwagen')
except RegionPage.DoesNotExist:
mode_page = RegionPage(
title='Mietwagen', mode=mode[0], show_in_menus=True)
region_page.add_child(instance=mode_page)
mode_page.add_child(instance=page)
# Get prices
price_cursor = db_connection.cursor()
price_query = """SELECT %(db)s.rentalcars_prices.*, %(db)s.rentalcars_price_periods.objectinfo
FROM %(db)s.rentalcars_prices
INNER JOIN %(db)s.rentalcars_price_periods
ON %(db)s.rentalcars_price_periods.oid = %(db)s.rentalcars_prices.subcontainer_oid_30
WHERE %(db)s.rentalcars_prices.subcontainer_oid_33 = %(car_oid)s AND %(db)s.rentalcars_prices.oid = %(oid)s
AND %(db)s.rentalcars_prices.language='D'
AND %(db)s.rentalcars_price_periods.language='D'""" % {'db': settings.MYSQL_IMPORT_DB, 'car_oid': result['rentalcars_vehicle.oid'], 'oid': result['oid']}
price_cursor.execute(price_query)
for price in price_cursor:
try:
imported_price = RentalPrice()
imported_price.price = price['price_sell']
imported_price.unit = price['price_sell_person_unit']
imported_price.usage = price['field_469_1']
imported_price.manual_order = price['subcontainer_sort_34']
dates_list = [date.split('.') for date in price['objectinfo'].split(' - ')]
imported_price.start_period = date(int(dates_list[0][2]), int(dates_list[0][1]), int(dates_list[0][0]))
imported_price.end_period = date(int(dates_list[1][2]), int(dates_list[1][1]), int(dates_list[1][0]))
imported_price.rental = page
imported_price.save()
except:
pass
# Get images
image_cursor = db_connection.cursor()
image_query = """SELECT *
FROM %s.pictures_objects
WHERE oid = %s""" % (settings.MYSQL_IMPORT_DB, result['oid'])
image_cursor.execute(image_query)
for image in image_cursor:
try:
catalog_image = RentalImagePage()
catalog_image.page = page
catalog_image.image = CatalogImage.objects.get(picid=image['picid'])
catalog_image.save()
except (CatalogImage.DoesNotExist):
pass
self.stdout.write(self.style.SUCCESS('Mietwagen importiert'))
| null | catalog/management/commands/import_rental.py | import_rental.py | py | 5,915 | python | en | code | null | code-starcoder2 | 51 |
568869424 | # ctypes: is a foreign function library for Python. It provides C compatible data types,
# and allows calling functions in DLLs or shared libraries.
# It can be used to wrap these libraries in pure Python.
import os
import ctypes
from scipy import integrate
from scipy import LowLevelCallable
import numpy as np
# os.path.abspath(path): Return a normalized absolutized version of the pathname path.
# ctypes.CDLL(): load dynamic link libraries (DLL), on Linux CDLL, on Windows WinDLL or OleDLL
lib = ctypes.CDLL(os.path.abspath('p_b_lib_test.so'))
# ctypes.c_double: ctype data type, in C data type: double, in python data type: float
# restype: specifies the return type -> in this case a C double/ python float
lib.f.restype = ctypes.c_double
# argtypes: It is possible to specify the required argument types of functions exported from DLLs by setting
# the argtypes attribute (first argument of the function is an integer, second argument is a double and
# third argument is a void)
# WICHTIG: void Funktion darf keinen Rückgabewert haben!
lib.f.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double))
# ctypes.cast(obj, type): This function is similar to the cast operator in C.
# It returns a new instance of type which points to the same memory block as 'obj'.
# 'type' must be a pointer type, and 'obj' must be an object that can be interpreted as a pointer.
# user_data = ctypes.cast(ctypes.pointer(c), ctypes.c_void_p)
func = LowLevelCallable(lib.f)
f_dsnb = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.3, 0.15, 0.05, 0, 0])
f_ccatmo = np.array([0.1, 0.2, 0.35, 0.45, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9])
f_reactor = np.array([5, 2, 1, 0, 0, 0, 0, 0, 0, 0])
f_data = np.array([1, 0, 0, 0, 1, 0, 0, 1, 0, 0])
fraction = np.array([f_dsnb, f_ccatmo, f_reactor, f_data], dtype='float')
# integrate the function
integral = integrate.nquad(func, [[0.5, 1.5], [0.5, 1.5], [0.5, 1.5], [0, 0.1], [1, 1.1], [1, 1.1], [2, 2.1], [2, 2.1],
[3, 3.1], [3, 3.1], [4, 4.1]])
# print integral-value:
print(integral)
| null | source/test_python_in_c/p_b_lib.py | p_b_lib.py | py | 2,076 | python | en | code | null | code-starcoder2 | 51 |
269328851 | from typing import List, Generator, Any
import luigi
from exasol_integration_test_docker_environment.lib.base.flavor_task import FlavorBaseTask
from exasol_integration_test_docker_environment.lib.base.json_pickle_target import JsonPickleTarget
from exasol_integration_test_docker_environment.lib.data.database_credentials import DatabaseCredentialsParameter
from exaslct_src.exaslct.lib.tasks.test.run_db_test import RunDBTest
from exaslct_src.exaslct.lib.tasks.test.run_db_test_result import RunDBTestDirectoryResult, RunDBTestResult
from exaslct_src.exaslct.lib.tasks.test.run_db_tests_parameter import RunDBTestParameter
class RunDBTestsInDirectory(FlavorBaseTask,
RunDBTestParameter,
DatabaseCredentialsParameter):
directory = luigi.Parameter()
def extend_output_path(self):
return self.caller_output_path + (self.directory,)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._test_container_info = self.test_environment_info.test_container_info
self.tasks = self.create_test_tasks_from_directory(self.directory)
def run_task(self):
test_results = yield from self.run_tests()
result = RunDBTestDirectoryResult(test_results=test_results,
language=self.language,
test_folder=self.directory)
JsonPickleTarget(self.get_output_path().joinpath("test_results.json")).write(test_results, 4)
self.return_object(result)
def run_tests(self) -> Generator[RunDBTest, Any, List[RunDBTestResult]]:
test_results = []
for test_task_config in self.tasks:
test_result_future = yield from self.run_dependencies(test_task_config)
test_result = self.get_values_from_future(test_result_future)
test_results.append(test_result)
return test_results
def create_test_tasks_from_directory(
self, directory: str):
test_container = self._client.containers.get(self._test_container_info.container_name)
exit_code, ls_output = test_container.exec_run(cmd="ls /tests/test/%s/" % directory)
test_files = ls_output.decode("utf-8").split("\n")
tasks = [self.create_test_task(directory, test_file)
for test_file in test_files
if test_file != "" and test_file.endswith(".py")]
return tasks
def create_test_task(self, directory: str, test_file: str):
task = self.create_child_task_with_common_params(
RunDBTest,
test_file=directory + "/" + test_file
)
return task
| null | exaslct_src/exaslct/lib/tasks/test/run_db_test_in_directory.py | run_db_test_in_directory.py | py | 2,691 | python | en | code | null | code-starcoder2 | 51 |
262590487 | import numpy as np
import os.path as osp
import tensorflow as tf
import gym
import time
from core import ReplayBuffer
from spinup.algos.tf1.td3 import core
from spinup.algos.tf1.td3.core import get_vars
from spinup.user_config import DEFAULT_DATA_DIR
from spinup.utils.logx import EpochLogger
from spinup.utils.test_policy import load_policy_and_env
def td3(env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=None,
steps_per_epoch=10000, epochs=10000, replay_size=int(2.5e6), gamma=0.99,
polyak=0.995, pi_lr=1e-4, q_lr=1e-4, batch_size=256, start_steps=10000,
update_after=10000, update_every=50, act_noise=0.1, target_noise=0.1,
noise_clip=0.5, policy_delay=2, num_test_episodes=50, max_ep_len=900,
logger_kwargs=dict(), save_freq=1, sess=None, load_1vs1="", num=0,
render=False, test_env_fn=None, use_es=True):
"""
Twin Delayed Deep Deterministic Policy Gradient (TD3)
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` (batch, act_dim) | Deterministically computes actions
| from policy given states.
``q1`` (batch,) | Gives one estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q2`` (batch,) | Gives another estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q1_pi`` (batch,) | Gives the composition of ``q1`` and
| ``pi`` for states in ``x_ph``:
| q1(x, pi(x)).
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to TD3.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
pi_lr (float): Learning rate for policy.
q_lr (float): Learning rate for Q-networks.
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
update_after (int): Number of env interactions to collect before
starting to do gradient descent updates. Ensures replay buffer
is full enough for useful updates.
update_every (int): Number of env interactions that should elapse
between gradient descent updates. Note: Regardless of how long
you wait between updates, the ratio of env steps to gradient steps
is locked to 1.
act_noise (float): Stddev for Gaussian exploration noise added to
policy at training time. (At test time, no noise is added.)
target_noise (float): Stddev for smoothing noise added to target
policy.
noise_clip (float): Limit for absolute value of target policy
smoothing noise.
policy_delay (int): Policy will only be updated once every
policy_delay times for each update of the Q-networks.
num_test_episodes (int): Number of episodes to test the deterministic
policy at the end of each epoch.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
max_ep_ret = -1e6
success_rate = 0
tf.set_random_seed(seed)
np.random.seed(seed)
env, test_env_fn = env_fn(), test_env_fn if test_env_fn is not None else env_fn
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
num_players = env.num_players
assert num_players == 4
assert num_players == test_env_fn().num_players
# Define indexes to use based on usage of experience sharing
es_1_idxs = [0, 1, 2] if use_es else [2]
es_2_idxs = [0, 1, 3] if use_es else [3]
es_rb_idxs = [0, 1, 2, 3] if use_es else [2, 3]
if sess is None:
sess = tf.Session()
# Inputs to computation graph
x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, None, None)
# Main outputs from computation graph
with tf.variable_scope('main'):
with tf.variable_scope('player_1'):
pi_1, q1_1, q2_1, q1_pi_1 = actor_critic(x_ph, a_ph, **ac_kwargs)
with tf.variable_scope('player_2'):
pi_2, q1_2, q2_2, q1_pi_2 = actor_critic(x_ph, a_ph, **ac_kwargs)
# Target policy network
with tf.variable_scope('target'):
with tf.variable_scope('player_1'):
pi_targ_1, _, _, _ = actor_critic(x2_ph, a_ph, **ac_kwargs)
with tf.variable_scope('player_2'):
pi_targ_2, _, _, _ = actor_critic(x2_ph, a_ph, **ac_kwargs)
# Target Q networks
with tf.variable_scope('target', reuse=True):
# Target policy smoothing, by adding clipped noise to target actions
epsilon_1 = tf.random_normal(tf.shape(pi_targ_1), stddev=target_noise)
epsilon_1 = tf.clip_by_value(epsilon_1, -noise_clip, noise_clip)
a2_1 = pi_targ_1 + epsilon_1
a2_1 = tf.clip_by_value(a2_1, -act_limit, act_limit)
epsilon_2 = tf.random_normal(tf.shape(pi_targ_2), stddev=target_noise)
epsilon_2 = tf.clip_by_value(epsilon_2, -noise_clip, noise_clip)
a2_2 = pi_targ_2 + epsilon_2
a2_2 = tf.clip_by_value(a2_2, -act_limit, act_limit)
# Target Q-values, using action from target policy
with tf.variable_scope('player_1'):
_, q1_targ_1, q2_targ_1, _ = actor_critic(x2_ph, a2_1, **ac_kwargs)
with tf.variable_scope('player_2'):
_, q1_targ_2, q2_targ_2, _ = actor_critic(x2_ph, a2_2, **ac_kwargs)
# Experience buffer
replay_buffer = {i: ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size) for i in es_rb_idxs}
# Count variables
var_counts = tuple(2 * core.count_vars(scope) for scope in ['main/player_1/pi', 'main/player_1/q1',
'main/player_1/q2', 'main/player_1/'])
print('\nNumber of parameters: \t pi: %d, \t q1: %d, \t q2: %d, \t total: %d\n'%var_counts)
# Bellman backup for Q functions, using Clipped Double-Q targets
min_q_targ_1 = tf.minimum(q1_targ_1, q2_targ_1)
min_q_targ_2 = tf.minimum(q1_targ_2, q2_targ_2)
backup_1 = tf.stop_gradient(r_ph + gamma * (1 - d_ph) * min_q_targ_1)
backup_2 = tf.stop_gradient(r_ph + gamma * (1 - d_ph) * min_q_targ_2)
# TD3 losses
pi_loss_1 = -tf.reduce_mean(q1_pi_1)
q1_loss_1 = tf.reduce_mean((q1_1 - backup_1) ** 2)
q2_loss_1 = tf.reduce_mean((q2_1 - backup_1) ** 2)
q_loss_1 = q1_loss_1 + q2_loss_1
pi_loss_2 = -tf.reduce_mean(q1_pi_2)
q1_loss_2 = tf.reduce_mean((q1_2 - backup_2) ** 2)
q2_loss_2 = tf.reduce_mean((q2_2 - backup_2) ** 2)
q_loss_2 = q1_loss_2 + q2_loss_2
# Separate train ops for pi, q
pi_optimizer_1 = tf.train.AdamOptimizer(learning_rate=pi_lr)
q_optimizer_1 = tf.train.AdamOptimizer(learning_rate=q_lr)
pi_optimizer_2 = tf.train.AdamOptimizer(learning_rate=pi_lr)
q_optimizer_2 = tf.train.AdamOptimizer(learning_rate=q_lr)
train_pi_op_1 = pi_optimizer_1.minimize(pi_loss_1, var_list=(get_vars('main/player_1/pi')))
train_pi_op_2 = pi_optimizer_2.minimize(pi_loss_2, var_list=(get_vars('main/player_2/pi')))
train_q_op_1 = q_optimizer_1.minimize(q_loss_1, var_list=(get_vars('main/player_1/q')))
train_q_op_2 = q_optimizer_2.minimize(q_loss_2, var_list=(get_vars('main/player_2/q')))
sess.run(tf.global_variables_initializer())
assert len(load_1vs1) == 2
assert 2 == len(num)
g1 = tf.Graph()
with g1.as_default():
__, _get_1v1_action_p1 = load_policy_and_env(osp.join(DEFAULT_DATA_DIR, load_1vs1[0]), num[0], sess=None)
g2 = tf.Graph()
with g2.as_default():
__, _get_1v1_action_p2 = load_policy_and_env(osp.join(DEFAULT_DATA_DIR, load_1vs1[1]), num[1], sess=None)
get_1v1_action_1 = lambda x: [_get_1v1_action_p1(x)]
get_1v1_action_2 = lambda x: [_get_1v1_action_p2(x)]
# Polyak averaging for target variables
target_update_1 = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main/player_1'), get_vars('target/player_1'))])
target_update_2 = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main/player_2'), get_vars('target/player_2'))])
# Initializing targets to match main variables
target_init_1 = tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main/player_1'), get_vars('target/player_1'))])
target_init_2 = tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main/player_2'), get_vars('target/player_2'))])
sess.run([target_init_1, target_init_2])
# Setup model saving
logger.setup_tf_saver(sess, inputs={'x': x_ph, 'a': a_ph}, outputs={'pi_1': pi_1, 'q1_1': q1_1, 'q2_1': q2_1,
'pi_2': pi_2, 'q1_2': q1_2, 'q2_2': q2_2})
def get_action(o, noise_scale, batch_size=1):
a_1 = sess.run(pi_1, feed_dict={x_ph: o[::2].reshape(batch_size, -1)})
a_2 = sess.run(pi_2, feed_dict={x_ph: o[1::2].reshape(batch_size, -1)})
a = np.zeros((a_1.shape[0] + a_2.shape[0], a_1.shape[1]))
a[::2] = a_1
a[1::2] = a_2
a += noise_scale * np.random.randn(batch_size, act_dim)
return [np.ravel(x) for x in np.split(np.clip(a, -act_limit, act_limit), 2 * batch_size, axis=0)]
def test_agent():
success_rate = 0
avg_ret = np.zeros(num_players)
test_env = test_env_fn()
max_ep_len = test_env.time_limit // test_env.control_timestep
for j in range(num_test_episodes):
o = test_env.reset()
d, ep_ret, ep_ret_sparse, ep_len = False, np.zeros(num_players), np.zeros(num_players), 0
vel_to_ball = []
spread_out = []
intercepted = []
intercepted_5 = []
intercepted_10 = []
intercepted_15 = []
received = []
received_5 = []
received_10 = []
received_15 = []
received_p = []
received_p_5 = []
received_p_10 = []
received_p_15 = []
for k in range(num_players):
vel_to_ball.append([])
spread_out.append([])
intercepted.append([])
intercepted_5.append([])
intercepted_10.append([])
intercepted_15.append([])
received.append([])
received_5.append([])
received_10.append([])
received_15.append([])
received_p.append([])
received_p_5.append([])
received_p_10.append([])
received_p_15.append([])
while not(d or (ep_len == max_ep_len)):
# Take deterministic actions at test time (noise_scale=0)
if j % 2 == 0:
act_1 = get_1v1_action_1(o[0][np.r_[0:18, 18:24]]) + get_1v1_action_2(o[1][np.r_[0:18, 24:30]])
else:
act_1 = get_1v1_action_2(o[0][np.r_[0:18, 18:24]]) + get_1v1_action_1(o[1][np.r_[0:18, 24:30]])
a = act_1 + get_action(np.array(o[2:]), 0, (num_players - 2) // 2)
o, r, d, _ = test_env.step(a)
if j == 0 and render:
test_env.render()
for k in range(num_players):
test_obs = test_env.timestep.observation[k]
_switch_k = (2 - k - 1) if ((k < 2) and (j % 2 == 1)) else k
ep_ret[_switch_k] += r[k]
ep_ret_sparse[_switch_k] += test_env.timestep.reward[k]
vel_to_ball[_switch_k].append(test_obs['stats_vel_to_ball'])
spread_out[_switch_k].append(test_obs['stats_teammate_spread_out'])
intercepted[_switch_k].append(test_obs['stats_opponent_intercepted_ball'])
intercepted_5[_switch_k].append(test_obs['stats_opponent_intercepted_ball_5m'])
intercepted_10[_switch_k].append(test_obs['stats_opponent_intercepted_ball_10m'])
intercepted_15[_switch_k].append(test_obs['stats_opponent_intercepted_ball_15m'])
received[_switch_k].append(test_obs['stats_i_received_ball'])
received_5[_switch_k].append(test_obs['stats_i_received_ball_5m'])
received_10[_switch_k].append(test_obs['stats_i_received_ball_10m'])
received_15[_switch_k].append(test_obs['stats_i_received_ball_15m'])
received_p[_switch_k].append(test_obs['stats_i_received_pass'])
received_p_5[_switch_k].append(test_obs['stats_i_received_pass_5m'])
received_p_10[_switch_k].append(test_obs['stats_i_received_pass_10m'])
received_p_15[_switch_k].append(test_obs['stats_i_received_pass_15m'])
ep_len += 1
success_rate += (ep_len <= max_ep_len and test_env.timestep.reward[0] < 0) / num_test_episodes
avg_ret += ep_ret / num_test_episodes
ep_ret_dict = {}
for i in range(num_players):
ep_ret_dict[f"TestEpRet_P{i + 1}"] = ep_ret[i]
ep_ret_dict[f"TestEpRetSparse_P{i + 1}"] = ep_ret_sparse[i]
ep_ret_dict[f"TestEpStatsVelToBall_P{i + 1}"] = np.mean(vel_to_ball[i])
ep_ret_dict[f"TestEpStatsTeamSpreadOut_P{i + 1}"] = np.mean(spread_out[i])
ep_ret_dict[f"TestEpStatsOpIntercepted_P{i + 1}"] = np.mean(intercepted[i])
ep_ret_dict[f"TestEpStatsOpIntercepted_5m_P{i + 1}"] = np.mean(intercepted_5[i])
ep_ret_dict[f"TestEpStatsOpIntercepted_10m_P{i + 1}"] = np.mean(intercepted_10[i])
ep_ret_dict[f"TestEpStatsOpIntercepted_15m_P{i + 1}"] = np.mean(intercepted_15[i])
ep_ret_dict[f"TestEpStatsIReceived_P{i + 1}"] = np.mean(received[i])
ep_ret_dict[f"TestEpStatsIReceived_5m_P{i + 1}"] = np.mean(received_5[i])
ep_ret_dict[f"TestEpStatsIReceived_10m_P{i + 1}"] = np.mean(received_10[i])
ep_ret_dict[f"TestEpStatsIReceived_15m_P{i + 1}"] = np.mean(received_15[i])
ep_ret_dict[f"TestEpStatsIReceivedPass_P{i + 1}"] = np.mean(received_p[i])
ep_ret_dict[f"TestEpStatsIReceivedPass_5m_P{i + 1}"] = np.mean(received_p_5[i])
ep_ret_dict[f"TestEpStatsIReceivedPass_10m_P{i + 1}"] = np.mean(received_p_10[i])
ep_ret_dict[f"TestEpStatsIReceivedPass_15m_P{i + 1}"] = np.mean(received_p_15[i])
logger.store(**ep_ret_dict, TestEpLen=ep_len)
return success_rate, avg_ret
start_time = time.time()
o = env.reset()
ep_ret, ep_len = np.zeros(env.num_players), 0
total_steps = steps_per_epoch * epochs
epoch = 0
pkl_saved = False
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
# Define whether to switch 1vs1 players
switch = epoch % 2 == 0
# Until start_steps have elapsed, randomly sample actions
# from a uniform distribution for better exploration. Afterwards,
# use the learned policy (with some noise, via act_noise).
# Step the env
if switch:
act_1 = get_1v1_action_1(o[0][np.r_[0:18, 18:24]]) + get_1v1_action_2(o[1][np.r_[0:18, 24:30]])
else:
act_1 = get_1v1_action_2(o[0][np.r_[0:18, 18:24]]) + get_1v1_action_1(o[1][np.r_[0:18, 24:30]])
if t > start_steps:
a = act_1 + get_action(np.array(o[2:]), act_noise, (num_players - 2) // 2)
else:
a = act_1 + [env.action_space.sample() for _ in range(2, num_players)]
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += np.array(r)
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len==max_ep_len else d
# Store experience to replay buffer
if not switch:
[replay_buffer[j].store(o[j], a[j], r[j], o2[j], d) for j in es_rb_idxs]
else:
[replay_buffer[2 - j - 1].store(o[j], a[j], r[j], o2[j], d) for j in es_rb_idxs if j < 2]
[replay_buffer[j].store(o[j], a[j], r[j], o2[j], d) for j in range(2, num_players)]
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
# End of trajectory handling
if d or (ep_len == max_ep_len):
ep_ret_dict = {f"EpRet_P{i + 1}": ep_ret[i] for i in range(env.num_players)}
logger.store(**ep_ret_dict, EpLen=ep_len)
reached = False
o, ep_ret, ep_len = env.reset(), np.zeros(env.num_players), 0
# Update handling
if t >= update_after and t % update_every == 0:
for j in range(update_every):
batch_dicts = [replay_buffer[j].sample_batch(batch_size // len(es_1_idxs)) for j in es_1_idxs]
batch = {key: np.concatenate([batch_dicts[i][key] for i in range(len(es_1_idxs))], axis=0) for key in batch_dicts[0].keys()}
feed_dict = {x_ph: batch['obs1'],
x2_ph: batch['obs2'],
a_ph: batch['acts'],
r_ph: batch['rews'],
d_ph: batch['done']
}
q_step_ops_1 = [q_loss_1, q1_1, q2_1, train_q_op_1]
outs_q_1 = sess.run(q_step_ops_1, feed_dict)
if j % policy_delay == 0:
# Delayed policy update
outs_pi_1 = sess.run([pi_loss_1, train_pi_op_1, target_update_1], feed_dict)
batch_dicts = [replay_buffer[j].sample_batch(batch_size // len(es_2_idxs)) for j in es_2_idxs]
batch = {key: np.concatenate([batch_dicts[i][key] for i in range(len(es_2_idxs))], axis=0) for key in batch_dicts[0].keys()}
feed_dict = {x_ph: batch['obs1'],
x2_ph: batch['obs2'],
a_ph: batch['acts'],
r_ph: batch['rews'],
d_ph: batch['done']
}
q_step_ops_2 = [q_loss_2, q1_2, q2_1, train_q_op_2]
outs_q_2 = sess.run(q_step_ops_2, feed_dict)
logger.store(LossQ=outs_q_1[0] + outs_q_2[0], Q1Vals=outs_q_1[1] + outs_q_2[1], Q2Vals=outs_q_1[2] + outs_q_2[2])
if j % policy_delay == 0:
# Delayed policy update
outs_pi_2 = sess.run([pi_loss_2, train_pi_op_2, target_update_2], feed_dict)
logger.store(LossPi=outs_pi_1[0] + outs_pi_2[0])
# End of epoch wrap-up
if (t+1) % steps_per_epoch == 0:
epoch = (t+1) // steps_per_epoch
# Test the performance of the deterministic version of the agent.
act_suc_rate, act_avg_ret = test_agent()
# Save model
print(f"Best Success Rate: {int(success_rate * 100)}, Episode Return: {np.round(max_ep_ret, 2)}")
print(f"Step Success Rate: {int(act_suc_rate * 100)}, Step Episode Return: {np.round(act_avg_ret, 2)}", end=". ")
if ((epoch % save_freq == 0) or (epoch == epochs)) and (act_suc_rate >= success_rate):
logger.save_state({'env': env}, None, not(pkl_saved))
if not pkl_saved:
pkl_saved = True
tf.get_default_graph().finalize()
if g1 is not None: g1.finalize()
if g2 is not None: g2.finalize()
success_rate = act_suc_rate
max_ep_ret = act_avg_ret
print("Saving model ...")
print(f"New Best Success Rate: {int(success_rate * 100,)}, Average Episode Return: {np.round(max_ep_ret, 2)}")
else:
print("")
if (((epoch % save_freq == 0) or (epoch == epochs)) and (act_suc_rate >= 0.4)) or (epoch % (save_freq * 10) == 0):
logger.save_state({'env': env}, t)
print("Saving model ...")
# Log info about epoch
if t >= update_after:
logger.log_tabular('Epoch', epoch)
for i in range(num_players):
logger.log_tabular(f'EpRet_P{i + 1}', with_min_and_max=True)
logger.log_tabular(f'TestEpRet_P{i + 1}', with_min_and_max=True)
logger.log_tabular(f'TestEpRetSparse_P{i + 1}', with_min_and_max=True)
logger.log_tabular(f'TestEpStatsVelToBall_P{i + 1}', with_min_and_max=True)
logger.log_tabular(f"TestEpStatsTeamSpreadOut_P{i + 1}")
logger.log_tabular(f"TestEpStatsOpIntercepted_P{i + 1}")
logger.log_tabular(f"TestEpStatsOpIntercepted_5m_P{i + 1}")
logger.log_tabular(f"TestEpStatsOpIntercepted_10m_P{i + 1}")
logger.log_tabular(f"TestEpStatsOpIntercepted_15m_P{i + 1}")
logger.log_tabular(f"TestEpStatsIReceived_P{i + 1}")
logger.log_tabular(f"TestEpStatsIReceived_5m_P{i + 1}")
logger.log_tabular(f"TestEpStatsIReceived_10m_P{i + 1}")
logger.log_tabular(f"TestEpStatsIReceived_15m_P{i + 1}")
logger.log_tabular(f"TestEpStatsIReceivedPass_P{i + 1}")
logger.log_tabular(f"TestEpStatsIReceivedPass_5m_P{i + 1}")
logger.log_tabular(f"TestEpStatsIReceivedPass_10m_P{i + 1}")
logger.log_tabular(f"TestEpStatsIReceivedPass_15m_P{i + 1}")
logger.log_tabular('EpLen', with_min_and_max=True)
logger.log_tabular('TestEpLen', with_min_and_max=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('TestEpSuccessRate', act_suc_rate)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
# Command-line entry point: parse hyper-parameters and launch TD3 training
# on the dm_control 2vs2 soccer task.
if __name__ == '__main__':
    import argparse
    import dm_soccer2gym
    from math import ceil
    parser = argparse.ArgumentParser()
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--epochs', type=int, default=2000)
    # Fraction of GPU memory to reserve; <= 0 disables the custom GPU session.
    parser.add_argument("--gpu", type=float, default=-1)
    parser.add_argument("--reward", type=str, default="sparse")
    parser.add_argument("--control_timestep", type=float, default=0.05)
    parser.add_argument("--time_limit", type=float, default=45.)
    # NOTE(review): argparse's type=bool turns ANY non-empty string into True,
    # so "--use_es False" still yields True — confirm this is intended.
    parser.add_argument("--use_es", type=bool, default=True)
    args = parser.parse_args()
    from spinup.utils.run_utils import setup_logger_kwargs
    # "es_" tag keeps runs with and without use_es in distinct log dirs
    # (presumably "es" = experience/expert sharing — TODO confirm).
    es_tag = "es_" if args.use_es else ""
    logger_kwargs = setup_logger_kwargs(f'td3_soccer_2vs2_{es_tag}{args.reward}_{args.control_timestep}', data_dir=osp.join(DEFAULT_DATA_DIR, "TD3/2vs2"), datestamp=True)
    if args.gpu > 0:
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    else:
        sess = None
    # Train env uses the CLI reward; eval env is fixed to "simple_v2" with a
    # fixed random_state so success-rate comparisons are reproducible.
    td3(lambda : dm_soccer2gym.make('2vs2', task_kwargs={"rew_type": args.reward, "time_limit": args.time_limit, "disable_jump": True,
                                     "dist_thresh": 0.03, 'control_timestep': args.control_timestep, 'observables': 'all'}),
        test_env_fn=lambda : dm_soccer2gym.make('2vs2', task_kwargs={"rew_type": "simple_v2", "time_limit": args.time_limit, "disable_jump": True,
                                     "dist_thresh": 0.03, 'control_timestep': 0.05, 'random_state': 69, 'observables': 'all'}),
        actor_critic=core.mlp_actor_critic_heads_v2,
        gamma=args.gamma, epochs=args.epochs,
        logger_kwargs=logger_kwargs,
        sess=sess, max_ep_len=ceil(args.time_limit / args.control_timestep),
        load_1vs1=["TD3/1vs1/2020-10-08_23-06-32_td3_soccer_1vs1_dense_0.05",
                   "TD3/1vs1/2020-10-08_23-07-33_td3_soccer_1vs1_dense_0.05"],
        num=[9389999, 8629999], use_es=args.use_es)
| null | spinup/algos/tf1/td3/td3_goal_2vs2.py | td3_goal_2vs2.py | py | 27,230 | python | en | code | null | code-starcoder2 | 51 |
104642087 | # This program simualtes the backend of a ticket purchasing system
# Price per visitor is $5
# Price per member is $3.50
# You are to do the following
# 1. Identify all banned visitors with a filter call
# 2. Determine the memberships status of all applicants
# 3. Calculate the total price for all eligible visitors
# 4. For each valid visitor, return a corresponding ticket in Dictionary form
# 5. Return an error via thrown exception if applicants is empty
# Complete everything above in a function called processRequest
# Your should abstract out function as much as reasonably possible
# Visitors who may never be issued a ticket.
bannedVisitors = ["Amy", "Grace", "Bruce"]
# Known applicants mapped to membership status; anyone absent from this
# mapping is treated as a non-member.
memberStatus = {
    "Ally": True,
    "David": True,
    "Brendan": False
}
# Sample request processed by the demo call at the bottom of the file.
request = {
    "applicants": ["Amy", "Ally", "David", "Brendan", "Zoho"]
}
def processRequest(request):
    """Process a ticket request and price tickets for eligible visitors.

    Banned applicants (module-level ``bannedVisitors``) are rejected; the
    rest are priced at $3.50 for members (per ``memberStatus``) and $5.00
    for non-members, and one ticket dict is built per eligible visitor
    (members listed first, matching the original ordering).

    Returns
    -------
    dict
        ``{"successfulApplicants", "bannedApplicants", "totalCost",
        "tickets"}`` on success, or ``{"error": "No applicants"}`` when the
        applicant list is empty or the request is malformed.
    """
    # Q5: reject an empty applicant list up front.
    if len(request["applicants"]) <= 0:
        return {"error": "No applicants"}
    try:
        applicants = request["applicants"]
        # Q1: partition applicants against the banned list with filter calls.
        # (Replaces two hand-rolled helpers that re-implemented `in`.)
        successfulApplicants = list(
            filter(lambda person: person not in bannedVisitors, applicants))
        bannedApplicants = list(
            filter(lambda person: person in bannedVisitors, applicants))
        # Q2: membership status; unknown applicants default to non-members.
        memberList = list(
            filter(lambda person: memberStatus.get(person, False),
                   successfulApplicants))
        # Q3: $3.50 per member, $5.00 per non-member visitor.
        MEMBER_PRICE = 3.5
        VISITOR_PRICE = 5
        numberOfMembers = len(memberList)
        numberOfVisitors = len(successfulApplicants) - numberOfMembers
        totalCost = (numberOfMembers * MEMBER_PRICE
                     + numberOfVisitors * VISITOR_PRICE)

        # Q4: one ticket dict per valid visitor, members first.
        def makeTicket(name, status, price):
            # Shape of a single ticket record.
            return {"name": name, "membershipStatus": status, "price": price}
        tickets = [makeTicket(p, "member", MEMBER_PRICE) for p in memberList]
        tickets += [makeTicket(p, "non-member", VISITOR_PRICE)
                    for p in successfulApplicants if p not in memberList]
        return {
            "successfulApplicants": successfulApplicants,
            "bannedApplicants": bannedApplicants,
            "totalCost": totalCost,
            "tickets": tickets
        }
    except (KeyError, TypeError):
        # Narrowed from a bare `except:` — only malformed requests map to
        # the generic error; genuine bugs now surface instead of hiding.
        return {"error": "No applicants"}
# Demo: run the pipeline on the sample request and show the summary dict.
print(processRequest(request))
# {
# successfulApplicants:
# bannedApplicatns:
# totalCost:
# tickets: [
# {
# "name": ,
# "membershipStatus": ,
# "price":
# }, ....
# ]
#
# }
# OR
# {"error": "No applicants"}
| null | 1-python-question.py | 1-python-question.py | py | 3,295 | python | en | code | null | code-starcoder2 | 51 |
629201190 | import inspect
import numpy as np
import os
from unittest import TestCase
from fitbenchmarking import mock_problems
from fitbenchmarking.controllers.base_controller import Controller
from fitbenchmarking.controllers.controller_factory import ControllerFactory
from fitbenchmarking.controllers.dfo_controller import DFOController
from fitbenchmarking.controllers.gsl_controller import GSLController
from fitbenchmarking.controllers.mantid_controller import MantidController
from fitbenchmarking.controllers.minuit_controller import MinuitController
from fitbenchmarking.controllers.ralfit_controller import RALFitController
from fitbenchmarking.controllers.sasview_controller import SasviewController
from fitbenchmarking.controllers.scipy_controller import ScipyController
from fitbenchmarking.parsing.parser_factory import parse_problem_file
from fitbenchmarking.utils import exceptions
def make_fitting_problem():
    """
    Helper function that returns a simple fitting problem.

    Parses the bundled ``cubic.dat`` mock problem (located next to the
    ``mock_problems`` package) and applies data correction before returning.

    :return: parsed fitting problem used by the controller tests
    """
    bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))
    fname = os.path.join(bench_prob_dir, 'cubic.dat')
    fitting_problem = parse_problem_file(fname)
    # presumably True enables use of error bars in the correction —
    # TODO confirm against FittingProblem.correct_data's signature.
    fitting_problem.correct_data(True)
    return fitting_problem
class DummyController(Controller):
    """
    Minimal instantiatable subclass of Controller class for testing.

    Only ``setup`` does anything (records a sentinel value); the remaining
    abstract hooks raise so tests exercising them fail loudly.
    """
    def setup(self):
        # Sentinel checked by BaseControllerTests.test_prepare.
        self.setup_result = 53
    def fit(self):
        raise NotImplementedError
    def cleanup(self):
        raise NotImplementedError
    def error_flags(self):
        raise NotImplementedError
class BaseControllerTests(TestCase):
    """
    Tests for base software controller class methods.

    Uses :class:`DummyController` so only the shared Controller machinery
    (data cropping, prepare(), eval_chisq(), attribute checks) is exercised.
    """
    def setUp(self):
        self.problem = make_fitting_problem()
    def test_data(self):
        """
        BaseSoftwareController: Test data is read into controller correctly
        """
        controller = DummyController(self.problem)
        # Data should be cropped to [start_x, end_x] when those are set.
        if self.problem.start_x is not None:
            assert min(controller.data_x) >= self.problem.start_x
        if self.problem.end_x is not None:
            assert max(controller.data_x) <= self.problem.end_x
        assert len(controller.data_e) == len(controller.data_x)
        assert len(controller.data_e) == len(controller.data_y)
        # Every retained point must come from the original problem data.
        self.assertTrue(all(x in self.problem.data_x
                            for x in controller.data_x))
        self.assertTrue(all(y in self.problem.data_y
                            for y in controller.data_y))
        e_is_default = self.problem.data_e is None
        if not e_is_default:
            self.assertTrue(all(e in self.problem.data_e
                                for e in controller.data_e))
    def test_prepare(self):
        """
        BaseSoftwareController: Test prepare function
        """
        controller = DummyController(self.problem)
        controller.minimizer = 'test'
        controller.parameter_set = 0
        controller.prepare()
        # prepare() must have dispatched to DummyController.setup().
        assert controller.setup_result == 53
    def test_eval_chisq_no_errors(self):
        """
        BaseSoftwareController: Test eval_chisq function
        """
        controller = DummyController(self.problem)
        params = np.array([1, 2, 3, 4])
        x = np.array([6, 2, 32, 4])
        y = np.array([1, 21, 3, 4])
        e = None
        # Controller chisq must match the problem's own residual norm.
        result = self.problem.eval_r_norm(params=params, x=x, y=y, e=e)
        assert controller.eval_chisq(params=params, x=x, y=y, e=e) == result
    def test_eval_chisq_with_errors(self):
        """
        BaseSoftwareController: Test eval_chisq function
        """
        controller = DummyController(self.problem)
        params = np.array([1, 2, 3, 4])
        x = np.array([6, 2, 32, 4])
        y = np.array([1, 21, 3, 4])
        e = np.array([.5, .003, 1, 2])
        result = self.problem.eval_r_norm(params=params, x=x, y=y, e=e)
        assert controller.eval_chisq(params=params, x=x, y=y, e=e) == result
    def test_check_flag_attr_true(self):
        """
        BaseSoftwareController: Test check_attributes function for flag
        attribute
        """
        controller = DummyController(self.problem)
        controller.flag = 1
        controller.check_attributes()
    def test_check_flag_attr_false(self):
        """
        BaseSoftwareController: Test check_attributes function for flag
        attribute
        """
        controller = DummyController(self.problem)
        # Missing flag must raise...
        with self.assertRaises(exceptions.ControllerAttributeError):
            controller.check_attributes()
        # ...and so must an out-of-range flag value.
        controller.flag = 10
        with self.assertRaises(exceptions.ControllerAttributeError):
            controller.check_attributes()
class ControllerTests(TestCase):
    """
    Tests for each controller class.

    Each test_<software> method runs a full prepare/fit/cleanup cycle via
    shared_testing(), then forces the controller's internal status to a
    converged / max-iterations / diverged value and checks the resulting
    generic flag.

    NOTE(review): "check_conveged" is a typo for "check_converged"; it is
    used consistently so behaviour is unaffected, but a rename (with all
    call sites) would improve readability.
    """
    def setUp(self):
        self.problem = make_fitting_problem()
    def test_mantid(self):
        """
        MantidController: Test for output shape
        """
        controller = MantidController(self.problem)
        controller.minimizer = 'Levenberg-Marquardt'
        self.shared_testing(controller)
        # Mantid reports status as strings.
        controller._status = "success"
        self.check_conveged(controller)
        controller._status = "Failed to converge"
        self.check_max_iterations(controller)
        controller._status = "Failed"
        self.check_diverged(controller)
    def test_sasview(self):
        """
        SasviewController: Test for output shape
        """
        controller = SasviewController(self.problem)
        controller.minimizer = 'amoeba'
        self.shared_testing(controller)
        controller._status = 0
        self.check_conveged(controller)
        controller._status = 2
        self.check_max_iterations(controller)
        controller._status = 1
        self.check_diverged(controller)
    def test_scipy(self):
        """
        ScipyController: Test for output shape
        """
        controller = ScipyController(self.problem)
        controller.minimizer = 'lm'
        self.shared_testing(controller)
        controller._status = 1
        self.check_conveged(controller)
        controller._status = 0
        self.check_max_iterations(controller)
        controller._status = -1
        self.check_diverged(controller)
    def test_dfo(self):
        """
        DFOController: Tests for output shape
        """
        controller = DFOController(self.problem)
        # test one from each class
        minimizers = ['dfogn',
                      'dfols']
        for minimizer in minimizers:
            controller.minimizer = minimizer
            self.shared_testing(controller)
            controller._status = 0
            self.check_conveged(controller)
            controller._status = 2
            self.check_max_iterations(controller)
            controller._status = 5
            self.check_diverged(controller)
    def test_gsl(self):
        """
        GSLController: Tests for output shape
        """
        controller = GSLController(self.problem)
        # test one from each class
        minimizers = ['lmsder',
                      'nmsimplex',
                      'conjugate_pr']
        for minimizer in minimizers:
            controller.minimizer = minimizer
            self.shared_testing(controller)
            # GSL sets the generic flag directly (no _status attribute).
            controller.flag = 0
            self.check_conveged(controller)
            controller.flag = 1
            self.check_max_iterations(controller)
            controller.flag = 2
            self.check_diverged(controller)
    def test_ralfit(self):
        """
        RALFitController: Tests for output shape
        """
        controller = RALFitController(self.problem)
        minimizers = ['gn', 'gn_reg', 'hybrid', 'hybrid_reg']
        for minimizer in minimizers:
            controller.minimizer = minimizer
            self.shared_testing(controller)
            controller._status = 0
            self.check_conveged(controller)
            controller._status = 2
            self.check_diverged(controller)
    def test_minuit(self):
        """
        MinuitController: Tests for output shape
        """
        controller = MinuitController(self.problem)
        controller.minimizer = 'minuit'
        self.shared_testing(controller)
        controller._status = 0
        self.check_conveged(controller)
        controller._status = 2
        self.check_diverged(controller)
    def shared_testing(self, controller):
        """
        Utility function to run controller and check output is in generic form

        :param controller: Controller to test, with setup already completed
        :type controller: Object derived from BaseSoftwareController
        """
        controller.parameter_set = 0
        controller.prepare()
        controller.fit()
        controller.cleanup()
        # Fitted curve must match the data length; parameter count unchanged.
        assert len(controller.results) == len(controller.data_y)
        assert len(controller.final_params) == len(controller.initial_params)
    def check_conveged(self, controller):
        """
        Utility function to check controller.cleanup() produces a success flag

        :param controller: Controller to test, with setup already completed
        :type controller: Object derived from BaseSoftwareController
        """
        controller.cleanup()
        assert controller.flag == 0
    def check_max_iterations(self, controller):
        """
        Utility function to check controller.cleanup() produces a maximum
        iteration flag

        :param controller: Controller to test, with setup already completed
        :type controller: Object derived from BaseSoftwareController
        """
        controller.cleanup()
        assert controller.flag == 1
    def check_diverged(self, controller):
        """
        Utility function to check controller.cleanup() produces a fail

        :param controller: Controller to test, with setup already completed
        :type controller: Object derived from BaseSoftwareController
        """
        controller.cleanup()
        assert controller.flag == 2
class FactoryTests(TestCase):
    """
    Tests for the ControllerFactory.
    """
    def test_imports(self):
        """
        Test that the factory returns the correct class for inputs
        """
        valid = ['scipy', 'mantid', 'sasview', 'ralfit']
        invalid = ['foo', 'bar', 'hello', 'r2d2']
        for software in valid:
            controller = ControllerFactory.create_controller(software)
            # Controller class name is expected to start with the software
            # name (e.g. "scipy" -> ScipyController).
            self.assertTrue(controller.__name__.lower().startswith(software))
        for software in invalid:
            # Unknown software names must raise NoControllerError.
            self.assertRaises(exceptions.NoControllerError,
                              ControllerFactory.create_controller,
                              software)
| null | fitbenchmarking/controllers/tests/test_controllers.py | test_controllers.py | py | 10,749 | python | en | code | null | code-starcoder2 | 51 |
472058748 | """Trains a ResNet on the CIFAR10 dataset.
ResNet v1
Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf
ResNet v2
Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
import numpy as np
import os
# Training params.
batch_size = 32
epochs = 180
data_augmentation = True
# |      | | Orig Paper| | Orig Paper|
# Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | sec/epoch
# | | %Accuracy | %Accuracy | %Accuracy | %Accuracy | GTX 1080Ti
# ResNet20 | 3 | 91.95 | 91.25 | 92.57 | - | 58
# ResNet32 | 5 | 92.00 | 92.49 | 92.22 | - | 96
# ResNet44 | 7 | 91.07 | 92.83 | 91.02 | - | 128
# ResNet56 | 9 | 90.25 | 93.03 | 91.37 | - | 163
# ResNet110 | 18 | 90.23 | 93.39 | 91.22 | 93.63 | 330
# Depth parameter: total layers = 6n + 2 for the table above.
n = 3
# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
version = 1
# Subtracting pixel mean improves accuracy
use_pix_mean = True
# Network architecture params.
num_classes = 10
num_filters = 16
num_blocks = 3
# Each block stage contains 2n residual sub-blocks of two conv layers each.
num_sub_blocks = 2 * n
def lr_schedule(epoch):
lr = 1e-3
if n == 18:
lr = 1e-4
if epoch > 160:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
print("Learning rate: ", lr)
return lr
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Input image dimensions.
# We assume data format "channels_last".
img_rows = x_train.shape[1]
img_cols = x_train.shape[2]
channels = x_train.shape[3]
if K.image_data_format() == 'channels_first':
    # NOTE(review): this branch indexes the raw array as if it were already
    # channels_first; confirm cifar10.load_data() honours the backend data
    # format, otherwise rows/cols/channels are read from the wrong axes.
    img_rows = x_train.shape[2]
    img_cols = x_train.shape[3]
    channels = x_train.shape[1]
    x_train = x_train.reshape(x_train.shape[0], channels, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], channels, img_rows, img_cols)
    input_shape = (channels, img_rows, img_cols)
else:
    img_rows = x_train.shape[1]
    img_cols = x_train.shape[2]
    channels = x_train.shape[3]
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, channels)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, channels)
    input_shape = (img_rows, img_cols, channels)
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
if use_pix_mean:
    # Subtract the per-pixel training mean from both splits.
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Start model definition.
inputs = Input(shape=input_shape)
# Stem: 3x3 conv -> BN -> ReLU before the residual stages.
x = Conv2D(num_filters,
           kernel_size=3,
           padding='same',
           kernel_initializer='he_normal',
           kernel_regularizer=l2(1e-4))(inputs)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# Instantiate convolutional base (stack of blocks).
for i in range(num_blocks):
    for j in range(num_sub_blocks):
        strides = 1
        # First sub-block of stages 2+ downsamples spatially (stride 2).
        is_first_layer_but_not_first_block = j == 0 and i > 0
        if is_first_layer_but_not_first_block:
            strides = 2
        y = Conv2D(num_filters,
                   kernel_size=3,
                   padding='same',
                   strides=strides,
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4))(x)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv2D(num_filters,
                   kernel_size=3,
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4))(y)
        y = BatchNormalization()(y)
        # v2 applies ReLU before the add; v1 applies it after (see below).
        if version == 2:
            y = Activation('relu')(y)
        if is_first_layer_but_not_first_block:
            # 1x1 conv on the shortcut to match the downsampled shape.
            x = Conv2D(num_filters,
                       kernel_size=1,
                       padding='same',
                       strides=2,
                       kernel_initializer='he_normal',
                       kernel_regularizer=l2(1e-4))(x)
        x = keras.layers.add([x, y])
        if version != 2:
            x = Activation('relu')(x)
    # Double the filter count for the next stage.
    num_filters = 2 * num_filters
# Add classifier on top.
x = AveragePooling2D()(x)
y = Flatten()(x)
outputs = Dense(num_classes,
                activation='softmax',
                kernel_initializer='he_normal')(y)
# Instantiate and compile model.
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=lr_schedule(0)),
              metrics=['accuracy'])
model.summary()
if version == 2:
    print("ResNet v2")
else:
    print("ResNet v1")
# Prepare model model saving directory.
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'cifar10_resnet_model.{epoch:02d}.h5'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
# Prepare callbacks for model saving and for learning rate decaying.
# NOTE(review): 'val_acc' is the metric name in older Keras; newer versions
# use 'val_accuracy' — confirm against the installed Keras version.
checkpoint = ModelCheckpoint(filepath=filepath,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True)
lr_scheduler = LearningRateScheduler(lr_schedule)
callbacks = [checkpoint, lr_scheduler]
# Run training, with or without data augmentation.
if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True,
              callbacks=callbacks)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (deg 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally
        height_shift_range=0.1,  # randomly shift images vertically
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images
    # Compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        validation_data=(x_test, y_test),
                        epochs=epochs, verbose=1, workers=4,
                        callbacks=callbacks)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
| null | chapter3/cifar10-resnet.3.2.1.py | cifar10-resnet.3.2.1.py | py | 7,922 | python | en | code | null | code-starcoder2 | 51 |
409562073 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import models, fields, api, _
class ResPartner(models.Model):
    # Extend the core res.partner model with Indian tax identifiers.
    _inherit = 'res.partner'
    # Permanent Account Number (Indian income-tax ID).
    pan_no = fields.Char('PAN Number')
    # Goods and Services Tax registration number.
    gst_no = fields.Char('GST Number')
| null | gst/models/partner.py | partner.py | py | 285 | python | en | code | null | code-starcoder2 | 51 |
145846894 | #%%
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.optimize import fsolve
import phd.viz
# Apply the thesis-wide matplotlib style and the magma seaborn palette.
_, palette = phd.viz.phd_style()
sns.set_palette('magma')
# Define functions to be used in figure
def pact(IPTG, K_A, K_I, e_AI):
    """Probability that a repressor is in the active conformation.

    Parameters
    ----------
    IPTG : array-like
        IPTG concentrations (same units as K_A / K_I).
    K_A : float
        Dissociation constant of the active repressor.
    K_I : float
        Dissociation constant of the inactive repressor.
    e_AI : float
        Energy difference between active and inactive states (k_BT).

    Returns
    -------
    Probability that the repressor is active (MWC two-site model).
    """
    # Statistical weights of the active and inactive conformations.
    active_weight = (1 + IPTG / K_A) ** 2
    inactive_weight = np.exp(-e_AI) * (1 + IPTG / K_I) ** 2
    return active_weight / (active_weight + inactive_weight)
def fugacity(IPTG, R, Ns, e_s, K_A=139E-6, K_I=0.53E-6, e_AI=4.5, Nc=0, e_c=0):
    '''
    Solves for the fugacity of simple repression with
    multiple promoter copies (Ns, with energy e_s) or competitor sites
    (Nc, with energy e_c).

    Parameters
    ----------
    IPTG : array-like
        IPTG concentrations at which to solve for the fugacity.
    R : float
        Number of repressors per cell
    Ns : float
        Number of specific operators available for repressor binding
    e_s : float
        Binding energy between specific operator and repressor as inferred in
        Garcia 2011
    K_A : float
        Dissociation constant for active repressor
    K_I : float
        Dissociation constant for inactive repressor
    e_AI : float
        Energetic difference between the active and inactive state
    Nc : float
        Number of competitor operators available for repressor binding
    e_c : float
        Binding energy between competitor operator and repressor

    Returns
    -------
    fugacity at each IPTG concentration (array of fsolve outputs)
    '''
    # Number of non-specific binding sites (genome length, bp).
    NNS = 4.6E6
    lam = []
    # Repressor-number conservation: effective repressors distributed over
    # specific, non-specific and competitor sites must sum to Reff.
    # `Reff` is rebound in the loop below before each fsolve call.
    def func(x): return -Reff + Ns * (x * np.exp(-e_s)) / (1 + x * np.exp(-e_s)) +\
        NNS * (x) / (1 + x) + \
        Nc * (x * np.exp(-e_c)) / (1 + x * np.exp(-e_c))
    for c in IPTG:
        # Only the active fraction of repressors competes for binding.
        Reff = R * pact(c, K_A, K_I, e_AI)
        lam.append(fsolve(func, 0))
    return np.array(lam)
def occupancy(lam, e_s):
    """Fold-change for simple repression given the fugacity.

    Parameters
    ----------
    lam : float
        Fugacity of the system, as computed by :func:`fugacity`.
    e_s : float
        Binding energy of the specific operator.

    Returns
    -------
    Fold-change (occupancy) of the promoter.
    """
    # Boltzmann weight of a repressor bound at the specific operator.
    bound_weight = lam * np.exp(-e_s)
    return 1 / (1 + bound_weight)
# Define parameter values
# Operator binding energies (k_BT) for O1, O2, O3 from Garcia 2011.
ops = [-15.3, -13.9, -9.7]
op_names = ['O1', 'O2', 'O3']
fig_labels = ['(A)', '(B)', '(C)']
# Numbers of competitor sites to sweep per panel.
Nc = [1, 10, 50, 100, 250, 500]
Ns = [1]
IPTG = np.logspace(-8, -2, 100)
R = 260
e_c = -17.0
# Plot figure
fig, ax = plt.subplots(ncols=3, sharey=False, figsize=(6, 2))
phd.viz.despine(ax)
for i, a in enumerate(ax):
    for N in Nc:
        # Solve the fugacity for this operator / competitor count and
        # convert it to fold-change.
        lam_array = fugacity(IPTG, R, Ns=1, e_s=ops[i], Nc=N, e_c=e_c)
        fc = occupancy(lam_array, ops[i])
        a.plot(IPTG*1E6, fc, label=N,)
    a.set_xscale('log')
    a.set_ylabel('fold-change')
    a.set_xlabel('IPTG [µM]')
    a.set_ylim(-0.01, 1.1)
    a.set_xlim(1E-2, 1E4)
    # Add figure text
    phd.viz.titlebox(a,r'%s $\Delta \varepsilon_{RA}= %0.1f\ k_BT$' % (
        op_names[i], ops[i]), bgcolor='white', color=_['black'],
        boxsize='12%', pad=0.05, size=6)
    a.text(-0.32, 1.05, fig_labels[i], transform=a.transAxes,
           fontsize=8)
# Add legend
leg1 = ax[2].legend(title=r'$N_c$', loc='lower right', fontsize=6)
leg1.get_title().set_fontsize(6)
plt.tight_layout()
plt.savefig('../figs/ch6_figS5.pdf', bbox_inches='tight')
plt.savefig('../figs/ch6_figS5.png', bbox_inches='tight')
517261063 | import numpy as np
import math
from src.PCA import PCA
from src.procrustes import Procrustes
from src.tooth import Tooth
class ModelFitter:
    """Iteratively fits an active-shape tooth model to a target shape."""
    def fitModel(self, target_tooth, model):
        """Fit `model` to `target_tooth` by alternating Procrustes alignment
        and PCA projection until the shape parameters `b` converge
        (or at most 20 iterations)."""
        self.procrustes = Procrustes()
        self.pca = PCA()
        eigenvectors = model.getEigenvectors()
        Y = target_tooth
        mean = Tooth(model.getMeanModel())
        # Init
        b = 0
        X = mean
        i = 0
        while i < 20:
            i += 1
            X, b_new = self.step(Y, X, eigenvectors, mean)
            # Stop once the shape parameters no longer change.
            if np.allclose(b, b_new):
                break
            else:
                b = b_new
        return X
    def step(self, Y, X, eigenvectors, mean):
        """Perform one alignment/projection iteration; returns the
        reconstructed model shape and the new parameter vector b."""
        # Fit Y to X
        Y_new = self.procrustes.allignDataToModel(Y,X)
        # Project Y into X space and get new b
        b = self.pca.project(Y_new.getLandmarks().flatten(), eigenvectors, mean.getLandmarks().flatten())
        # print("b = " + str(b))
        # Generate new model points X
        X_new = self.pca.reconstruct(b, eigenvectors, mean.getLandmarks().flatten())
        # Landmarks are stored flattened as (x0, y0, x1, y1, ...); restore
        # the (n_points, 2) layout before wrapping in a Tooth.
        X_new = X_new.reshape((X_new.shape[0] // 2, 2))
        return Tooth(X_new), b
| null | src/modelFitter.py | modelFitter.py | py | 1,216 | python | en | code | null | code-starcoder2 | 51 |
639465008 | #!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
from __future__ import print_function
import sys
import gc
import resource
import re
import logging
import time
import os
import codecs
import itertools
from datetime import timedelta
from optparse import OptionParser
import numpy as np
from scipy import sparse as sp
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from lexicon import Lexicon
import nltk
from nltk.corpus import mac_morpho
# Command-line options for the dataset directory and ranking strategy.
parser = OptionParser(usage="%prog [options] <datasetdir>")
parser.add_option("-e", "--encoding", dest="encoding", default="latin_1", help="Dataset encoding")
parser.add_option("-r", "--initial-ranking", dest="ranking_method", default="cosine_similarity", help="Initial ranking method (cosine_similarity, accuracy) Default: cosine_similarity")
# Python 2 workaround: stdout has no encoding when piped.
# NOTE(review): prefer `is None` over `== None` here.
if sys.stdout.encoding == None:
    print("Fixing stdout encoding...")
    import codecs
    import locale
    # Wrap sys.stdout into a StreamWriter to allow writing unicode.
    sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
(options, args) = parser.parse_args()
#if len(args) == 0:
    #parser.print_help()
    #sys.exit()
class SequentialSelection():
    """Interactive selection of POS-grammar candidates against a corpus.

    Loads all documents from a hard-coded corpus directory and lets the
    user confirm, one by one, whether a proposed grammar matches the
    tagged sentence it was found in.
    """
    def __init__(self,tagger, tokenizer):
        self.tagger = tagger
        self.tokenizer = tokenizer
        # NOTE(review): corpus path is hard-coded to a personal home dir;
        # consider taking it as a parameter.
        self.corpus, self.allowedDocs = self.getDocs("/home/markinhos/datasets/atribuna/")
    def select_grammar(self, ranking):
        """Ask the user to validate ranked grammar/sentence pairs; on the
        first acceptance, rotate the ranking so accepted items come first."""
        ind = 0
        resp = 'N'
        result = []
        while resp != 'S' and resp != 's':
            if ind < len(ranking):
                print("Analise as gramaticas a seguir:")
                tkn = self.tokenizer.findall(ranking[ind][2])
                idxs = ranking[ind][3]
                # Tag each token individually with the loaded tagger.
                tag_phrase = [self.tagger.tag([w])[0][1] for w in tkn]
                # Highlight (bold) the matched token span in the sentence.
                print("[Pos = "+str(ranking[ind][0])+"]\n"+str(tkn[:idxs[0]])+('\033[1m'+str(tkn[idxs[0]:idxs[1]]))+('\033[0m'+str(tkn[idxs[1]:])))
                print(ranking[ind][1]," => ",str(tag_phrase[:idxs[0]])+('\033[1m'+str(tag_phrase[idxs[0]:idxs[1]]))+('\033[0m'+str(tag_phrase[idxs[1]:])))
                resp = input("Elas sao compativeis? Sim ou Nao { S | N } : ")
                if resp == 'S'or resp == 's':
                    # Rotate: entries from `ind` onward first, earlier ones after.
                    for x,y in enumerate(ranking):
                        if x >= ind:
                            result.append(y)
                    for x,y in enumerate(ranking):
                        if x < ind:
                            result.append(y)
                else:
                    ind += 1
            else:
                print("Fim da lista. Indice resetado")
                ind = 0
        return result
    def search_db_samples(self, grams):
        """Find, for each candidate grammar, a corpus sentence whose POS-tag
        sequence contains it; returns [position, grammar, phrase, span]."""
        dbdata = []
        for pos, phrase in enumerate(self.corpus.split(".")):
            tag_phrase = [tagger2.tag([w])[0][1] for w in tokenizer.findall(phrase)]
            for i, gcan in enumerate(grams):
                idxs = self.contains(gcan,tag_phrase)
                if idxs != None:
                    # NOTE(review): deleting from `grams` while enumerating
                    # it skips the following candidate — iterate a copy or
                    # collect matches first. Also, this uses the module-level
                    # `tagger2`/`tokenizer`, not self.tagger/self.tokenizer.
                    del(grams[i])
                    dbdata.append([pos,list(gcan), phrase, idxs])
        return dbdata
    def readDocument(self, source):
        """Read one corpus document as ISO-8859-1 text."""
        with codecs.open(source, "r", encoding='iso-8859-1') as document:
            return document.read()
    def getDocs(self, resources):
        """Return (concatenated corpus text, list of document names),
        skipping editor backup files ending in '~'."""
        docs = os.listdir(resources)
        allowedDocs = []
        corpus = []
        for doc in docs:
            if not doc[-1] == '~':
                allowedDocs.append(doc)
                document = self.readDocument("{0}/{1}".format(resources, doc))
                corpus.append(document)
        return " ".join(corpus), allowedDocs
    def contains(self, small, big):
        """Return (start, end) of the first occurrence of sequence `small`
        inside sequence `big`, or None if absent."""
        for i in range(len(big)-len(small)+1):
            for j in range(len(small)):
                if big[i+j] != small[j]:
                    break
            else:
                return i, i+len(small)
        return None
class ElapsedFormatter():
    """Log formatter prefixing each message with elapsed wall-clock time and
    the process's peak RSS (as reported by resource.getrusage)."""

    def __init__(self):
        # Reference point: formatter creation time.
        self.start_time = time.time()

    def format(self, record):
        # timedelta gives a convenient H:MM:SS.ffffff rendering; the last
        # three characters are trimmed to reduce fractional precision.
        elapsed = timedelta(seconds=record.created - self.start_time)
        rss_mb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
        return "[%s][RAM: %.2f MB] %s" % (str(elapsed)[:-3], rss_mb, record.getMessage())
#add custom formatter to root logger for simple demonstration
handler = logging.StreamHandler()
handler.setFormatter(ElapsedFormatter())
logging.getLogger().addHandler(handler)
log = logging.getLogger('main')
log.setLevel(logging.DEBUG)
# Read the input file: one ';'-separated record per line, lower-cased.
# Field 1 of each record is used as the sentence later on.
with open("input.txt") as f:
    text = f.readlines()
inputs = [i.replace("\n", "").lower().split(";") for i in text]
log.info(inputs)
#dataset_folder = args[0]
# Morphological lexicon used to enumerate the possible POS tags of each word.
lexicon = Lexicon("Portuguese (Brazil)/Dela/")
def get_candidates(sentences):
    """Enumerate candidate POS-tag sequences compatible with every sentence.

    For each word the lexicon yields its possible tags at three levels of
    detail: the full tag, the tag truncated at ':', and the tag further
    truncated at '+'.  The cartesian product over a sentence's words gives
    that sentence's candidate tag sequences; candidates are intersected
    across sentences so only sequences valid for all of them survive.

    Returns (candidates_simple, candidates_med, candidates_full), each a set
    of tag tuples.
    """
    candidates_simple = set()
    candidates_med = set()
    candidates_full = set()
    tokenizer = re.compile('\w+')
    for s in sentences:
        sent_words = tokenizer.findall(s)
        # Per-word tag alternatives at each granularity level.
        pos_full = []
        pos_med = []
        pos_simple = []
        for w in sent_words:
            lemmas = lexicon.get_lemmas(w)
            pos_full += [set([p[1] for p in lemmas])]
            pos_med += [set([p[1].split(":")[0] for p in lemmas])]
            pos_simple += [set([p[1].split(":")[0].split("+")[0] for p in lemmas])]
            #print(w, lemmas)
        #print(pos_med)
        #print(pos_simple)
        if len(candidates_simple) == 0:
            # First sentence seeds the candidate sets.
            #print("TESTE",pos_simple)
            candidates_simple = set(itertools.product(*pos_simple))
            candidates_med = set(itertools.product(*pos_med))
            candidates_full = set(itertools.product(*pos_full))
        else:
            candidates_simple = candidates_simple.intersection(set(itertools.product(*pos_simple)))
            candidates_med = candidates_med.intersection(set(itertools.product(*pos_med)))
            candidates_full = candidates_full.intersection(set(itertools.product(*pos_full)))
    #print("ITERTOOLS")
    #print(candidates_simple)
    return candidates_simple, candidates_med, candidates_full
# Field 1 of each input record is the sentence to analyse.
sentences = [s[1] for s in inputs]
log.info("Loading Mac-Morpho Tagged Sents...")
tsents = list(mac_morpho.tagged_sents())
def simplify_tag(t):
    """Simplify a mac_morpho POS tag: drop any contraction prefix (text up to
    and including '+') and rename ART to DET."""
    plus = t.find("+")
    if plus != -1:
        t = t[plus + 1:]
    return "DET" if t == "ART" else t
log.info("Simplifyng POS Tags...")
tsents = [[(w.lower(),simplify_tag(t)) for (w,t) in sent] for sent in tsents if sent]
# NOTE(review): the taggers are trained on the full corpus and the "test"
# slice is drawn from the same data, so any evaluation here is optimistic.
train = tsents
test = tsents[:300]
log.info("Training POS Taggers...")
# Backoff chain: bigram -> unigram -> default tag 'N'.
tagger0 = nltk.DefaultTagger('N')
tagger1 = nltk.UnigramTagger(train, backoff=tagger0)
tagger2 = nltk.BigramTagger(train, backoff=tagger1)
#log.info("Evaluate tagger")
#print(tagger2.evaluate(test))
#log.info("TAGSET")
#tags = [simplify_tag(tag) for (word,tag) in mac_morpho.tagged_words()]
#fd = nltk.FreqDist(tags)
#print(fd.keys())
tokenizer = re.compile('\w+')
# Main pipeline: for each input sentence, enumerate candidate grammars, rank
# them against the statistical tagger's output, let the user refine the
# ranking interactively, and write the result to gram-<n>.txt.
for input_id, s in enumerate(sentences):
    log.info("Sentence: %s" % (s))
    candidates_simple, candidates_med, candidates_full = get_candidates([s])
    #print(candidates_simple)
    # Tag the sentence word by word with the trained backoff tagger.
    tagged_sent = [tagger2.tag([w])[0][1] for w in tokenizer.findall(s)]
    #print(s, tagged_sent)
    candidates_simple = np.array(list(candidates_simple))
    tagged_sent = np.array(tagged_sent)
    # Accuracy ranking: fraction of positions where a candidate agrees with
    # the tagger's output.
    gram_acc = candidates_simple == tagged_sent
    #print(gram_acc)
    gram_acc = gram_acc.astype(np.float64).sum(axis=1) / gram_acc.shape[1]
    #print(gram_acc)
    log.info("Vectorizing...")
    # Cosine ranking: bag-of-tags similarity between each candidate and the
    # tagger's output.
    count_vect = CountVectorizer(dtype=np.float64, token_pattern='\w+')
    X = [" ".join(tokens) for tokens in candidates_simple]
    #print(X)
    X_vect = count_vect.fit_transform(X)
    #print(X_vect.todense())
    tagged_sent_vect = count_vect.transform([" ".join(tagged_sent)])[0]
    #print(tagged_sent_vect)
    #print(X[0])
    #print(" ".join(tagged_sent))
    #print(tagged_sent_vect.todense())
    log.info("(%d, %d)" % (X_vect.shape[0],X_vect.shape[1]))
    gram_sim = cosine_similarity(X_vect, tagged_sent_vect)
    #print(gram_sim)
    if options.ranking_method == "cosine_similarity":
        log.info("Using cosine_similarity ranking...")
        gram_rank = gram_sim
    elif options.ranking_method == "accuracy":
        log.info("Using accuracy ranking...")
        gram_rank = gram_acc
    else:
        # NOTE(review): the %s placeholder has no argument, so the message is
        # printed literally with '%s' in it.
        log.warning("Unknown ranking method %s ignored, using cosine_similarity")
        gram_rank = gram_sim
    # NOTE(review): top_gram is computed but never used afterwards.
    top_idx = np.argmax(gram_rank)
    top_gram = candidates_simple[top_idx]
    # Candidate grammars ordered best-first by the chosen ranking.
    cpcand = []
    sorted_grams_idx = np.argsort(-gram_rank, axis=0)
    for i, gram_idx in enumerate(sorted_grams_idx):
        cpcand.append(list(candidates_simple[gram_idx][0]))
    print([(x) for x in cpcand])
    # Interactive refinement: match candidates against corpus sentences and
    # let the user confirm/reject them.
    selection_strategy = SequentialSelection(tagger2,tokenizer)
    samples = selection_strategy.search_db_samples(cpcand)
    gram_rank = selection_strategy.select_grammar(samples)
    log.info("%s: Writing results..." % (inputs[input_id]))
    '''with open("gram-%d.txt" % (input_id), 'w') as f:
        f.write("%s\n" % (inputs[input_id]))
        f.write("Meta: %s\n" % (" ".join(tagged_sent)))
        sorted_grams_idx = np.argsort(-gram_rank, axis=0)
        #print("sorted_grams_idx",sorted_grams_idx)
        for i, gram_idx in enumerate(sorted_grams_idx):
            gram = candidates_simple[gram_idx][0]
            #print(gram_idx, gram)
            f.write("%d: %s - %.03f\n" % (i, " ".join(gram), gram_rank[gram_idx]))
    '''
    # Write the user-refined ranking: index, sentence position, grammar and
    # the matched token span of each sample.
    with open("gram-%d.txt" % (input_id), 'w') as f:
        f.write("%s\n" % (inputs[input_id]))
        f.write("Meta: %s\n" % (" ".join(tagged_sent)))
        for i, gram in enumerate(gram_rank):
            tkn = tokenizer.findall(gram[2])
            f.write(str(i)+": [pos "+str(gram[0])+"] "+str(gram[1])+" => - "+str(tkn[(gram[3])[0]:(gram[3])[1]])+"\n")
log.info("Finished")
log.info("Finished") | null | inc-gram.py | inc-gram.py | py | 8,973 | python | en | code | null | code-starcoder2 | 51 |
433897581 | from turtle import TurtleScreen, RawTurtle, TK
from time import sleep
class Ventana():
    """Top-level Tk window hosting a TurtleScreen drawing surface."""

    def __init__(self, titulo, alto, ancho):
        # Validate the title string and positive pixel dimensions.
        assert isinstance(titulo, str)
        assert isinstance(alto, int) and alto > 0
        assert isinstance(ancho, int) and ancho > 0
        self.root = TK.Tk()
        self.root.title(titulo)
        self.canvas = TK.Canvas(self.root, width=ancho, height=alto)
        self.canvas.pack()
        self.fondo_ventana = TurtleScreen(self.canvas)
        # Flip the y axis so (0, 0) is the top-left corner, y growing downwards.
        self.fondo_ventana.setworldcoordinates(0, alto, ancho, 0)
        self.canvas["bg"] = "gold"
        self.canvas.pack()
        self.pencil = RawTurtle(self.fondo_ventana)
        self.pencil.pencolor("white")
class Laberinto():
    """Draws a maze described by a grid of digits (1 = wall, 0 = free,
    3 = special cell) and animates a solution path over it."""

    # Pixel offset of the grid's top-left corner and cell dimensions.
    Xdis = 10
    Ydis = 10
    Alto = 25
    Ancho= 25

    @staticmethod
    def deme_posicion(i, j):
        """Return the pixel (x, y) of grid cell (row i, column j)."""
        x = Laberinto.Xdis + j * (Laberinto.Ancho + 1)
        y = Laberinto.Ydis + i * (Laberinto.Alto + 1)
        return (x, y)

    def __init__(self, area_dibujo, laberinto):
        # *laberinto* is a whitespace-separated string of digit rows; parse it
        # into a list of lists of ints.
        lista = laberinto.split()
        lista = [ x[:-1] if x[-1] == "\n" else x for x in lista]
        lista = [[int(ch) for ch in x] for x in lista]
        self.laberinto = lista
        self.lienzo = area_dibujo
        self.dibuja_laberinto()

    def dibuja_laberinto(self):
        """Paint every grid cell with a color keyed to its digit."""
        # tracer(False) batches drawing; re-enabled at the end for speed.
        self.lienzo.fondo_ventana.tracer(False)
        self.lienzo.pencil.pencolor("white")
        for i in range(len(self.laberinto)):
            for j in range(len(self.laberinto[i])):
                if self.laberinto[i][j] == 1:
                    self.casilla("black", i, j)
                elif self.laberinto[i][j] == 3:
                    self.casilla("red", i, j)
                elif self.laberinto[i][j] == 0:
                    self.casilla("white", i, j)
        self.lienzo.fondo_ventana.tracer(True)

    def casilla(self, color, i, j):
        """Fill grid cell (i, j) with *color* by tracing its rectangle."""
        x, y = Laberinto.deme_posicion(i, j)
        self.lienzo.pencil.fillcolor(color)
        self.lienzo.pencil.pu()
        self.lienzo.pencil.setpos(x, y)
        self.lienzo.pencil.seth(0)
        self.lienzo.pencil.pd()
        self.lienzo.pencil.begin_fill()
        # Two half-rectangles complete the cell outline.
        for i in range(2):
            self.lienzo.pencil.fd(Laberinto.Ancho+1)
            self.lienzo.pencil.left(90)
            self.lienzo.pencil.fd(Laberinto.Alto+1)
            self.lienzo.pencil.left(90)
        self.lienzo.pencil.end_fill()

    def dibujar(self, sol):
        """Animate the solution path *sol* (sequence of (row, col) pairs) by
        painting its cells dark green one at a time."""
        sleep(1)
        self.lienzo.fondo_ventana.tracer(False)
        for i in range(len(sol)):
            x = int(sol[i][0])
            y = int(sol[i][1])
            sleep(0.05)
            self.casilla("darkgreen", x,y)
        self.lienzo.fondo_ventana.tracer(True)
        sleep(3)
def principal(dibujo, sol):
    """Build the maze window from *dibujo* and animate the solution *sol*."""
    ventana = Ventana("Laberinto", 255, 235)
    laberinto = Laberinto(ventana, dibujo)
    laberinto.dibujar(sol)
| null | laberinto.py | laberinto.py | py | 2,825 | python | en | code | null | code-starcoder2 | 51 |
56631208 | """empty message
Revision ID: 840daf4878a2
Revises: 6fb829e3b6f1
Create Date: 2019-02-06 15:44:35.760937
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '840daf4878a2'
down_revision = '6fb829e3b6f1'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create customer, department and employee tables
    and drop the unique constraint on user.name.  Auto-generated by Alembic;
    edit with care."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('customer',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('date_modified', sa.DateTime(), nullable=True),
    sa.Column('reception_address', sa.String(), nullable=True),
    sa.Column('credit_card_number', sa.String(), nullable=True),
    sa.Column('discount', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('department',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('date_modified', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('description', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('employee',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('date_modified', sa.DateTime(), nullable=True),
    sa.Column('department_id', sa.Integer(), nullable=True),
    sa.Column('account_number', sa.String(), nullable=True),
    sa.Column('charge', sa.String(), nullable=True),
    sa.ForeignKeyConstraint(['department_id'], ['department.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('account_number')
    )
    op.drop_constraint('user_name_key', 'user', type_='unique')
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore the user.name unique constraint and drop
    the tables in reverse dependency order.  Auto-generated by Alembic."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_unique_constraint('user_name_key', 'user', ['name'])
    op.drop_table('employee')
    op.drop_table('department')
    op.drop_table('customer')
    # ### end Alembic commands ###
| null | migrations/versions/840daf4878a2_.py | 840daf4878a2_.py | py | 2,079 | python | en | code | null | code-starcoder2 | 50 |
68212656 | from django.http import Http404
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.cache import cache_page
from wp_main.utilities import responses, utilities
from wp_main.utilities.wp_logging import logger
#from misc.models import wp_misc
from misc import tools as misctools
_log = logger('misc').log
@cache_page(15 * 60)
@csrf_protect
def view_index(request):
    """ Main index for Misc objects. """
    # Per-browser stylesheet plus the misc/highlighter styles.
    styles = [
        utilities.get_browser_style(request),
        '/static/css/misc.min.css',
        '/static/css/highlighter.min.css',
    ]
    context = {
        'request': request,
        'extra_style_link_list': styles,
        'miscobjects': misctools.get_visible_objects(),
    }
    return responses.clean_response_req("misc/index.html",
                                        context,
                                        request=request)
@cache_page(15 * 60)
@csrf_protect
def view_misc_any(request, identifier):
    """ View a specific misc item. """
    misc = misctools.get_by_identifier(identifier)
    if not misc:
        # No misc item found by that identifier
        raise Http404()
    styles = [
        utilities.get_browser_style(request),
        '/static/css/misc.min.css',
        '/static/css/highlighter.min.css',
    ]
    context = {
        'request': request,
        'extra_style_link_list': styles,
        'misc': misc,
    }
    return responses.clean_response_req('misc/misc.html',
                                        context,
                                        request=request)
| null | misc/views.py | views.py | py | 1,715 | python | en | code | null | code-starcoder2 | 50 |
# Demo: a tkinter Entry whose text can be inserted into a Text widget either
# at the cursor position or at the end.
import tkinter as tk

window = tk.Tk()
window.title('my window')
window.geometry('200x200')

# The Entry widget is placed on the window.
# To display it as a password field, set show='*'.
e = tk.Entry(window, show=None)
e.pack()

# Insert the entry's text at the current cursor position ('insert').
def insert_point():
    var = e.get()
    t.insert('insert', var)

# Insert the entry's text at the end of the Text widget ('end').
def insert_end():
    var = e.get()
    t.insert('end', var)

# First button: insert text at the cursor position.
b1 = tk.Button(window, text='insert point', width=15,
               height=4, command=insert_point)
b1.pack()

# Second button: insert text at the end.
b2 = tk.Button(window, text='insert end',
               command=insert_end)
b2.pack()

# Create a Text widget, also placed on the window.
t = tk.Text(window, height=2)
t.pack()

window.mainloop()
| null | tkdemo2/demo2.py | demo2.py | py | 829 | python | en | code | null | code-starcoder2 | 50 |
259890367 | from tkinter import *
import tkinter as tk
from tkinter import ttk
from tkinter import font as tkfont
import tkinter.messagebox
import os
import sqlite3
#####################################################################################################################################################################################
class sis(tk.Tk):
    """Application root window: stacks the Home, Students and Courses frames
    in one grid cell and raises one of them at a time."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        all_frame = tk.Frame(self)
        all_frame.pack(side="top", fill="both", expand = True)
        all_frame.rowconfigure(0, weight=1)
        all_frame.columnconfigure(0, weight=1)
        # Instantiate every page once; all share the same grid cell and are
        # switched by raising the desired frame.
        self.frames = {}
        for F in (Students, Home, Courses):
            frame = F(all_frame, self)
            self.frames[F] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show(Home)

    def show(self, page_number):
        """Raise the frame registered for *page_number* (a page class)."""
        frame = self.frames[page_number]
        frame.tkraise()
#####################################################################################################################################################################################
class Home(tk.Frame):
    """Landing page: navigation buttons plus two large shortcut panels for
    the Students and Courses pages."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        leftcolor = tk.Label(self, height = 1260, width =550, bg = "maroon")
        leftcolor.place(x=0, y=0)
        label = tk.Label(self, text="STUDENT INFORMATION SYSTEM", bg= "gold", fg= "white", relief=RIDGE,font=("Arial bold", 45))
        label.place(x=130,y=20)
        # Bottom navigation bar, repeated on every page.
        home = tk.Button(self, text="HOME",font=("Arial",18,"bold"), height = 1, width = 12,relief=RIDGE, bg="gold", fg="white", command=lambda: controller.show(Home))
        home.place(x=210,y=465)
        home.config(cursor= "hand2")
        course = tk.Button(self, text="COURSES",font=("Arial",18,"bold"),height = 1, width = 12,relief=RIDGE, bg="gold", fg="white", command=lambda: controller.show(Courses))
        course.place(x=880,y=465)
        course.config(cursor= "hand2")
        students = tk.Button(self, text="STUDENTS",font=("Arial",18, "bold"), height = 1, width = 12,relief=RIDGE, bg="gold", fg="white", command=lambda: controller.show(Students))
        students.place(x=540,y=465)
        students.config(cursor= "hand2")
        # Large clickable panels acting as shortcuts to the two main pages.
        self.students=Button(self, font=("Cooper Black",20), padx=5, width=25,height=8, bd=0, text=" ""STUDENT INFORMATION",anchor=W, bg="gold",fg="white", command=lambda: controller.show(Students))
        self.students.config(cursor= "hand2")
        self.students.place(x=120,y=140)
        self.course=Button(self, font=("Cooper Black",20), padx =5, width=25, height=8, bd=0, text=" ""LISTS OF COURSES",anchor=W, bg="gold",fg="white", command=lambda: controller.show(Courses))
        self.course.config(cursor= "hand2")
        self.course.place(x=670,y=140)
#####################################################################################################################################################################################
class Courses(tk.Frame):
    """Course maintenance page: CRUD operations on the sqlite 'courses'
    table, displayed in a Treeview."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        leftcolor = tk.Label(self, height = 1260, width =550, bg = "maroon")
        leftcolor.place(x=0, y=0)
        self.controller = controller
        self.controller.title("Student Information System")
        label = tk.Label(self, text="COURSE/S",bg= "gold", fg= "white", relief=RIDGE, font=("Arial", 40, "bold"))
        label.place(x=500,y=20)
        # Tk variables bound to the form fields and the search box.
        Course_Code = StringVar()
        Course_Name = StringVar()
        SearchBar_Var = StringVar()
        def tablec():
            # Create the courses table on first run.
            conn = sqlite3.connect("sis_v2.db")
            cur = conn.cursor()
            cur.execute("PRAGMA foreign_keys = ON")
            cur.execute("CREATE TABLE IF NOT EXISTS courses (Course_Code TEXT PRIMARY KEY, Course_Name TEXT)")
            conn.commit()
            conn.close()
        def add_course():
            # Insert a new course from the form fields.
            if Course_Code.get() == "" or Course_Name.get() == "" :
                tkinter.messagebox.showinfo("Course/s", "Fill in the box")
            else:
                conn = sqlite3.connect("sis_v2.db")
                c = conn.cursor()
                c.execute("INSERT INTO courses(Course_Code,Course_Name) VALUES (?,?)",(Course_Code.get(),Course_Name.get()))
                conn.commit()
                conn.close()
                Course_Code.set('')
                Course_Name.set('')
                tkinter.messagebox.showinfo("Course/s", "Course Added Successfully!")
                display_course()
        def display_course():
            # Reload the Treeview from the courses table.
            self.course_list.delete(*self.course_list.get_children())
            conn = sqlite3.connect("sis_v2.db")
            cur = conn.cursor()
            cur.execute("SELECT * FROM courses")
            rows = cur.fetchall()
            for row in rows:
                self.course_list.insert("", tk.END, text=row[0], values=row[0:])
            conn.close()
        def update_course():
            # Update each selected row; the WHERE key is taken from the
            # Treeview's first column (the old course code).
            for selected in self.course_list.selection():
                conn = sqlite3.connect("sis_v2.db")
                cur = conn.cursor()
                cur.execute("PRAGMA foreign_keys = ON")
                cur.execute("UPDATE courses SET Course_Code=?, Course_Name=? WHERE Course_Code=?", (Course_Code.get(),Course_Name.get(), self.course_list.set(selected, '#1')))
                conn.commit()
                tkinter.messagebox.showinfo("Course/s", "Course Updated Successfully!")
                display_course()
                clear()
                conn.close()
        def edit_course():
            # NOTE(review): defined but not wired to any button; OnDoubleclick
            # provides the same behavior.
            x = self.course_list.focus()
            if x == "":
                tkinter.messagebox.showerror("Course/s", "Select a course!")
                return
            values = self.course_list.item(x, "values")
            Course_Code.set(values[0])
            Course_Name.set(values[1])
        def delete_course():
            # Delete the selected course after confirmation.
            try:
                messageDelete = tkinter.messagebox.askyesno("Student Info", "Are you sure you want to delete this record?")
                if messageDelete > 0:
                    con = sqlite3.connect("sis_v2.db")
                    cur = con.cursor()
                    x = self.course_list.selection()[0]
                    id_no = self.course_list.item(x)["values"][0]
                    cur.execute("PRAGMA foreign_keys = ON")
                    cur.execute("DELETE FROM courses WHERE Course_Code = ?",(id_no,))
                    con.commit()
                    self.course_list.delete(x)
                    tkinter.messagebox.showinfo("Course/s", "Course deleted!")
                    display_course()
                    con.close()
            except:
                # NOTE(review): the bare except assumes the only failure is a
                # foreign-key violation; other errors are misreported.
                tkinter.messagebox.showerror("Course/s", "This course has students!")
        def search_course():
            # Show only the course whose code matches the search box exactly.
            Course_Code = SearchBar_Var.get()
            con = sqlite3.connect("sis_v2.db")
            cur = con.cursor()
            cur.execute("SELECT * FROM courses WHERE Course_Code = ?",(Course_Code,))
            con.commit()
            self.course_list.delete(*self.course_list.get_children())
            rows = cur.fetchall()
            for row in rows:
                self.course_list.insert("", tk.END, text=row[0], values=row[0:])
            con.close()
        def clear():
            # Reset the form fields.
            Course_Code.set('')
            Course_Name.set('')
        def OnDoubleclick(event):
            # Load the double-clicked row into the form fields for editing.
            item = self.course_list.selection()[0]
            values = self.course_list.item(item, "values")
            Course_Code.set(values[0])
            Course_Name.set(values[1])
        # Bottom navigation bar.
        home = tk.Button(self, text="HOME",font=("Arial",18,"bold"), height = 1, width = 12,relief=RIDGE, bg="gold", fg="white", command=lambda: controller.show(Home))
        home.place(x=210,y=465)
        home.config(cursor= "hand2")
        course = tk.Button(self, text="COURSES",font=("Arial",18,"bold"), height = 1, width = 12,relief=RIDGE, bg="gold", fg="white", command=lambda: controller.show(Courses))
        course.place(x=880,y=465)
        course.config(cursor= "hand2")
        student = tk.Button(self, text="STUDENTS",font=("Arial",18,"bold"), height = 1, width = 12,relief=RIDGE, bg="gold", fg="white", command=lambda: controller.show(Students))
        student.place(x=540,y=465)
        student.config(cursor= "hand2")
        # Form widgets.
        self.lblccode = Label(self, font=("Arial", 17, "bold"), text="Course Code:", bg= "gold", fg= "white", relief=RIDGE, padx=5, pady=5)
        self.lblccode.place(x=30,y=144)
        self.txtccode = Entry(self, font=("Arial", 17), textvariable=Course_Code, width=20)
        self.txtccode.place(x=200,y=150)
        self.lblcname = Label(self, font=("Arial", 17,"bold"), text="Course Name:",bg= "gold", fg= "white", relief=RIDGE, padx=5, pady=5)
        self.lblcname.place(x=30,y=205)
        self.txtcname = Entry(self, font=("Arial", 17), textvariable=Course_Name, width=35)
        self.txtcname.place(x=70,y=250)
        self.SearchBar = Entry(self, font=("Arial", 15), textvariable=SearchBar_Var, bd=3, width=20)
        self.SearchBar.place(x=850,y=102)
        # Course listing with vertical scrollbar.
        scrollbar = Scrollbar(self, orient=VERTICAL)
        scrollbar.place(x=1215,y=140,height=290)
        self.course_list = ttk.Treeview(self, columns=("Course Code","Course Name"), height = 13, yscrollcommand=scrollbar.set)
        self.course_list.heading("Course Code", text="Course Code", anchor=W)
        self.course_list.heading("Course Name", text="Course Name",anchor=W)
        self.course_list['show'] = 'headings'
        self.course_list.column("Course Code", width=200, anchor=W, stretch=False)
        self.course_list.column("Course Name", width=430, stretch=False)
        self.course_list.bind("<Double-1> ", OnDoubleclick)
        self.course_list.place(x=575,y=140)
        scrollbar.config(command=self.course_list.yview)
        self.lblccode = Label(self,height = 8, width = 65,relief=RIDGE, bg="orange", fg="white", padx=5, pady=5)
        self.lblccode.place(x=90,y=305)
        ## Buttons
        self.adds = Button(self, text="ADD", font=("Arial",17,"bold"),bd=0, width = 10, bg="gold", fg="white",command=add_course)
        self.adds.place(x=100,y=320)
        self.adds.config(cursor= "hand2")
        self.update = Button(self, text="UPDATE", font=("Arial",17,"bold"),bd=0, width = 10, bg="gold", fg="white", command=update_course)
        self.update.place(x=100,y=380)
        self.update.config(cursor= "hand2")
        self.clear = Button(self, text="CLEAR", font=("Arial",17,"bold"),bd=0, width = 10, bg="gold", fg="white", command=clear)
        self.clear.place(x=394,y=320)
        self.clear.config(cursor= "hand2")
        self.delete = Button(self, text="DELETE", font=("Arial",17,"bold"),bd=0, width = 10, bg="gold", fg="white", command=delete_course)
        self.delete.place(x=394,y=380)
        self.delete.config(cursor= "hand2")
        self.search = Button(self, text="SEARCH", font=("Arial",14,"bold"),bd=0, width = 10, bg="gold", fg="white", command=search_course)
        self.search.place(x=1080,y=100)
        self.search.config(cursor= "hand2")
        self.display = Button(self, text="DISPLAY", font=("Arial",14,"bold"),bd=0, width = 10, bg="gold", fg="white", command=display_course)
        self.display.place(x=575,y=103)
        self.display.config(cursor= "hand2")
        # Ensure the table exists, then show its current contents.
        tablec()
        display_course()
#####################################################################################################################################################################################
class Students(tk.Frame):
    """Student maintenance page: CRUD operations on the sqlite 'students'
    table (FK to courses), displayed in a Treeview."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self,parent)
        leftcolor = tk.Label(self, height = 1260, width =550, bg = "maroon")
        leftcolor.place(x=0, y=0)
        self.controller = controller
        self.controller.title("Student Information System")
        label = tk.Label(self, text="STUDENT INFORMATION",bg= "gold", fg= "white", relief=RIDGE, font=("Arial", 40, "bold"))
        label.place(x=320,y=20)
        # Bottom navigation bar.
        home = tk.Button(self, text="HOME",font=("Arial",18,"bold"), height = 1, width = 12,relief=RIDGE, bg="gold", fg="white", command=lambda: controller.show(Home))
        home.place(x=210,y=465)
        home.config(cursor= "hand2")
        course = tk.Button(self, text="COURSES",font=("Arial",18,"bold"), height = 1, width = 12,relief=RIDGE, bg="gold", fg="white", command=lambda: controller.show(Courses))
        course.place(x=880,y=465)
        course.config(cursor= "hand2")
        student = tk.Button(self, text="STUDENTS",font=("Arial",18,"bold"), height = 1, width = 12,relief=RIDGE, bg="gold", fg="white", command=lambda: controller.show(Students))
        student.place(x=540,y=465)
        student.config(cursor= "hand2")
        # Tk variables bound to the form fields and the search box.
        Student_ID = StringVar()
        Student_Name = StringVar()
        Student_YearLevel = StringVar()
        Student_Gender = StringVar()
        Course_Code = StringVar()
        SearchBar_Var = StringVar()
        def tables():
            # Create the students table (FK to courses) on first run.
            conn = sqlite3.connect("sis_v2.db")
            cur = conn.cursor()
            cur.execute("PRAGMA foreign_keys = ON")
            cur.execute("CREATE TABLE IF NOT EXISTS students (Student_ID TEXT PRIMARY KEY, Student_Name TEXT, Course_Code TEXT, \
                        Student_YearLevel TEXT, Student_Gender TEXT, \
                        FOREIGN KEY(Course_Code) REFERENCES courses(Course_Code) ON UPDATE CASCADE)")
            conn.commit()
            conn.close()
        def add_stud():
            # Validate the YYYY-NNNN student ID format, then insert.
            if Student_ID.get() == "" or Student_Name.get() == "" or Course_Code.get() == "" or Student_YearLevel.get() == "" or Student_Gender.get() == "":
                tkinter.messagebox.showinfo("Student Information", "Fill in the box")
            else:
                ID = Student_ID.get()
                ID_list = []
                for i in ID:
                    ID_list.append(i)
                a = ID.split("-")
                if len(a[0]) == 4:
                    if "-" in ID_list:
                        # The number part must be exactly 4 characters long.
                        if len(a[1]) == 1:
                            tkinter.messagebox.showerror("Student Information", "ID Format:YYYY-NNNN")
                        elif len(a[1]) ==2:
                            tkinter.messagebox.showerror("Student Information", "ID Format:YYYY-NNNN")
                        elif len(a[1]) ==3:
                            tkinter.messagebox.showerror("Student Information", "ID Format:YYYY-NNNN")
                        else:
                            x = ID.split("-")
                            year = x[0]
                            number = x[1]
                            if year.isdigit()==False or number.isdigit()==False:
                                try:
                                    tkinter.messagebox.showerror("Student Information", "Invalid ID")
                                except:
                                    pass
                            elif year==" " or number==" ":
                                try:
                                    tkinter.messagebox.showerror("Student Information", "Invalid ID")
                                except:
                                    pass
                            else:
                                try:
                                    conn = sqlite3.connect("sis_v2.db")
                                    c = conn.cursor()
                                    c.execute("PRAGMA foreign_keys = ON")
                                    c.execute("INSERT INTO students(Student_ID,Student_Name,Course_Code,Student_YearLevel,Student_Gender) VALUES (?,?,?,?,?)",\
                                        (Student_ID.get(),Student_Name.get(),Course_Code.get(),Student_YearLevel.get(), Student_Gender.get()))
                                    tkinter.messagebox.showinfo("Student Information", "Student Added Successfully!")
                                    conn.commit()
                                    clear()
                                    display_stud()
                                    conn.close()
                                except:
                                    # Insert failed: distinguish a duplicate ID
                                    # from a missing (FK) course.
                                    ids=[]
                                    conn = sqlite3.connect("sis_v2.db")
                                    c = conn.cursor()
                                    c.execute("SELECT * FROM students")
                                    rows = c.fetchall()
                                    for row in rows:
                                        ids.append(row[0])
                                    if ID in ids:
                                        tkinter.messagebox.showerror("Student Information", "ID already exists")
                                    else:
                                        tkinter.messagebox.showerror("Student Information", "Course Unavailable")
                    else:
                        tkinter.messagebox.showerror("Student Information", "Invalid ID")
                else:
                    tkinter.messagebox.showerror("Student Information", "Invalid ID")
        def update_stud():
            # Update each selected row, keyed by the Treeview's first column.
            if Student_ID.get() == "" or Student_Name.get() == "" or Course_Code.get() == "" or Student_YearLevel.get() == "" or Student_Gender.get() == "":
                tkinter.messagebox.showinfo("Student Information", "Select a student")
            else:
                for selected in self.studentlist.selection():
                    conn = sqlite3.connect("sis_v2.db")
                    cur = conn.cursor()
                    cur.execute("PRAGMA foreign_keys = ON")
                    cur.execute("UPDATE students SET Student_ID=?, Student_Name=?, Course_Code=?, Student_YearLevel=?,Student_Gender=?\
                                WHERE Student_ID=?", (Student_ID.get(),Student_Name.get(),Course_Code.get(),Student_YearLevel.get(), Student_Gender.get(),\
                                self.studentlist.set(selected, '#1')))
                    conn.commit()
                    tkinter.messagebox.showinfo("Student Information", "Student record updated!")
                    display_stud()
                    clear()
                    conn.close()
        def delete_stud():
            # Delete the selected student after confirmation.
            try:
                messageDelete = tkinter.messagebox.askyesno("Student Information", "Are you sure you want to delete this record?")
                if messageDelete > 0:
                    con = sqlite3.connect("sis_v2.db")
                    cur = con.cursor()
                    x = self.studentlist.selection()[0]
                    id_no = self.studentlist.item(x)["values"][0]
                    cur.execute("DELETE FROM students WHERE Student_ID = ?",(id_no,))
                    con.commit()
                    self.studentlist.delete(x)
                    tkinter.messagebox.showinfo("Student Information", "Student record deleted successfully!")
                    display_stud()
                    clear()
                    con.close()
            except Exception as e:
                print(e)
        def search_stud():
            # Prefix search: show students whose ID starts with the query.
            Student_ID = SearchBar_Var.get()
            try:
                con = sqlite3.connect("sis_v2.db")
                cur = con.cursor()
                cur .execute("PRAGMA foreign_keys = ON")
                cur.execute("SELECT * FROM students")
                con.commit()
                self.studentlist.delete(*self.studentlist.get_children())
                rows = cur.fetchall()
                for row in rows:
                    if row[0].startswith(Student_ID):
                        self.studentlist.insert("", tk.END, text=row[0], values=row[0:])
                con.close()
            except:
                tkinter.messagebox.showerror("Student Information", "Invalid ID")
        def display_stud():
            # Reload the Treeview from the students table.
            self.studentlist.delete(*self.studentlist.get_children())
            conn = sqlite3.connect("sis_v2.db")
            cur = conn.cursor()
            cur.execute("PRAGMA foreign_keys = ON")
            cur.execute("SELECT * FROM students")
            rows = cur.fetchall()
            for row in rows:
                self.studentlist.insert("", tk.END, text=row[0], values=row[0:])
            conn.close()
        def edit_stud():
            # NOTE(review): defined but not wired to any button; OnDoubleClick
            # provides the same behavior.
            x = self.studentlist.focus()
            if x == "":
                tkinter.messagebox.showerror("Student Information", "Select a record")
                return
            values = self.studentlist.item(x, "values")
            Student_ID.set(values[0])
            Student_Name.set(values[1])
            Course_Code.set(values[2])
            Student_YearLevel.set(values[3])
            Student_Gender.set(values[4])
        def clear():
            # Reset all form fields.
            Student_ID.set('')
            Student_Name.set('')
            Student_YearLevel.set('')
            Student_Gender.set('')
            Course_Code.set('')
        def OnDoubleClick(event):
            # Load the double-clicked row into the form fields for editing.
            item = self.studentlist.selection()[0]
            values = self.studentlist.item(item, "values")
            Student_ID.set(values[0])
            Student_Name.set(values[1])
            Course_Code.set(values[2])
            Student_YearLevel.set(values[3])
            Student_Gender.set(values[4])
        # Form widgets.
        self.lblccode = Label(self,height = 3, width =78,relief=RIDGE, bg="orange", fg="white", padx=5, pady=5)
        self.lblccode.place(x=22,y=400)
        self.lblid = Label(self, font=("Arial", 14, "bold"), text="ID Number:", bg= "gold", fg= "white", padx=20, pady=5)
        self.lblid.place(x=40,y=144)
        self.txtid = Entry(self, font=("Arial", 14), textvariable=Student_ID, width=27)
        self.txtid.place(x=210,y=150)
        self.lblname = Label(self, font=("Arial", 14, "bold"), text="Name:", bg= "gold", fg= "white", padx=38, pady=5)
        self.lblname.place(x=40,y=195)
        self.txtname = Entry(self, font=("Arial", 14), textvariable=Student_Name, width=27)
        self.txtname.place(x=210,y=200)
        self.lblc = Label(self, font=("Arial", 14, "bold"), text="Course:", bg= "gold", fg= "white", padx=35, pady=5)
        self.lblc.place(x=40,y=240)
        self.txtc = Entry(self,font=("Arial", 14), textvariable=Course_Code, width=27)
        self.txtc.place(x=210,y=246)
        self.lblyear = Label(self,font=("Arial", 14, "bold"), text="Year Level:", bg= "gold", fg= "white", padx=19, pady=5)
        self.lblyear.place(x=40,y=295)
        self.txtyear = ttk.Combobox(self, value=["1st Year", "2nd Year", "3rd Year", "4th Year"], state="readonly", font=("Arial", 14), textvariable=Student_YearLevel, width=26)
        self.txtyear.place(x=210,y=305)
        self.lblgender = Label(self, font=("Arial", 14, "bold"), text="Gender:", bg= "gold", fg= "white", padx=32, pady=5)
        self.lblgender.place(x=40,y=350)
        self.txtgender = ttk.Combobox(self, value=["Male", "Female"], font=("Arial", 14), state="readonly", textvariable=Student_Gender, width=26)
        self.txtgender.place(x=210,y=356)
        self.SearchBar = Entry(self, font=("Arial", 11), textvariable=SearchBar_Var, bd=3, width=34)
        self.SearchBar.place(x=870,y=105)
        ## Treeview
        scrollbar = Scrollbar(self, orient=VERTICAL)
        scrollbar.place(x=1230,y=140,height=305)
        self.studentlist = ttk.Treeview(self, columns=("ID Number", "Name", "Course", "Year Level", "Gender"), height = 14, yscrollcommand=scrollbar.set)
        self.studentlist.heading("ID Number", text="ID Number", anchor=W)
        self.studentlist.heading("Name", text="Name",anchor=W)
        self.studentlist.heading("Course", text="Course",anchor=W)
        self.studentlist.heading("Year Level", text="Year Level",anchor=W)
        self.studentlist.heading("Gender", text="Gender",anchor=W)
        self.studentlist['show'] = 'headings'
        self.studentlist.column("ID Number", width=100, anchor=W, stretch=False)
        self.studentlist.column("Name", width=200, stretch=False)
        self.studentlist.column("Course", width=130, anchor=W, stretch=False)
        self.studentlist.column("Year Level", width=100, anchor=W, stretch=False)
        self.studentlist.column("Gender", width=100, anchor=W, stretch=False)
        self.studentlist.bind("<Double-1>",OnDoubleClick)
        self.studentlist.place(x=590,y=140)
        scrollbar.config(command=self.studentlist.yview)
        ## Buttons
        self.add = Button(self, text="ADD", font=("Arial", 16, "bold"), bg= "gold", fg= "white", padx= 20, command=add_stud)
        self.add.place(x=35,y=408)
        self.add.config(cursor= "hand2")
        self.update = Button(self, text="UPDATE", font=("Arial", 16, "bold"), bg= "gold", fg= "white",padx= 10, command=update_stud)
        self.update.place(x=175,y=408)
        self.update.config(cursor= "hand2")
        self.clear = Button(self, text="CLEAR", font=("Arial", 16, "bold"), bg= "gold", fg= "white", padx= 10,command=clear)
        self.clear.place(x=330,y=408)
        self.clear.config(cursor= "hand2")
        self.delete = Button(self, text="DELETE", font=("Arial",16, "bold"), bg= "gold", fg= "white",padx= 6, command=delete_stud)
        self.delete.place(x=460,y=408)
        self.delete.config(cursor= "hand2")
        self.search = Button(self, text="SEARCH", font=("Arial", 14, "bold"),bd=0, bg= "gold", fg="white", command=search_stud)
        self.search.place(x=1160,y=100)
        self.search.config(cursor= "hand2")
        self.display = Button(self, text="DISPLAY", font=("Arial", 14, "bold"), bd=0, bg= "gold", fg="white",command = display_stud)
        self.display.place(x=588,y=97)
        self.display.config(cursor= "hand2")
        # Ensure the table exists, then show its current contents.
        tables()
        display_stud()
#####################################################################################################################################################################################
# Application entry point: build the UI and start the Tk event loop.
root = sis()
root.geometry("1260x550")
root.mainloop()
| null | SIS2.py | SIS2.py | py | 27,452 | python | en | code | null | code-starcoder2 | 50 |
639915911 | """"Indigo UI URLs
Copyright 2015 Archive Analytics Solutions
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf import settings
from django.conf.urls import include, url
from django.views.generic import TemplateView
from django.conf.urls.static import static
# URL routing table for the Indigo UI.
# NOTE(review): string view references ('indigo_ui.views.home') and
# include(..., namespace=...) are pre-Django-2.0 idioms -- keep the Django
# version pinned accordingly, or migrate before upgrading.
# static(...) only serves files when DEBUG is True (development helper).
urlpatterns = [
    url(r'^$', 'indigo_ui.views.home', name='home'),
    url(r'^archive/', include('archive.urls', namespace="archive")),
    url(r'^node/', include('nodes.urls', namespace="nodes")),
    url(r'^users/', include('users.urls', namespace="users")),
    url(r'^groups/', include('groups.urls', namespace="groups")),
    url(r'^activity/', include('activity.urls', namespace="activity")),
    url(r'^about$', TemplateView.as_view(template_name='about.html'), name='about'),
    url(r'^contact$', TemplateView.as_view(template_name='contact.html'), name='contact'),
    url(r'^api/cdmi/', include('cdmi.urls', namespace="cdmi")),
    url(r'^api/admin/', include('admin.urls', namespace="admin")),
    url(r'^api/triple/', include('triple_api.urls', namespace="triple_api")),
    url(r'^triple/', include('triple_ui.urls', namespace="triple_ui")),
    url(r'^listener/', include('listener.urls', namespace="listener")),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| null | indigo-web/indigo_ui/urls.py | urls.py | py | 1,758 | python | en | code | null | code-starcoder2 | 50 |
83059798 | #importing modules
import numpy as np
import pandas as pd
from apyori import apriori
#importing the csv dataset
# Load the dataset and normalise the genre column to strings.
dataset = pd.read_csv('animeDataSet.csv')
dataset.genre = dataset.genre.astype('str')
# Build one transaction (list of genre tokens) per row.  The previous code
# hard-coded both the row count (6668) and the genre column position (28),
# which silently breaks as soon as the CSV changes; iterating the 'genre'
# Series directly works for any dataset size.
# NOTE(review): assumes raw column 28 was the genre column cast above --
# confirm against animeDataSet.csv's layout.
genre_list_mod = [genres.split(',') for genres in dataset.genre]
# Generate association rules between co-occurring genres with Apriori.
rules = list(apriori(genre_list_mod, min_support=0.03, min_confidence=0.6, min_lift=3, min_length=2))
for i in rules:
print(i,"\n") | null | AprioriCode.py | AprioriCode.py | py | 795 | python | en | code | null | code-starcoder2 | 50 |
485603035 | import asyncio
import json
import logging
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from functools import wraps
from pathlib import Path
from typing import List, Optional, Tuple
import attr
from .datcore import DatcoreClient
from .models import FileMetaData, FileMetaDataEx
FileMetaDataVec = List[FileMetaData]
FileMetaDataExVec = List[FileMetaDataEx]
CURRENT_DIR = Path(__file__).resolve().parent
logger = logging.getLogger(__name__)
# pylint: disable=W0703
@contextmanager
def safe_call(error_msg: str = "", *, skip_logs: bool = False):
    """Context manager that swallows exceptions from the wrapped block.

    AttributeError (raised when the datcore client is disabled, i.e. None)
    and any other Exception are suppressed.  Each case logs a warning unless
    *skip_logs* is set; the generic branch additionally requires a non-empty
    *error_msg*.  BaseException (e.g. KeyboardInterrupt) still propagates.
    """
    try:
        yield
    except Exception as exc:  # pylint: disable=broad-except
        if isinstance(exc, AttributeError):
            # Client object is disabled (None) -- report unless silenced.
            if not skip_logs:
                logger.warning("Calling disabled client. %s", error_msg)
        elif error_msg and not skip_logs:
            logger.warning(error_msg, exc_info=True)
# TODO: Use async callbacks for retreival of progress and pass via rabbit to server
def make_async(func):
    """Decorator turning a blocking method into a coroutine.

    The wrapped call runs in ``self.pool`` (a ThreadPoolExecutor) via
    ``self.loop.run_in_executor`` and is awaited, so the event loop is never
    blocked.  Exceptions raised by *func* propagate unchanged to the awaiter.
    """
    from functools import partial  # local import: file only imports ``wraps``

    @wraps(func)
    async def async_wrapper(self, *args, **kwargs):
        # BUGFIX: run_in_executor accepts positional args only, so the old
        # ``run_in_executor(self.pool, func, self, *args, **kwargs)`` raised
        # TypeError for any keyword call; bind kwargs with functools.partial.
        job = partial(func, self, *args, **kwargs)
        # Awaiting the future directly replaces the previous
        # ``asyncio.wait([task])`` + ``results[0]`` dance, which relied on the
        # done-set holding exactly one task and is deprecated usage.
        return await self.loop.run_in_executor(self.pool, job)

    return async_wrapper
class DatcoreWrapper:
    """ Wrapper to call the python2 api from datcore

        This can go away now. Next cleanup round...

        NOTE: Auto-disables client -- if ``DatcoreClient`` cannot be built in
        the constructor, ``d_client`` stays ``None``; every wrapped call then
        raises AttributeError internally, which ``safe_call`` suppresses, so
        methods degrade to their empty/False defaults instead of crashing.
        All public methods run in ``pool`` via the ``make_async`` decorator.
    """
    def __init__(
        self, api_token: str, api_secret: str, loop: object, pool: ThreadPoolExecutor
    ):
        self.api_token = api_token
        self.api_secret = api_secret
        self.loop = loop
        self.pool = pool
        try:
            self.d_client = DatcoreClient(
                api_token=api_token,
                api_secret=api_secret,
                host="https://api.blackfynn.io",
            )
        except Exception:
            self.d_client = None # Disabled: any call will raise AttributeError
            logger.warning("Failed to setup datcore. Disabling client.", exc_info=True)
    @property
    def is_communication_enabled(self) -> bool:
        """ Wrapper class auto-disables if client cannot be created
        e.g. if endpoint service is down
        :return: True if communication with datcore is enabled
        :rtype: bool
        """
        return self.d_client is not None
    @make_async
    def list_files_recursively(self) -> FileMetaDataVec: # pylint: disable=W0613
        # Returns [] when the client is disabled or the call fails.
        files = []
        with safe_call(error_msg="Error listing datcore files"):
            files = self.d_client.list_files_recursively()
        return files
    @make_async
    def list_files_raw(self) -> FileMetaDataExVec: # pylint: disable=W0613
        files = []
        with safe_call(error_msg="Error listing datcore files"):
            files = self.d_client.list_files_raw()
        return files
    @make_async
    def list_files_raw_dataset(
        self, dataset_id: str
    ) -> FileMetaDataExVec: # pylint: disable=W0613
        files = []
        with safe_call(error_msg="Error listing datcore files"):
            files = self.d_client.list_files_raw_dataset(dataset_id)
        return files
    @make_async
    def delete_file(self, destination: str, filename: str) -> bool:
        # the object can be found in dataset/filename <-> bucket_name/object_name
        ok = False
        with safe_call(error_msg="Error deleting datcore file"):
            ok = self.d_client.delete_file(destination, filename)
        return ok
    @make_async
    def delete_file_by_id(self, file_id: str) -> bool:
        ok = False
        with safe_call(error_msg="Error deleting datcore file"):
            ok = self.d_client.delete_file_by_id(file_id)
        return ok
    @make_async
    def download_link(self, destination: str, filename: str) -> str:
        url = ""
        with safe_call(error_msg="Error getting datcore download link"):
            url = self.d_client.download_link(destination, filename)
        return url
    @make_async
    def download_link_by_id(self, file_id: str) -> Tuple[str, str]:
        url = ""
        filename = ""
        with safe_call(error_msg="Error getting datcore download link"):
            url, filename = self.d_client.download_link_by_id(file_id)
        return url, filename
    @make_async
    def create_test_dataset(self, dataset_name: str) -> Optional[str]:
        # If the early ``return ds.id`` is skipped because an exception was
        # suppressed by safe_call, control falls through to ``return None``.
        with safe_call(error_msg="Error creating test dataset"):
            ds = self.d_client.get_dataset(dataset_name)
            if ds is not None:
                # Dataset exists: empty it instead of recreating it.
                self.d_client.delete_files(dataset_name)
            else:
                ds = self.d_client.create_dataset(dataset_name)
            return ds.id
        return None
    @make_async
    def delete_test_dataset(self, dataset) -> None:
        with safe_call(error_msg="Error deleting test dataset"):
            ds = self.d_client.get_dataset(dataset)
            if ds is not None:
                self.d_client.delete_files(dataset)
    @make_async
    def upload_file(
        self, destination: str, local_path: str, meta_data: FileMetaData = None
    ) -> bool:
        ok = False
        # Round-trip through JSON turns the attr-class into a plain dict.
        str_meta = json.dumps(attr.asdict(meta_data)) if meta_data else ""
        with safe_call(error_msg="Error uploading file to datcore"):
            if str_meta:
                meta_data = json.loads(str_meta)
                ok = self.d_client.upload_file(destination, local_path, meta_data)
            else:
                ok = self.d_client.upload_file(destination, local_path)
        return ok
    @make_async
    def upload_file_to_id(self, destination_id: str, local_path: str) -> Optional[str]:
        _id = None
        with safe_call(error_msg="Error uploading file to datcore"):
            _id = self.d_client.upload_file_to_id(destination_id, local_path)
        return _id
    @make_async
    def create_collection(
        self, destination_id: str, collection_name: str
    ) -> Optional[str]:
        _id = None
        with safe_call(error_msg="Error creating collection in datcore"):
            _id = self.d_client.create_collection(destination_id, collection_name)
        return _id
    @make_async
    def list_datasets(self) -> List:
        data = []
        # NOTE(review): error message says "creating collection" -- looks like
        # a copy-paste from create_collection; cannot change runtime strings
        # in a doc-only pass.
        with safe_call(error_msg="Error creating collection in datcore"):
            data = self.d_client.list_datasets()
        return data
    @make_async
    def ping(self) -> bool:
        # Quiet health check: logging suppressed so a down service does not
        # spam the logs.
        ok = False
        with safe_call(skip_logs=True):
            profile = self.d_client.profile()
            ok = profile is not None
        return ok
| null | services/storage/src/simcore_service_storage/datcore_wrapper.py | datcore_wrapper.py | py | 6,736 | python | en | code | null | code-starcoder2 | 51 |
602885216 | from pprint import pprint
import json
import constants as constants
class ReferenceUtil(object):
    """Loaders that turn JSON reference files into ``{int id: name}`` dicts."""

    def _load_name_map(self, path, title_key, id_key, name_key):
        """Read the JSON file at *path* and build an id -> name dictionary.

        *title_key* selects the top-level list of records; *id_key* and
        *name_key* select the fields of each record.  Ids are coerced to int.
        """
        # utf-8-sig tolerates files saved with a BOM (common on Windows).
        with open(path, 'r', encoding='utf-8-sig') as json_file:
            ref_obj = json.loads(json_file.read())
        return {int(rec[id_key]): rec[name_key] for rec in ref_obj[title_key]}

    def GetAbilityReferenceDict(self, abilityRefPath):
        """Return {ability id: ability name}, e.g. {5630: 'Phoenix-Ult'}."""
        # Shared parsing lives in _load_name_map; the two public loaders were
        # previously near-identical copies of each other.
        return self._load_name_map(
            abilityRefPath,
            constants.ABILITY_JSON_TITLE_KEY,
            constants.REFERENCE_JSON_ID,
            constants.REFERENCE_JSON_NAME,
        )

    def GetHeroReferenceDict(self, heroRefPath):
        """Return {hero id: hero name}."""
        return self._load_name_map(
            heroRefPath,
            constants.HERO_JSON_TITLE_KEY,
            constants.REFERENCE_JSON_ID,
            constants.REFERENCE_JSON_NAME,
        )
| null | reference_util.py | reference_util.py | py | 1,169 | python | en | code | null | code-starcoder2 | 51 |
64330095 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (schema history record): alters two
    # Invoice fields.  Do not hand-edit once applied to any database.
    dependencies = [
        ('claims', '0004_invoice_taxes'),
    ]
    operations = [
        # 'client_type': 2-char choice field, default 'EC' (Customer).
        migrations.AlterField(
            model_name='invoice',
            name='client_type',
            field=models.CharField(default=b'EC', max_length=2, verbose_name=b'Bill To', choices=[(b'EC', b'Customer'), (b'CD', b'Custom')]),
        ),
        # 'notes': optional free-text header note, up to 255 chars.
        migrations.AlterField(
            model_name='invoice',
            name='notes',
            field=models.CharField(max_length=255, null=True, verbose_name=b'Note in Header', blank=True),
        ),
    ]
| null | claims/migrations/0005_auto_20151221_0904.py | 0005_auto_20151221_0904.py | py | 702 | python | en | code | null | code-starcoder2 | 51 |
84769913 | import random
# Guess-the-number game: the player has 5 attempts to find a number in 1..30.
# BUGFIX: random.randint is inclusive at BOTH ends, so randint(1, 30+1) could
# draw 31 -- an answer the prompt promises cannot occur.
answer = random.randint(1, 30)
trial = 5
user_name = input("Hi, What's your name?: ")

def question():
    """Prompt until the player types a valid integer, then return it."""
    while True:
        try:
            guess = int(input("Hi, " + str(user_name) + ". Guess the number 1 to 30.: "))
        except ValueError:
            print("Wrong type. Try again.")
        else:
            return guess

while trial:
    guess = question()
    if guess == answer:
        print("Congratuation! The answer is " + str(answer) + ".")
        break
    elif guess > answer:
        trial -= 1
        print("Too high! Try again. (%d times left)" % (trial))
    elif guess < answer:
        trial -= 1
        print("Too low! Try again. (%d times left)" % (trial))
    if trial == 0:
        print("You failed. The answer is " + str(answer) + ".")
| null | first/numguess.py | numguess.py | py | 793 | python | en | code | null | code-starcoder2 | 51 |
480770290 | # -*- coding: utf-8 -*-
import pandas as pd
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
# Load the monthly size panel from the MATLAB .mat file and market series
# from CSV, then plot the monthly cross-sectional skew of firm size.
data = sio.loadmat('CaseDataAll.mat')
market_data = pd.read_csv('market.csv')
eom = data["a_EOM"]
mon = data["Mon_Yield"]
size = data["Mon_SizeAll"]
market_return = market_data['Mkt_Rf']
risk_free = market_data['rf']
# NOTE(review): mon, market_return, risk_free, ave_size and kurts are computed
# but never used by this figure -- possibly leftovers from a sibling script.
for x in eom:
    x[0] = int(x[0] / 100) # change the month in eom
size = np.array(size)
ave_size = []
skews = []
kurts = []
# Skip the first 60 months, then compute per-month cross-sectional moments.
for i in range(60, np.array(size).shape[0]):
    a = pd.Series(size[i])
    ave_size.append(a.mean())
    skews.append(a.skew())
    kurts.append(a.kurt())
df2 = pd.DataFrame({'month': np.arange(0, np.array(size).shape[0] - 60), 'skew': skews})
# Draw Plot
plt.figure(figsize=(16, 10), dpi=80)
plt.plot('month', 'skew', data=df2, color='tab:red')
# Decoration
plt.ylim(0, 30)
# One x tick per 12 months, labelled as calendar years starting at 2005
# (presumably month 0 = 2005 -- TODO confirm against the data).
xtick_location = df2.index.tolist()[::12]
xtick_labels = [(x // 12) + 2005 for x in df2.month.tolist()[::12]]
plt.xticks(ticks=xtick_location, labels=xtick_labels, rotation=0, fontsize=12, horizontalalignment='center',
           alpha=.7)
plt.yticks(fontsize=12, alpha=.7)
plt.grid(axis='both', alpha=.3)
# Remove borders
plt.gca().spines["top"].set_alpha(0.0)
plt.gca().spines["bottom"].set_alpha(0.3)
plt.gca().spines["right"].set_alpha(0.0)
plt.gca().spines["left"].set_alpha(0.3)
plt.gca().set(xlabel='Year', ylabel='Monthly Skew of Size')
plt.savefig('figure2.png') | null | figure2.py | figure2.py | py | 1,415 | python | en | code | null | code-starcoder2 | 51 |
144398348 | height = float(input('身長をm単位で入力して下さい'))
# Compute BMI (kg / m^2) and print it plus the weight classification.
weight_kg = int(input('体重をkg単位で入力して下さい'))
body_mass_index = weight_kg / height ** 2
print(body_mass_index)
if body_mass_index < 18.5:
    print('瘦せ型')
elif 18.5 <= body_mass_index < 25:
    print('普通')
else:
print('肥満体') | null | PycharmProjects/Tutorial_Excercise/bmi.py | bmi.py | py | 286 | python | en | code | null | code-starcoder2 | 51 |
531588895 | # Copyright (C) 2013 Lindley Graham
"""
This module contains methods used in calculating the volume of water present in
an ADCIRC simulation.
.. todo:: Some of these routines could be parallelized.
"""
import numpy as np
quad_faces = [[0, 1], [1, 2], [2, 0]]
def total_volume(domain, elevation):
    """
    Calculates the total volume of water contained in an ADCIRC simulation with
    sea surface height given by elevation.

    :param domain: :class:`~polyadcirc.run_framework.domain`
    :param elevation: eta, sea surface height (NOT WATER COLUMN HEIGHT)
    :rtype: tuple
    :returns: total volume, element-wise volume
    """
    # NOTE: ``iteritems`` means this module targets Python 2.
    # Element numbers k are 1-based, hence the k-1 array index below.
    e_volume = np.empty((len(domain.element),))
    for k, e in domain.element.iteritems():
        e_volume[k-1] = element_volume(domain, e, elevation)
    t_volume = e_volume.sum()
    return t_volume, e_volume
def sub_volume(domain, elevation, elements):
    """
    Calculates the total volume of water contained in an ADCIRC simulation for
    a given set of elements with sea surface height given by elevation.

    :param domain: :class:`~polyadcirc.run_framework.domain`
    :param elevation: eta, sea surface height (NOT WATER COLUMN HEIGHT)
    :param elements: list of element numbers to calcuate volumes for
    :rtype: tuple
    :returns: total volume, element-wise volume

    .. note:: ``e_volume[i]`` corresponds to ``elements[i]``.
    """
    e_volume = np.empty((len(elements),))
    # BUGFIX: the previous version stored into e_volume[k-1] for element
    # number k, which overruns (or wrongly maps) the array whenever
    # ``elements`` is not exactly 1..len(elements).  Index by position.
    for i, k in enumerate(elements):
        e_volume[i] = element_volume(domain, domain.element[k], elevation)
    t_volume = e_volume.sum()
    return t_volume, e_volume
def element_volume(domain, element, elevation):
    """
    Calculates the volume of water contained an element with a given sea
    surface elevation at each of the nodes.

    :param domain: :class:`~polyadcirc.run_framework.domain`
    :param element: list of nodes defining an element
    :type element: array_like
    :param elevation: eta, sea surface height (NOT WATER COLUMN HEIGHT)
    :rtype: double
    :returns: volume
    """
    volume = 0
    # Check if the element is dry (-99999.0 is the ADCIRC dry-node sentinel;
    # the element is skipped only when ALL three nodes are dry).
    local_z = [elevation[i-1] for i in element]
    if not np.array_equal(local_z, -99999.0*np.ones(3,)):
        # Divergence-theorem style sum of dot(x, n dA)/3 over the closed
        # prism: free surface (outward up), bed (outward down), three sides.
        volume += triangle(domain, element, elevation)/3.0
        volume += triangle(domain, element, -domain.bathymetry, -1.0)/3.0
        for i in xrange(3):  # NOTE: xrange -- Python 2 code
            volume += side(domain, element, i, elevation)/3.0
    return volume
def triangle(domain, element, z, norm_dir=1.0):
    """
    Calculates dot(x, n*A) where x is the barycenter, n is the normal vector to
    the surface defined by z and the element verticies, and A is the area of
    the surface defined by z and the element vertices.

    :param domain: :class:`~polyadcirc.run_framework.domain`
    :param element: list of nodes defining an element
    :type element: array_like
    :param z: z-coordinate relative to the geiod, z = eta OR z = -h
    :param double norm_dir: 1.0 up, -1.0 down, direction of the normal vector
    :type z: :class:`numpy.ndarray`
    :rtype: double
    :returns: dot(x, n*A)
    """
    # Node numbers are 1-based, hence z[i-1].
    verts = [np.array([domain.node[i].x, domain.node[i].y, z[i-1]])
             for i in element]
    centroid = np.average(verts, 0)
    # n*A is half the cross product of two edge vectors; norm_dir flips the
    # orientation (up for the free surface, down for the bed).
    scaled_normal = norm_dir * np.cross(verts[0] - verts[1], verts[0] - verts[2])
    return np.dot(centroid, scaled_normal / 2)
def side(domain, element, side_num, elevation):
    """
    Calculates dot(x, n*A) for one vertical side (quad face) of the element
    prism, where x is the barycenter, n the face normal, and A the face area.

    :param domain: :class:`~polyadcirc.run_framework.domain`
    :param element: list of nodes defining an element
    :type element: array_like
    :param int side_num: which of the three sides (index into ``quad_faces``)
    :param elevation: eta, sea surface height (NOT WATER COLUMN HEIGHT)
    :type elevation: :class:`numpy.ndarray`
    :rtype: double
    :returns: dot(x, n*A)
    """
    surface_points = []
    bottom_points = []
    points = []
    # quad_faces[side_num] picks the two node positions of this side.
    # NOTE(review): ``element[quad_faces[side_num]]`` fancy-indexes, so
    # ``element`` must be a numpy array here, not a plain list -- confirm.
    for i in element[quad_faces[side_num]]:
        n = domain.node[i]
        if elevation[i-1] != -99999.000: # if node is wet (dry sentinel)
            surface_points.append(np.array([n.x, n.y, elevation[i-1]]))
            bottom_points.append(np.array([n.x, n.y, -domain.bathymetry[i-1]]))
        else: # node is dry
            points.append(np.array([n.x, n.y, -domain.bathymetry[i-1]]))
    # check to see if dry, partially dry, or wet
    if len(points) == 2: # Dry
        # Both nodes dry: no water face, zero contribution.
        bna = 0
    elif len(points) == 1: # Partially dry
        # One wet column: triangle of dry bed point + wet surface + wet bed.
        points.append(surface_points[0])
        points.append(bottom_points[0])
        barycenter = np.average(points, 0)
        normal = np.cross(points[0]-points[1], points[0]-points[2])
        bna = np.dot(barycenter, normal/2)
    else: # Wet
        # Full quad face, treated via its diagonals (cross of the two
        # diagonal vectors gives twice the quad's vector area).
        barycenter = np.average(np.vstack((surface_points, bottom_points)), 0)
        normal = np.cross(bottom_points[0]-surface_points[1],
                          bottom_points[1]-surface_points[0])
        bna = np.dot(barycenter, normal/2)
    return bna
| null | polyadcirc/pyADCIRC/volume.py | volume.py | py | 5,111 | python | en | code | null | code-starcoder2 | 50 |
377646224 | from hashlib import md5
from sslyze.server_connectivity_tester import ServerConnectivityTester, \
ServerConnectivityError, ConnectionToServerTimedOut
from sslyze.ssl_settings import TlsWrappedProtocolEnum
from sslyze.plugins.openssl_cipher_suites_plugin import Sslv20ScanCommand, \
Sslv30ScanCommand, Tlsv10ScanCommand, Tlsv11ScanCommand, \
Tlsv12ScanCommand, Tlsv13ScanCommand
from sslyze.synchronous_scanner import SynchronousScanner
from . import results
from .errors import ConnectionError
# Policy prohibits the use of SSL 2.0/3.0 and TLS 1.0
ciphersuites = {
"policy": [Sslv20ScanCommand(), Sslv30ScanCommand(),
Tlsv10ScanCommand(), Tlsv11ScanCommand()],
"full": [Sslv20ScanCommand(), Sslv30ScanCommand(),
Tlsv10ScanCommand(), Tlsv11ScanCommand(),
Tlsv12ScanCommand(), Tlsv13ScanCommand()]
}
# sslyze config
SynchronousScanner.DEFAULT_NETWORK_RETRIES = 1
SynchronousScanner.DEFAULT_NETWORK_TIMEOUT = 3
ERROR_MSG_CONNECTION_TIMEOUT = 'TCP connection to {}:{} timed-out'.format
ERROR_MSG_UNKNOWN_CONNECTION = \
'TCP connection to {}:{} encountered unknown error'.format
def scan(name, ip, port, view, suite):
    """Run an sslyze TLS protocol scan against one host.

    :param name: web site hostname
    :param ip: IP address to connect to (overwritten with the address
        reported back by sslyze after the connectivity test)
    :param port: TCP port
    :param view: split-dns view label, recorded in the output as-is
    :param suite: key into ``ciphersuites`` ('policy' or 'full')
    :returns: results dictionary (see ``results.new``)
    :raises ConnectionError: when the TCP/TLS connectivity test fails
    """
    try:
        server_tester = ServerConnectivityTester(
            hostname=name,
            ip_address=ip,
            port=port,
            tls_wrapped_protocol=TlsWrappedProtocolEnum.HTTPS
        )
        # This line checks to see if the host is online
        server_info = server_tester.perform()
        ip = server_info.ip_address
    # Could not establish an SSL connection to the server
    except ConnectionToServerTimedOut:
        raise ConnectionError('Connection Timeout',
                              ERROR_MSG_CONNECTION_TIMEOUT(name, port))
    except ServerConnectivityError:
        # NOTE(review): 'Unknow' is a typo in a runtime string; left as-is in
        # this doc-only pass since callers/logs may match on it.
        raise ConnectionError('Unknow Connection Error',
                              ERROR_MSG_UNKNOWN_CONNECTION(name, port))
    # Create a new results dictionary
    scan_output = results.new()
    # I hash the combination of hostname and ip for tracking
    key = md5((f'{name}' + ip).encode("utf-8")).hexdigest()
    results.set_result(scan_output, "MD5", key)
    results.set_result(scan_output, "Hostname", f'{name}')
    results.set_result(scan_output, "IP", ip)
    results.set_result(scan_output, "View", view)
    # NOTE(review): the loop variable below shadows the 'suite' parameter
    # after the first iteration -- harmless here, but rename when convenient.
    for suite in ciphersuites.get(suite):
        synchronous_scanner = SynchronousScanner()
        scan_result = synchronous_scanner.run_scan_command(server_info, suite)
        # Every accepted cipher on a scanned protocol is recorded.
        for cipher in scan_result.accepted_cipher_list:
            results.set_ciphers(scan_output,
                                {
                                    "Version": cipher.ssl_version.name,
                                    "Cipher": cipher.name
                                }
                                )
    if len(scan_output["Results"]) == 0:
        results.set_result(scan_output, "Results", "No Policy Violations")
    return scan_output
| null | SSLChecker/sharedcode/scanner.py | scanner.py | py | 3,122 | python | en | code | null | code-starcoder2 | 50 |
361581330 | import argparse
import json
import sys
import redis
from pystdlib.uishim import get_selection
from pystdlib.shell import term_create_window, tmux_create_window
from pystdlib import shell_cmd
# dmenu-driven SSH launcher: pick a host from Redis-backed inventory, start
# its VPN if needed, then open the connection in tmux or a terminal window.
# NOTE(review): the "@...@" tokens look like build-time template placeholders
# (Nix-style) substituted before installation -- confirm.
parser = argparse.ArgumentParser(description="Execute command over SSH.")
parser.add_argument("--choices", dest="show_choices", action="store_true",
                    default=False, help="show predefined command choices")
parser.add_argument("--ignore-tmux", dest="ignore_tmux", action="store_true",
                    default=False, help="open connection in new terminal window rather than tmux pane")
args = parser.parse_args()

# Host inventory is stored as JSON under the "net/extra_hosts" Redis key.
r = redis.Redis(host='localhost', port=6379, db=0)
extra_hosts_data = json.loads(r.get("net/extra_hosts"))

host = get_selection(extra_hosts_data.keys(), "ssh to", case_insensitive=True, lines=10, font="@wmFontDmenu@")

if host:
    host_meta = extra_hosts_data[host]
    # Bring up the host's VPN first, if one is declared.
    host_vpn = host_meta.get("vpn", None)
    if host_vpn:
        shell_cmd(f"vpnctl --start {host_vpn}")
    ssh_user = host_meta.get("user", None)
    ssh_port = host_meta.get("port", None)
    cmd = f"ssh{' -l ' + ssh_user if ssh_user else ''}{' -p ' + str(ssh_port) if ssh_port else ''} {host_meta['ips'][0]}"
    if args.show_choices:
        command_choices = json.loads(r.get("net/command_choices"))
        choice = get_selection(command_choices, "execute", case_insensitive=True, lines=5, font="@wmFontDmenu@")
        if choice:
            cmd += f" -t '{choice}'"
        else:
            # User dismissed the chooser: abort instead of opening a shell.
            sys.exit(1)
    if args.ignore_tmux:
        term_create_window(cmd, term_cmd=["@defaultTerminal@", "-e"])
    else:
        # BUGFIX: window_title was the literal "ssh :: {host}" -- the f-prefix
        # was missing, so every tmux window got the same placeholder title.
        result = tmux_create_window(cmd, session_name=host_meta.get("tmux", "@tmuxDefaultSession@"),
                                    window_title=f"ssh :: {host}")
        if not result:
            # Fall back to a plain terminal window if tmux isn't available.
            term_create_window(cmd, term_cmd=["@defaultTerminal@", "-e"])
| null | modules/localnfra/networking/scripts/sshmenu.py | sshmenu.py | py | 1,880 | python | en | code | null | code-starcoder2 | 50 |
409449202 | #!/usr/bin python3
from collections import OrderedDict
from teacher import PiggyParent
import sys
import time
class Piggy(PiggyParent):
    """GoPiGo-style robot: teleop menu plus dance / scan / navigate behaviors."""

    '''
    *************
    SYSTEM SETUP
    *************
    '''

    def __init__(self, addr=8, detect=True):
        PiggyParent.__init__(self) # run the parent constructor

        '''
        MAGIC NUMBERS <-- where we hard-code our settings
        '''
        self.LEFT_DEFAULT = 87
        self.RIGHT_DEFAULT = 84
        self.MIDPOINT = 1500  # servo pulse for "look straight ahead"
        self.SAFE_DISTANCE = 250
        self.CLOSE_DISTANCE = 40
        self.set_motor_power(self.MOTOR_LEFT + self.MOTOR_RIGHT, 0)
        self.load_defaults()

    def load_defaults(self):
        """Implements the magic numbers defined in constructor"""
        self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)
        self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)
        self.set_servo(self.SERVO_1, self.MIDPOINT)

    def menu(self):
        """Displays menu dictionary, takes key-input and calls method"""
        ## This is a DICTIONARY, it's a list with custom index values. Python is cool.
        # Please feel free to change the menu and add options.
        print("\n *** MENU ***")
        # NOTE(review): 'shy', 'follow', 'calibrate' and 'quit' are not
        # defined in this class -- they must come from PiggyParent; confirm,
        # otherwise building this dict raises AttributeError.
        menu = {"n": ("Navigate", self.nav),
                "d": ("Dance", self.dance),
                "o": ("Obstacle count", self.obstacle_count),
                "s": ("Shy", self.shy),
                "f": ("Follow", self.follow),
                "c": ("Calibrate", self.calibrate),
                "q": ("Quit", self.quit)
                }
        # loop and print the menu...
        for key in sorted(menu.keys()):
            print(key + ":" + menu[key][0])
        # store the user's answer
        ans = str.lower(input("Your selection: "))
        # activate the item selected
        menu.get(ans, [None, self.quit])[1]()

    '''
    ****************
    STUDENT PROJECTS
    ****************
    '''

    def dance(self):
        """A higher-ordered algorithm to make your robot dance"""
        if not self.safe_to_dance():
            # BUGFIX: was lowercase 'false' -- a NameError at runtime.
            return False  # SHUT THE DANCE DOWN
        for x in range(3):
            self.strut()
            self.right_twist()
            self.strut()
            self.left_twist()
            self.backward_shimmey()
            self.spinarama()
            self.foward_shimmey()

    def right_twist(self):
        """The robot turns in a right circle once"""
        self.turn_by_deg(180)
        self.stop()
        self.turn_by_deg(180)
        self.stop()

    def left_twist(self):
        """Robot turns in a circle once to the left"""
        self.turn_by_deg(-179)
        self.stop()
        self.turn_by_deg(-179)
        self.stop()

    def strut(self):
        """Robot is moving foward while looking right to left """
        self.fwd(left=50, right=50)
        for x in range(2):
            self.servo(1000)
            time.sleep(.1)
            self.servo(1500) # Look Straight
            time.sleep(1)
            self.servo(2000)
            time.sleep(.1)
            self.servo(1500)

    def backward_shimmey(self):
        """Robot is moving backwards while moving his body left and right"""
        for x in range(6):
            self.right(primary=-70, counter=-30)
            time.sleep(.5)
            self.left(primary=-70, counter=-30)
            time.sleep(.5)
        self.stop()

    def spinarama(self):
        """Robot moves in a circle to turn around and move forward"""
        for x in range(6):
            self.right(primary=-100, counter=-500)
            time.sleep(3.5)
            self.fwd()
            time.sleep(1)
        self.stop()

    def foward_shimmey(self):
        """Robot moves forward while moving his body left and right"""
        for x in range(6):
            self.right(primary=60, counter=30)
            time.sleep(.5)
            self.left(primary=70, counter=30)
            time.sleep(.5)
        self.back()
        time.sleep(2)
        self.stop()

    def safe_to_dance(self):
        """ Does a 360 distance check and returns true if safe """
        # check for all fail/early-termination conditions
        for _ in range(4):
            if self.read_distance() < 300:
                print("NOT SAFE TO DANCE!")
                return False
            else:
                self.turn_by_deg(90)
        # after all checks have been done, we deduce it's safe
        # (BUGFIX: removed an unreachable shake loop that sat below return)
        print("SAFE TO DANCE!")
        return True

    def shake(self):
        """Drive deg_fwd(720) then stop -- a shake move for dance routines."""
        self.deg_fwd(720)
        self.stop()  # BUGFIX: was 'slef.stop()' -- a NameError

    def example_move(self):
        """this is an example dance move that should be replaced by student-created content"""
        self.right() # start rotating right
        time.sleep(1) # turn for a second
        self.stop() # stop
        self.servo(1000) # look right
        time.sleep(.25) # give your head time to move
        self.servo(2000) # look left

    def scan(self):
        """Sweep the servo and populate the scan_data dictionary"""
        for angle in range(self.MIDPOINT-400, self.MIDPOINT+401, 100):
            self.servo(angle)
            self.scan_data[angle] = self.read_distance()
        # sort the scan data for easier analysis
        self.scan_data = OrderedDict(sorted(self.scan_data.items()))

    # Robot will turn right or left based on data taken
    def right_or_left(self):
        """ Should I turn left or right?
        Returns an 'r' or 'l' based on scan data"""
        right_sum = 0
        right_count = 0
        left_sum = 0
        left_count = 0
        self.scan()
        for angle in self.scan_data:
            if angle < self.MIDPOINT:
                right_sum += self.scan_data[angle]
                right_count += 1
            else:
                # BUGFIX: the original accumulated into the left *counter*
                # variable instead of left_sum, so the left average was always
                # 0 and the method always answered 'r'.
                left_sum += self.scan_data[angle]
                left_count += 1
        left_avg = left_sum / left_count
        right_avg = right_sum / right_count
        if left_avg > right_avg:
            return 'l'
        else:
            return 'r'

    def obstacle_count(self):
        """Does a 360 scan and returns the number of obstacles it sees"""
        # NOTE(review): this actually performs 6 scan + 90-degree-turn passes
        # (540 degrees of turning) and only reports the final pass's count --
        # confirm intended behavior before relying on it.
        for x in range(6):
            # do a scan of the area in front of the robot
            self.scan()
            see_an_object = False
            count = 0
            # walk the sorted scan and count rising edges below SAFE_DISTANCE
            for angle in self.scan_data:
                dist = self.scan_data[angle]
                if dist < self.SAFE_DISTANCE and not see_an_object:
                    see_an_object = True
                    count += 1
                    print("~~~ I SEE SOMETHING!! ~~~")
                elif dist > self.SAFE_DISTANCE and see_an_object:
                    see_an_object = False
                    print("I guess the object ended")
                print("ANGLE: %d | DIST: %d" % (angle, dist))
            self.turn_by_deg(90)
        print("\nI saw %d objects" % count)

    def quick_check(self):
        """ Moves the servo to three angles and performs a distance check """
        # loop three times and move the servo
        for ang in range(self.MIDPOINT - 100, self.MIDPOINT + 101, 100):
            self.servo(ang)
            time.sleep(.01)
            if self.read_distance() < self.SAFE_DISTANCE:
                return False
        # if the three-part check didn't freak out
        return True

    def turn_until_clear(self):
        """ Rotate until no obstacle is seen """
        print("Turning until clear")
        # make sure we're looking straight
        self.servo(self.MIDPOINT)
        # NOTE(review): despite the original "rotate right" wording this spins
        # with self.left(...) -- confirm which way the hardware turns.
        while self.read_distance() < self.SAFE_DISTANCE + 200:
            self.left(primary=40, counter=-40)
            time.sleep(.05)
        # stop motion before we end the method
        self.turn_by_deg(25)
        self.stop()

    def nav(self):
        """ Auto-pilot Program """
        print("-----------! NAVIGATION ACTIVATED !------------\n")
        print("-------- [ Press CTRL + C to stop me ] --------\n")
        print("-----------! NAVIGATION ACTIVATED !------------\n")
        exit_ang = self.get_heading()
        while True:
            if not self.quick_check():
                # obstacle ahead: back off, re-aim at the exit heading, and
                # rotate until the path is clear
                self.stop()
                self.back()
                time.sleep(.5)
                self.stop()
                self.turn_to_deg(exit_ang)
                self.turn_until_clear() # biased toward one side
            else:
                self.fwd(right = 100, left = 100)
            # TODO: scan so we can decide left or right
            # TODO: average the right side of the scan dict
            # TODO: average the left side of the scan dict
###########
## MAIN APP
if __name__ == "__main__": # only run this loop if this is the main file
    p = Piggy()
    # refuse to run on Python 2 (f-less but 3.x-only idioms in the class)
    if sys.version_info < (3, 0):
        sys.stdout.write("Sorry, requires Python 3.x\n")
        p.quit()
    try:
        while True: # app loop
            p.menu()
    except KeyboardInterrupt: # except the program gets interrupted by Ctrl+C on the keyboard.
        p.quit()
| null | student.py | student.py | py | 9,303 | python | en | code | null | code-starcoder2 | 50 |
329883173 | import os
BASE_DIR = os.path.dirname(__file__)
PROXY_PATH = None
REFRESH_PROXY_EACH = 30 # seconds
USER_AGENTS_PATH = os.path.join(BASE_DIR, 'support', 'ualist')
PROXY_TYPE = os.getenv('PROXY_TYPE', 'SOCKS5')
LOG_LEVEL = 'INFO'
LOG_MAX_FILE_BYTES = 1 * 1024 * 1024
LOG_BACKUP_COUNT = 50
LOG_FILE_PATH = 'logs'
| null | scrapyard/settings.py | settings.py | py | 315 | python | en | code | null | code-starcoder2 | 51 |
130394863 | #!/usr/bin/env python
from geometry_msgs.msg import Twist, Vector3
from sensor_msgs.msg import LaserScan
from neato_node.msg import Bump
import rospy
import tty
import select
import sys
import termios
class Control_Robot():
    """Keyboard teleop for a Neato: maps i/,/j/l/k keys to Twist commands."""
    def __init__(self):
        """ Initialize the robot control, """
        # Order matters here: the ROS node must exist before the publisher,
        # and terminal settings are captured last so getKey can restore them.
        rospy.init_node('my_teleop')
        self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
        self.sleepy = rospy.Rate(2)
        # make dictionary that calls functions (key -> velocity setter)
        self.state = {'i':self.forward, ',':self.backward,
                      'l':self.rightTurn, 'j':self.leftTurn,
                      'k':self.stop}
        self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)
        self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)
        self.sendMessage()
        # get key interupt things
        self.settings = termios.tcgetattr(sys.stdin)
        self.key = None
    def getKey(self):
        """ Read one keypress from stdin (raw mode) into self.key, then
        restore the terminal settings captured in __init__. """
        tty.setraw(sys.stdin.fileno())
        select.select([sys.stdin], [], [], 0)
        self.key = sys.stdin.read(1)
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)
    def forward(self):
        """
        Sets the velocity to forward
        Only checking for bump sensors in forward
        """
        # NOTE(review): no bump-sensor check is actually performed here,
        # despite the docstring -- confirm whether it was meant to be added.
        print('forward')
        self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)
        self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)
    def backward(self):
        print('backward')
        # NOTE(review): the string below sits after the first statement, so it
        # is not a real docstring (a no-op expression); left in place.
        """ Sets the velocity to backward """
        self.linearVector = Vector3(x=-1.0, y=0.0, z=0.0)
        self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)
    def leftTurn(self):
        print('leftTurn')
        """ Sets the velocity to turn left """
        self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)
        self.angularVector = Vector3(x=0.0, y=0.0, z=1.0)
    def rightTurn(self):
        print('rightTurn')
        """ Sets the velocity to turn right """
        self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)
        self.angularVector = Vector3(x=0.0, y=0.0, z=-1.0)
    def stop(self):
        """ Sets the velocity to stop """
        print('stop')
        self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)
        self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)
    def sendMessage(self):
        """ Publishes the Twist containing the linear and angular vector """
        print('sendMessage')
        self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))
    def run(self):
        # Main loop: exits on Ctrl+C ('\x03') read as a raw keypress.
        while self.key != '\x03':
            self.getKey()
            try:
                self.state[self.key].__call__()
            except:
                # on any other keypress, stop the robot
                # NOTE(review): bare except also hides real errors from the
                # handlers themselves -- consider ``except KeyError``.
                self.state['k'].__call__()
            self.sendMessage()
# NOTE(review): instantiated at import time -- consider wrapping in an
# ``if __name__ == '__main__':`` guard so importing this module does not
# start a ROS node and grab the terminal.
control = Control_Robot()
control.run()
| null | warmup_project/scripts/teleop.py | teleop.py | py | 2,883 | python | en | code | null | code-starcoder2 | 51 |
48298201 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 16:14:26 2020
@author: hp
"""
# Print the Collatz sequence for a user-supplied positive integer.
# BUGFIX: the input was kept as a str, so ``n != 1`` compared str to int;
# entering 1 wrongly printed 4, 2, 1 before terminating.
n = int(input('Enter a positive integer number:'))

def collatz(number):
    """Return the successor of the integer *number* in the Collatz sequence:
    number // 2 when even, 3 * number + 1 when odd."""
    if number % 2 == 0:
        return number // 2
    else:
        return 3 * number + 1

while n != 1:
    print(collatz(n))
n = collatz(n) | null | Practical5/collatz.py | collatz.py | py | 316 | python | en | code | null | code-starcoder2 | 51 |
542718591 | #!/usr/bin/python
import os
# envsensor_observer configuration ############################################
# Bluetooth adaptor index — presumably the hciX device number; confirm
# against how envsensor_observer opens the adaptor.
BT_DEV_ID = 0
# time interval for sensor status evaluation (sec.)
CHECK_SENSOR_STATE_INTERVAL_SECONDS = 5
INACTIVE_TIMEOUT_SECONDS = 60
# Sensor will be inactive state if there is no advertising data received in
# this timeout period.
| null | envsensor/conf.py | conf.py | py | 367 | python | en | code | null | code-starcoder2 | 51 |
385539697 | # 10-8. Cats and Dogs: Make two files, cats.txt and dogs.txt. Store at least three
# names of cats in the first file and three names of dogs in the second file. Write
# a program that tries to read these files and print the contents of the file to the
# screen. Wrap your code in a try-except block to catch the FileNotFound error,
# and print a friendly message if a file is missing. Move one of the files to a different
# location on your system, and make sure the code in the except block
# executes properly.
def file_reader(filename):
    """Print the contents of *filename*, or a friendly note if it is missing.

    Args:
        filename: Path of the text file to display.
    """
    try:
        with open(filename) as file_object:
            print(file_object.read())
    except FileNotFoundError:
        print("Sorry, I cannot find " + filename + " :(")
# Exercise both paths: an existing file and a possibly-missing one; the
# FileNotFoundError case is handled inside file_reader().
file_reader('cats.txt')
file_reader('dogs.txt')
class Solution(object):
    def numDistinct(self, s, t):
        """Count the distinct subsequences of *s* that equal *t* (LeetCode 115).

        Uses a 1-D DP rolled over t; iterating j right-to-left lets dp[j-1]
        still hold the previous row's value when it is read.

        Fix: the original used ``xrange``, which only exists on Python 2 and
        raises NameError on Python 3; ``range`` works on both.

        :type s: str
        :type t: str
        :rtype: int
        """
        m, n = len(s), len(t)
        dp = [0] * (n + 1)
        dp[0] = 1  # the empty target matches exactly one way
        for i in range(1, m + 1):
            for j in range(n, 0, -1):
                if s[i - 1] == t[j - 1]:
                    dp[j] += dp[j - 1]
        return dp[-1]
| null | 115-Distinct-Subsequences/solution.py | solution.py | py | 411 | python | en | code | null | code-starcoder2 | 51 |
411601710 | #%% INFO
# Simple script to fetch block info from a Substrate node using:
# https://github.com/paritytech/substrate-api-sidecar
#
import requests
import json
import time
import pickle
import argparse
class Sync:
    """Fetch blocks from a Substrate node through api-sidecar, hold them in
    memory, and optionally persist them to disk (JSON or pickle)."""

    def __init__(self, endpoint, write, use_json, start_block, end_block, continue_sync, fprefix):
        # User inputs
        self.endpoint = endpoint
        self.write = write
        self.use_json = use_json
        self.start_block = start_block
        self.end_block = end_block
        self.continue_sync = continue_sync
        self.file_prefix = fprefix
        # Constructors
        self.blocks = []
        self.process_inputs()

    def get_block(self, index: int):
        """Return the stored block at list position *index*."""
        return self.blocks[index]

    def process_inputs(self):
        """Validate the requested range; end_block == 0 means 'chain tip'."""
        if self.end_block > 0:
            assert(self.end_block > self.start_block)
        if self.end_block == 0:
            self.end_block = self.get_chain_height()

    # Construct a path to some sidecar info.
    def construct_url(self, path=None, param1=None, param2=None):
        """Build '<endpoint>[/<path>[/<param1>[/<param2>]]]'.

        Fix: `url` was unbound (NameError) when called with no path; the
        bare endpoint is now returned in that case.
        """
        url = self.endpoint
        if path:
            url = url + '/' + str(path)
            # `or param == 0` keeps block number 0 (genesis) in the URL.
            if param1 or param1 == 0:
                url = url + '/' + str(param1)
            if param2 or param2 == 0:
                url = url + '/' + str(param2)
        return url

    # Request some data from sidecar.
    def sidecar_request(self, endpoint):
        """GET *endpoint*; return the decoded JSON, or {'error': ...}.

        Fix: on a connection failure the original fell through and read the
        unbound `response` variable, raising NameError; it now returns an
        error dict like the non-OK HTTP path does.
        """
        try:
            response = requests.get(endpoint)
        except Exception:
            print('Unable to connect to sidecar.')
            return {'error': 'Unable to connect to sidecar.'}
        if response.ok:
            return json.loads(response.text)
        error_message = 'Response Error: {}'.format(response.status_code)
        print(error_message)
        return {'error': error_message}

    # Get the block number of the current finalized head.
    def get_chain_height(self):
        """Return the latest block number, or 0 on a bad response."""
        url = self.construct_url('block')
        latest_block = self.sidecar_request(url)
        if 'error' in latest_block.keys():
            print('Warn! Bad response from client. Returning genesis block.')
            return 0
        self.process_block(latest_block)
        chain_height = latest_block['number']
        return chain_height

    def fetch_block(self, number: int):
        """Fetch one block by number; warn (but still return) on errors."""
        url = self.construct_url('block', number)
        block = self.sidecar_request(url)
        if 'error' in block.keys():
            print('Warn! Bad response from client on block {}.'.format(number))
        self.process_block(block)
        return block

    # A bunch of asserts to make sure we have a valid block. Make block number an int.
    def process_block(self, block: dict, block_number=None):
        assert('number' in block.keys())
        block['number'] = int(block['number'])
        assert('stateRoot' in block.keys())
        assert('onInitialize' in block.keys())
        assert('extrinsics' in block.keys())
        assert('onFinalize' in block.keys())
        if block_number:
            assert(int(block['number']) == block_number)
        # Periodic progress output while syncing long ranges.
        if int(block['number']) % 2_000 == 0:
            self.print_block_info(block)

    # Print some info about a block. Mostly used to show that sync is progressing.
    def print_block_info(self, block: dict):
        print(
            'Block {:>9,} has state root {}'.format(
                int(block['number']), block['stateRoot']
            )
        )

    # Actually get blocks.
    def sync(self, from_block=0, to_block=None):
        """Fetch blocks [from_block, to_block) and append them to self.blocks."""
        if not to_block:
            to_block = self.get_chain_height()
        for block_number in range(from_block, to_block):
            block = self.fetch_block(block_number)
            self.blocks.append(block)

    # Get the block number of the highest synced block.
    def get_highest_synced(self):
        highest_synced = 0
        if len(self.blocks) > 0:
            highest_synced = self.blocks[-1]['number']
        return highest_synced

    # The main logic about adding new blocks to the chain.
    def add_new_blocks(self, highest_synced: int, chain_tip: int):
        # `highest_synced + 1` here because we only really want blocks with a child.
        if chain_tip == highest_synced + 1:
            print('Chain synced at height {:,}'.format(chain_tip))
            self.sleep(10)
        elif chain_tip > highest_synced + 1:
            self.sync(highest_synced + 1, chain_tip)
            self.sleep(1)
        elif chain_tip < highest_synced + 1:
            print('This is impossible, therefore somebody messed up.')
            self.sleep(10)

    # Wait, but if interrupted, exit.
    def sleep(self, sec: int):
        try:
            time.sleep(sec)
        except KeyboardInterrupt:
            self.write_and_exit()

    # Ask user if they want to save the block data and then exit.
    def write_and_exit(self):
        savedata = input('Do you want to save the block data? (y/N): ')
        if savedata.lower() == 'y':
            self.write_to_file()
        exit()

    # Write all blocks to a single file.
    def write_to_file(self):
        fname = input('Input a filename: ')
        if self.use_json:
            fn = fname + '.data'
            with open(fn, 'w') as f:
                json.dump(self.blocks, f)
        else:
            fn = fname + '.pickle'
            with open(fn, 'wb') as f:
                pickle.dump(self.blocks, f)

    # Write a single block to a JSON file.
    def write_block_to_file(self, reason='info', block=None):
        """Dump one block to blocks/<prefix>-<number>-<reason>.json.

        Fix: the original referenced an undefined global `block`; the block
        to write is now a parameter, defaulting to the most recently synced.
        """
        if block is None:
            block = self.blocks[-1]
        fname = 'blocks/{}-{}-{}.json'.format(
            self.file_prefix,
            block['number'],
            reason
        )
        with open(fname, 'w') as f:
            json.dump(block, f, indent=2)

    # Read blocks from a file.
    def read_from_file(self, start_desired: int, end_desired: int):
        """Load previously saved blocks into self.blocks.

        Clears self.blocks when the file is missing or does not cover the
        desired [start_desired, end_desired] range. Fixes from the original:
        it read the undefined names `use_json` and `blockdata` and the
        misspelled attribute `self.block`.
        """
        print('Importing blocks...')
        try:
            if self.use_json:
                fname = self.file_prefix + '.data'
                with open(fname, 'r') as f:
                    self.blocks = json.load(f)
            else:
                fname = self.file_prefix + '.pickle'
                with open(fname, 'rb') as f:
                    self.blocks = pickle.load(f)
        except Exception:
            print('No data file.')
            self.blocks = []
        if self.blocks:
            print('Imported {:,} blocks.'.format(len(self.blocks)))
            start_block = self.blocks[0]['number']
            end_block = self.blocks[-1]['number']
            if start_block <= start_desired and end_block >= end_desired:
                # TODO: Prune to desired set.
                print('Imported blocks {} to {}.'.format(start_block, end_block))
            else:
                # TODO: Return the partial set and sync around it.
                self.blocks = []
                print('Block data exists but does not cover desired blocks.')
def parse_args(argv=None):
    """Parse command-line flags.

    Args:
        argv: Optional list of arguments. Defaults to None, which makes
            argparse read ``sys.argv`` — identical to the old behavior, but
            the parameter lets callers (and tests) inject arguments.

    Returns:
        A ``(write, use_json)`` tuple of booleans.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-w', '--write-files',
        help='Write blocks that have duplicate transactions to files.',
        action='store_true'
    )
    parser.add_argument(
        '-j', '--json',
        help='Import blocks from JSON (plaintext) file. Slower than the default, pickle.',
        action='store_true'
    )
    args = parser.parse_args(argv)
    write = args.write_files
    use_json = args.json
    return (write, use_json)
if __name__ == "__main__":
    (write, use_json) = parse_args()
    # Hard-coded starting height; max_block == 0 means "sync to chain tip".
    start_block = 2349900
    max_block = 0
    # Local api-sidecar instance (default port).
    endpoint = 'http://127.0.0.1:8080'
    syncer = Sync(endpoint, write, use_json, start_block, max_block, True, 'blockdata')
    if max_block == 0:
        max_block = syncer.get_chain_height()
    print('Starting sync from block {} to block {}'.format(start_block, max_block))
    blocks = syncer.sync(start_block, max_block)
    # blocks = read_from_file(0, 10)
    if syncer.continue_sync:
        # Follow the chain head forever; Ctrl-C inside sleep() offers to save.
        while True:
            highest_synced = syncer.get_highest_synced()
            chain_tip = syncer.get_chain_height()
            blocks = syncer.add_new_blocks(highest_synced, chain_tip)
    else:
        syncer.write_and_exit()
| null | sync.py | sync.py | py | 6,835 | python | en | code | null | code-starcoder2 | 51 |
468736630 | #
# Used for detecting stage two, namely to see if yellow plug has been plugged
#
import feature_detetor
org_size = 0
detected_x = 0
detected_y = 0
def set_params(x, y, size_org):
    """Seed the module-level detection state: position (x, y) and the
    original size estimate."""
    global detected_x, detected_y, org_size
    detected_x, detected_y, org_size = x, y, size_org
def retrieve_params():
    """Return the current (detected_x, detected_y, org_size) state."""
    return (detected_x, detected_y, org_size)
def run(last_valid_frame, frame):
    """Run one detection step for stage two (yellow plug / orange button).

    Updates the cached detection position on success and returns True;
    returns False when nothing was detected.
    """
    global detected_x, detected_y
    feature_detetor.pre_size_estimate(1200, 3600)
    found, x, y = feature_detetor.detect_orange_btn(
        last_valid_frame, frame, detected_x, detected_y)
    if found != 1:
        return False
    detected_x, detected_y = x, y
    return True
| null | stage_2_main.py | stage_2_main.py | py | 718 | python | en | code | null | code-starcoder2 | 51 |
73269817 | from os import listdir,path
from pickle import load
from face_recognition import load_image_file,face_locations,face_encodings
import face_training
from collections import namedtuple
import time
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
face = namedtuple('face', 'picname predictions distance neighbors')
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6,neighors_distance_threshold=0.7):
    """Classify every face found in the image at *X_img_path* using a
    pickled KNN model.

    Returns `face` namedtuples (picname, predictions, distance, neighbors).
    NOTE(review): the no-face early return yields a *list*, while the normal
    path returns a *tuple* — confirm callers tolerate both.
    """
    faces_list = []
    # NOTE(review): the knn_clf argument is always overwritten by the model
    # unpickled from model_path, so passing a classifier has no effect.
    with open(model_path, 'rb') as f: knn_clf = load(f)
    with open('Y_list.dict', 'rb') as F: Y_dict_mapping = load(F)
    X_img = load_image_file(X_img_path)
    X_face_locations = face_locations(X_img)
    if len(X_face_locations) == 0:
        # No face detected: return a single placeholder entry.
        faces_list.append(face(X_img_path,' ',' ',' '))
        return faces_list
    faces_encodings = face_encodings(X_img, known_face_locations=X_face_locations)
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors= len(Y_dict_mapping))
    for n in range(len(closest_distances[0])):
        ni = []  # near-miss labels (close enough, but not the prediction)
        # NOTE(review): this reads closest_distances[0][0][n] (first row)
        # rather than [0][n][0]; presumably the best distance of face n was
        # intended — verify against kneighbors' (distances, indices) layout.
        if closest_distances[0][0][n] <= distance_threshold:
            pri,dis = knn_clf.predict(faces_encodings)[n],1 - closest_distances[0][0][n]
            neighors_distance_threshold=0.7
        else :
            # Unrecognized face: widen the neighbor threshold slightly.
            pri,dis = 'Unknown',0
            neighors_distance_threshold=0.8
        for j in range(len(closest_distances[0][n])):
            if closest_distances[0][n][j] <= neighors_distance_threshold:
                if not Y_dict_mapping[closest_distances[1][n][j]] == knn_clf.predict(faces_encodings)[n]:
                    ni.append(Y_dict_mapping[closest_distances[1][n][j]])
        faces_list.append(face(X_img_path,pri,dis,ni))
    return tuple(faces_list)
#if __name__ == "__main__":
#print("Training KNN classifier...")
#face_training.train("knn_examples/train", model_save_path="trained_knn_model.clf")
#print("Training complete!")
#t0 = time.time()
def main(impath="knn_examples/test"):
    """Run predict() over every image in *impath* and print one summary
    line per face (name, and any near-miss neighbor labels)."""
    for image_file in listdir(impath):
        prediction = predict(path.join(impath, image_file), model_path="trained_knn_model.clf")
        for i in prediction:
            if i.predictions == ' ': print ('There is no face in the Picture')
            elif len(i.neighbors) > 0 : print (i.picname,i.predictions,'---->',i.neighbors)
            elif len(i.neighbors) == 0: print (i.picname,i.predictions)
| null | servingstatic/face_detection_and_matching.py | face_detection_and_matching.py | py | 2,357 | python | en | code | null | code-starcoder2 | 51 |
233393608 | ###################################
########batch_generator############
###################################
import numpy as np
def chunker(seq, size=32):
    """Yield consecutive slices of *seq*, each at most *size* items long.

    e.g. chunker([1, 2, 3, 4, 5], 2) -> [1, 2], [3, 4], [5]
    """
    start = 0
    while start < len(seq):
        yield seq[start:start + size]
        start += size
def batch_generator(list, batch_size=16):
    """Endlessly yield shuffled batches drawn from *list*.

    Arguments:
        list: the items to batch; reshuffled IN PLACE at the start of every
            epoch. (The parameter name shadows the builtin but is kept for
            interface compatibility with existing keyword callers.)
        batch_size: maximum number of items per yielded batch.

    Yields:
        A slice of at most *batch_size* items.

    Fix: the original yielded the undefined name ``batch`` while its loop
    variable was ``bat``, raising NameError on first use.
    """
    while True:
        np.random.shuffle(list)
        # Inline chunking: consecutive batch_size-long slices of the epoch.
        for pos in range(0, len(list), batch_size):
            yield list[pos:pos + batch_size]
###################################
##################kfold############
###################################
from sklearn.model_selection import StratifiedKFold
def stratified_k_fold(x, y, k):
    """Split (x, y) into *k* stratified folds.

    Arguments:
        x: data
        y: annotations (class labels used for stratification)
        k: number of folds

    Returns:
        A list of (train_indices, test_indices) tuples, one per fold.
    """
    splitter = StratifiedKFold(n_splits=k)
    return list(splitter.split(x, y))
def get_a_fold(x, y, split_result, selected_fold_num):
    """Materialize one fold of data from precomputed split indices.

    Arguments:
        x: data
        y: annotations
        split_result: list of (train_indices, test_indices) tuples
        selected_fold_num: 1-based fold number to materialize

    Returns:
        (x_fold, y_fold) where each is [train_items, test_items].
    """
    train_idx, test_idx = split_result[selected_fold_num - 1]
    x_fold = [[x[i] for i in train_idx], [x[i] for i in test_idx]]
    y_fold = [[y[i] for i in train_idx], [y[i] for i in test_idx]]
    return x_fold, y_fold
| null | StatisticalLearning/DataGenarator/utils.py | utils.py | py | 2,207 | python | en | code | null | code-starcoder2 | 51 |
506097719 | # Find the first occurence of elememts in array using binray search
def BS_first_occur(arr, n, x):
    """Binary search for the FIRST occurrence of *x* in sorted *arr*.

    Arguments:
        arr: sorted sequence (may contain duplicates)
        n: number of elements to consider
        x: value to locate

    Returns:
        Index of the first occurrence of x, or -1 if x is absent.
    """
    lo, hi = 0, n - 1
    result = -1
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        if arr[mid] == x:
            result = mid
            hi = mid - 1  # keep searching left for an earlier match
        elif x < arr[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return result
# Demo: the first occurrence of 10 in this sorted list is at index 2.
arr = [2,4,10,10,10,18,20]
n = len(arr)
x=10
print(BS_first_occur(arr,n,x))
| null | Binary Search/bs_first_occur.py | bs_first_occur.py | py | 396 | python | en | code | null | code-starcoder2 | 51 |
506119430 | from bs4 import BeautifulSoup
import pandas as pd
import os
import lxml
import settings
def grade(name, points_per_test, comments, ok):
    """Grade every tests/q*.py via the `ok` grader, append one row for
    *name* to grades.csv, and return the single-row DataFrame.

    NOTE(review): assumes ok.grade(qname) returns an object exposing
    `.grade` and `.failed_tests` — confirm against the okpy client in use.
    """
    #Grade Results
    results= {q[:-3]:ok.grade(q[:-3]) for q in os.listdir("tests") if q.startswith('q')}
    #If running locally with lots of notebooks load the grades.
    df = pd.DataFrame()
    row=df.shape[0]
    df.loc[row,'student']=name #This is set in the last.
    #df.loc[row,'rcsid']=rcsid #This is set in the last.
    total_grade=0
    #This loops through the results
    for key, val in results.items():
        # NOTE(review): this assignment is overwritten two lines below by
        # the points-weighted value — looks like leftover code.
        df.loc[row,key]=val.grade
        results_key=str(key)+"-failed"
        df.loc[row,key]=val.grade*points_per_test
        #We use beautiful soup to parse the tests.
        soup = BeautifulSoup(str(val.failed_tests), "html.parser")
        #There are multiple components, but the expected data seems most valuable.
        # Splits on the literal two-character sequence backslash+n —
        # presumably because the failure text is a repr()'d string rather
        # than real newlines; verify against actual ok output.
        got = soup.get_text().split('\\n')[16:20]
        df.loc[row,results_key]=str(got)
        total_grade+=df.loc[row,key] #total grade
    df.loc[row,'total_grade']=total_grade
    df.loc[row,'comments']=comments
    if not os.path.isfile('grades.csv'):
        df.to_csv('grades.csv', index=False)
    else: # else it exists so append without writing the header
        df.to_csv('grades.csv', mode='a', header=False,index=False)
    return df
| null | notebooks/grade.py | grade.py | py | 1,334 | python | en | code | null | code-starcoder2 | 51 |
536054091 | from typing import Tuple, List, Dict
from environments.environment_abstract import Environment, State
import random; random.seed(0)
def policy_evaluation_step(env: Environment, states: List[State], state_vals: Dict[State, float],
                           policy: Dict[State, List[float]], discount: float) -> Tuple[float, Dict[State, float]]:
    """One in-place sweep of policy evaluation over *states*.

    Returns (change, state_vals) where `change` is the largest absolute
    value update seen during the sweep and `state_vals` is mutated in place.
    """
    change: float = 0.0
    for s in states:
        v = state_vals[s]  # value of s before this sweep touches it
        new_v = 0
        for a in env.get_actions():
            # Policy-weighted expected return of taking a in s:
            # pi(a|s) * (r + gamma * sum_s' P(s'|s,a) * V(s')).
            r, next_states, t_probs = env.state_action_dynamics(s, a)
            new_v += policy[s][a] * (r + discount * sum(p * state_vals[s_pr] for s_pr, p in zip(next_states, t_probs)))
        state_vals[s] = new_v
        # NOTE: later states in this same sweep see the updated value
        # (Gauss-Seidel style), so the iteration order of `states` affects
        # intermediate results.
        change = max(change, abs(v - state_vals[s]))
    return change, state_vals
def q_learning_step(env: "Environment", state: "State", action_vals: "Dict[State, List[float]]", epsilon: float,
                    learning_rate: float, discount: float):
    """One tabular Q-learning update with an epsilon-greedy behavior policy.

    Returns (next_state, action_vals); `action_vals` is mutated in place.

    Fix: the original computed
        max_a'( Q(s',a') - Q(s,a) )  ==  max_a' Q(s',a') - Q(s,a)
    INSIDE the discounted term, so Q(s,a) was scaled by `discount` as well.
    The correct TD error is  r + discount * max_a' Q(s',a') - Q(s,a).
    """
    # Epsilon-greedy action selection over the current Q estimates.
    if random.random() < epsilon:
        action = random.choice(env.get_actions())
    else:
        action = max(zip(env.get_actions(), action_vals[state]), key=lambda x: x[1])[0]
    state_next, reward = env.sample_transition(state, action)
    best_next = max(action_vals[state_next][a] for a in env.get_actions())
    td_error = reward + discount * best_next - action_vals[state][action]
    action_vals[state][action] += learning_rate * td_error
    return state_next, action_vals
| null | assignments_code/assignment2.py | assignment2.py | py | 1,389 | python | en | code | null | code-starcoder2 | 51 |
225519986 | import gym
from gym.wrappers import Monitor
import itertools
import numpy as np
import os
import random
import sys
import tensorflow as tf
import time
from lib import plotting
from lib.dqn_utils import *
from collections import deque, namedtuple
# make enviroment
env = gym.envs.make("Breakout-v0")
# Atari Actions: 0 (noop), 1 (fire), 2 (left) and 3 (right) are valid actions
VALID_ACTIONS = [0, 1, 2, 3]
class Estimator():
    """
    Q-Value Estimator neural network.

    This network is used for both the Q-Network and the Target Network.
    """

    def __init__(self, scope="estimator", summaries_dir=None):
        # Variable scope name; lets two Estimators (q / target_q) coexist
        # in the same graph with disjoint parameter sets.
        self.scope = scope
        # Writes Tensorboard summaries to disk
        self.summary_writer = None
        with tf.variable_scope(scope):
            # Build the graph
            self._build_model()
            if summaries_dir:
                summary_dir = os.path.join(summaries_dir, "summaries_{}".format(scope))
                if not os.path.exists(summary_dir):
                    os.makedirs(summary_dir)
                self.summary_writer = tf.summary.FileWriter(summary_dir)

    def _build_model( self ):
        """
        build computation graph
        """
        # Placeholders for our input
        # Our input are 4 RGB frames of shape 84, 84 each
        self.X_pl = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
        # The TD target value
        self.y_pl = tf.placeholder(shape=[None], dtype=tf.float32, name="y")
        # Integer id of which action was selected
        self.actions_pl = tf.placeholder(shape=[None], dtype=tf.int32, name="actions")
        # Normalize pixel bytes to [0, 1].
        X = tf.to_float(self.X_pl) / 255.0
        batch_size = tf.shape(self.X_pl)[0]
        # Three convolutional layers
        conv1 = tf.contrib.layers.conv2d(X, 32, 8, 4, activation_fn=tf.nn.relu)
        conv2 = tf.contrib.layers.conv2d(conv1, 64, 4, 2, activation_fn=tf.nn.relu)
        conv3 = tf.contrib.layers.conv2d(conv2, 64, 3, 1, activation_fn=tf.nn.relu)
        # Fully connected layers
        flattened = tf.contrib.layers.flatten(conv3)
        fc1 = tf.contrib.layers.fully_connected(flattened, 512)
        self.predictions = tf.contrib.layers.fully_connected(fc1, len(VALID_ACTIONS))
        # Get the predictions for the chosen actions only
        # (flatten the [batch, actions] matrix and index row*width + action).
        gather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl
        self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)
        # Calcualte the loss
        self.losses = tf.squared_difference( self.y_pl, self.action_predictions )
        self.loss = tf.reduce_mean( self.losses )
        # Optimizer Parameters from original paper
        self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
        self.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step())
        # Summaries for Tensorboard
        self.summaries = tf.summary.merge([
            tf.summary.scalar("loss", self.loss),
            tf.summary.histogram("loss_hist", self.losses),
            tf.summary.histogram("q_values_hist", self.predictions),
            tf.summary.scalar("max_q_value", tf.reduce_max(self.predictions))
        ])

    def predict(self, sess, s):
        """
        Predicts action values.

        Args:
          sess: Tensorflow session
          s: State input of shape [batch_size, 84, 84, 4]
             (matches the X_pl placeholder above)

        Returns:
          Tensor of shape [batch_size, NUM_VALID_ACTIONS] containing the estimated
          action values.
        """
        return sess.run(self.predictions, { self.X_pl: s })

    def update(self, sess, s, a, y):
        """
        Updates the estimator towards the given targets.

        Args:
          sess: Tensorflow session object
          s: State input of shape [batch_size, 84, 84, 4]
             (matches the X_pl placeholder above)
          a: Chosen actions of shape [batch_size]
          y: Targets of shape [batch_size]

        Returns:
          The calculated loss on the batch.
        """
        feed_dict = { self.X_pl: s, self.y_pl: y, self.actions_pl: a }
        summaries, global_step, _, loss = sess.run(
            [self.summaries, tf.contrib.framework.get_global_step(), self.train_op, self.loss], feed_dict)
        if self.summary_writer:
            self.summary_writer.add_summary(summaries, global_step)
        return loss
def deep_q_learning(sess,
                    env,
                    q_estimator,
                    target_estimator,
                    state_processor,
                    num_episodes,
                    experiment_dir,
                    replay_memory_size=500000,
                    replay_memory_init_size=50000,
                    update_target_estimator_every=10000,
                    discount_factor=0.99,
                    epsilon_start=1.0,
                    epsilon_end=0.1,
                    epsilon_decay_steps=500000,
                    batch_size=32,
                    record_video_every=50):
    """
    Q-Learning algorithm for off-policy TD control using Function Approximation.
    Finds the optimal greedy policy while following an epsilon-greedy policy.

    Args:
        sess: Tensorflow Session object
        env: OpenAI environment
        q_estimator: Estimator object used for the q values
        target_estimator: Estimator object used for the targets
        state_processor: A StateProcessor object
        num_episodes: Number of episodes to run for
        experiment_dir: Directory to save Tensorflow summaries in
        replay_memory_size: Size of the replay memory
        replay_memory_init_size: Number of random experiences to sampel when initializing
          the reply memory.
        update_target_estimator_every: Copy parameters from the Q estimator to the
          target estimator every N steps
        discount_factor: Gamma discount factor
        epsilon_start: Chance to sample a random action when taking an action.
          Epsilon is decayed over time and this is the start value
        epsilon_end: The final minimum value of epsilon after decaying is done
        epsilon_decay_steps: Number of steps to decay epsilon over
        batch_size: Size of batches to sample from the replay memory
        record_video_every: Record a video every N episodes

    Yields:
        An EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards
        after every finished episode (this function is a generator).
    """
    Transition = namedtuple("Transition", ["state", "action", "reward", "next_state", "done"])
    # The replay memory
    replay_memory = []
    # Keeps track of useful statistics
    stats = plotting.EpisodeStats(
        episode_lengths=np.zeros(num_episodes),
        episode_rewards=np.zeros(num_episodes))
    # Create directories for checkpoints and summaries
    checkpoint_dir = os.path.join(experiment_dir, "checkpoints")
    checkpoint_path = os.path.join(checkpoint_dir, "model")
    monitor_path = os.path.join(experiment_dir, "monitor")
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    if not os.path.exists(monitor_path):
        os.makedirs(monitor_path)
    saver = tf.train.Saver()
    # Load a previous checkpoint if we find one
    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    if latest_checkpoint:
        print("Loading model checkpoint {}...\n".format(latest_checkpoint))
        saver.restore(sess, latest_checkpoint)
    # Get the current time step
    total_t = sess.run(tf.contrib.framework.get_global_step())
    # The epsilon decay schedule
    epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)
    # The policy we're following
    policy = make_epsilon_greedy_policy(
        q_estimator,
        len(VALID_ACTIONS))
    # Populate the replay memory with initial experience
    print("Populating replay memory...")
    ############################################################
    # YOUR CODE 1 : Populate replay memory!
    # Hints : use function "populate_replay_buffer"
    # about 1 line code
    replay_memory = populate_replay_buffer( sess, env, state_processor, replay_memory_init_size, VALID_ACTIONS, Transition, policy )
    # Record videos
    env= Monitor(env,
                 directory=monitor_path,
                 resume=True,
                 video_callable=lambda count: count % record_video_every == 0)
    for i_episode in range(num_episodes):
        # Save the current checkpoint
        saver.save(tf.get_default_session(), checkpoint_path)
        # Reset the environment
        state = env.reset()
        # NOTE(review): 'state_process' here vs 'state_processor.process'
        # below — both presumably come from lib.dqn_utils; confirm they exist.
        state = state_process(sess, state_processor, state)
        loss = None
        # One step in the environment
        for t in itertools.count():
            # Epsilon for this time step
            epsilon = epsilons[min(total_t, epsilon_decay_steps-1)]
            # Add epsilon to Tensorboard
            episode_summary = tf.Summary()
            episode_summary.value.add(simple_value=epsilon, tag="epsilon")
            q_estimator.summary_writer.add_summary(episode_summary, total_t)
            ###########################################################
            # YOUR CODE 2: Target network update
            # Hints : use function "copy_model_parameters"
            if total_t % update_target_estimator_every == 0:
                copy_model_parameters(sess, q_estimator, target_estimator)
            # Print out which step we're on, useful for debugging.
            print("\rStep {} ({}) @ Episode {}/{}, loss: {} Memory Len {} ".format(
                t, total_t, i_episode + 1, num_episodes, loss, len(replay_memory)), end="")
            sys.stdout.flush()
            ##############################################
            # YOUR CODE 3: Take a step in the environment
            # Hints 1 : be careful to use function 'state_process' to deal the RPG state
            # Hints 2 : you can see function "populate_replay_buffer()"
            #          for detail about how to TAKE A STEP
            # about 2 or 3 line codes
            action = np.random.choice(len(VALID_ACTIONS), p=policy(sess, state, epsilon))
            next_state, reward, done, _ = env.step(VALID_ACTIONS[action])
            next_state = state_processor.process(sess, next_state)
            # Slide the 4-frame window: drop the oldest frame, append the new one.
            next_state = np.append(state[:,:,1:], np.expand_dims(next_state, 2), axis=2)
            # If our replay memory is full, pop the first element
            if len(replay_memory) == replay_memory_size:
                replay_memory.pop(0)
            #############################
            # YOUR CODE 4: Save transition to replay memory
            # Hints : you can see function 'populate_replay_buffer' for detail
            # about 1 or 2 line codes
            replay_memory.append( Transition( state, action, reward, next_state, done ) )
            # Update statistics
            stats.episode_rewards[i_episode] += reward
            stats.episode_lengths[i_episode] = t
            #########################################################
            # YOUR CODE 5: Sample a minibatch from the replay memory,
            # hints: can use function "random.sample( replay_memory, batch_size )" to get minibatch
            # about 1-2 lines codes
            minibatch = np.array(random.sample(replay_memory, batch_size))
            state_batch, action_batch, reward_batch, next_state_batch, done_batch = map(np.array, zip(*minibatch))
            ###########################################################
            # YOUR CODE 6: use minibatch sample to calculate q values and targets
            # Hints 1 : use function 'q_estimator.predict' to get q values
            # Hints 2 : use function 'target_estimator.predict' to get targets values
            #           remember 'targets = reward + gamma * max q( s, a' )'
            # about 2 line codes
            q = target_estimator.predict(sess,next_state_batch)
            # Inverting `done` zeroes the bootstrap term for terminal states.
            done_batch = np.invert(done_batch).astype(float)
            targets = reward_batch + done_batch * discount_factor * np.max(q, axis = 1)
            ################################################
            # YOUR CODE 7: Perform gradient descent update
            # hints : use function 'q_estimator.update'
            # about 1 line code
            loss = q_estimator.update(sess,state_batch, np.array(action_batch), targets)
            if done:
                break
            state = next_state
            total_t += 1
        # Add summaries to tensorboard
        episode_summary = tf.Summary()
        episode_summary.value.add(simple_value=stats.episode_rewards[i_episode], node_name="episode_reward", tag="episode_reward")
        episode_summary.value.add(simple_value=stats.episode_lengths[i_episode], node_name="episode_length", tag="episode_length")
        q_estimator.summary_writer.add_summary(episode_summary, total_t)
        q_estimator.summary_writer.flush()
        yield total_t, plotting.EpisodeStats(
            episode_lengths=stats.episode_lengths[:i_episode+1],
            episode_rewards=stats.episode_rewards[:i_episode+1])
    env.close()
    return stats
tf.reset_default_graph()
# Where we save our checkpoints and graphs
experiment_dir = os.path.abspath("./experiments/DQN")
# Create a global step variable
global_step = tf.Variable(0, name='global_step', trainable=False)
# Create estimators (online network and target network)
q_estimator = Estimator(scope="q", summaries_dir=experiment_dir)
target_estimator = Estimator(scope="target_q")
# State processor
state_processor = StateProcessor()
# Run it!
with tf.Session() as sess:
    # NOTE(review): tf.initialize_all_variables() is a deprecated TF1 API;
    # tf.global_variables_initializer() is the usual replacement — confirm
    # the pinned TensorFlow version before changing.
    sess.run(tf.initialize_all_variables())
    for t, stats in deep_q_learning(sess,
                                    env,
                                    q_estimator=q_estimator,
                                    target_estimator=target_estimator,
                                    state_processor=state_processor,
                                    experiment_dir=experiment_dir,
                                    num_episodes=5000,
                                    replay_memory_size=200000,
                                    replay_memory_init_size=20000,
                                    update_target_estimator_every=10000,
                                    epsilon_start=1.0,
                                    epsilon_end=0.1,
                                    epsilon_decay_steps=200000,
                                    discount_factor=0.99,
                                    batch_size=32):
        print("\nEpisode Reward: {} timeing: {}".format(stats.episode_rewards[-1], time.time()))
    plot_episode_stats(stats)
| null | rl/RL - TOY - DQN and its siblings - tf & torch/dqn.py | dqn.py | py | 12,899 | python | en | code | null | code-starcoder2 | 51 |
633978450 | from django.urls import path
from api import views
urlpatterns = [
    path('companies/', views.companies),
    path('companies/<int:pk>/', views.company),
    path('companies/<int:pk>/vacancies/', views.company_vacancies),
    path('vacancies/', views.vacancies),
    # NOTE(review): this route lacks the trailing slash the others have —
    # confirm whether that is intentional (APPEND_SLASH behavior differs).
    path('vacancies/<int:pk>', views.vacancy),
    path('vacancies/top_ten/', views.vacancies_top)
]
313248666 | # Copyright (C) 2014-2015 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
"""Entry point for the reprounzip utility.
This contains :func:`~reprounzip.reprounzip.main`, which is the entry point
declared to setuptools. It is also callable directly.
It dispatchs to plugins registered through pkg_resources as entry point
``reprounzip.unpackers``.
"""
from __future__ import absolute_import, unicode_literals
import logging
import pickle
import platform
from rpaths import PosixPath, Path
import sys
import tarfile
from reprounzip.common import load_config as load_config_file
from reprounzip.main import unpackers
from reprounzip.unpackers.common import load_config, COMPAT_OK, COMPAT_MAYBE, \
COMPAT_NO, shell_escape
from reprounzip.utils import iteritems, hsize
def print_info(args):
    """Writes out some information about a pack file.

    Reads the embedded config, tallies the packed DATA/ entries, summarizes
    the recorded runs, and reports which unpackers can handle the pack.
    """
    pack = Path(args.pack[0])

    # Loads config
    runs, packages, other_files = config = load_config(pack)

    # Tally the archive members under DATA/ (the actual packed filesystem).
    pack_total_size = 0
    pack_total_paths = 0
    pack_files = 0
    pack_dirs = 0
    pack_symlinks = 0
    pack_others = 0
    tar = tarfile.open(str(pack), 'r:*')
    for m in tar.getmembers():
        if not m.name.startswith('DATA/'):
            continue
        pack_total_size += m.size
        pack_total_paths += 1
        if m.isfile():
            pack_files += 1
        elif m.isdir():
            pack_dirs += 1
        elif m.issym():
            pack_symlinks += 1
        else:
            pack_others += 1
    tar.close()

    # Tally what the metadata *lists*, independent of what was packed.
    meta_total_paths = 0
    meta_packed_packages_files = 0
    meta_unpacked_packages_files = 0
    meta_packages = len(packages)
    meta_packed_packages = 0
    for package in packages:
        nb = len(package.files)
        meta_total_paths += nb
        if package.packfiles:
            meta_packed_packages_files += nb
            meta_packed_packages += 1
        else:
            meta_unpacked_packages_files += nb
    nb = len(other_files)
    meta_total_paths += nb
    meta_packed_paths = meta_packed_packages_files + nb

    if runs:
        # All runs are expected to share one architecture/distribution;
        # disagreements are only warned about, and the first run wins.
        meta_architecture = runs[0]['architecture']
        if any(r['architecture'] != meta_architecture
               for r in runs):
            logging.warning("Runs have different architectures")

        meta_distribution = runs[0]['distribution']
        if any(r['distribution'] != meta_distribution
               for r in runs):
            logging.warning("Runs have different distributions")
        meta_distribution = ' '.join(t for t in meta_distribution if t)

    current_architecture = platform.machine().lower()
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8;
    # fine for the Python versions this tool targeted, but worth confirming.
    current_distribution = platform.linux_distribution()[0:2]
    current_distribution = ' '.join(t for t in current_distribution if t)

    print("Pack file: %s" % pack)
    print("\n----- Pack information -----")
    print("Compressed size: %s" % hsize(pack.size()))
    print("Unpacked size: %s" % hsize(pack_total_size))
    print("Total packed paths: %d" % pack_total_paths)
    if args.verbosity >= 3:
        print("    Files: %d" % pack_files)
        print("    Directories: %d" % pack_dirs)
        print("    Symbolic links: %d" % pack_symlinks)
    if pack_others:
        print("    Unknown (what!?): %d" % pack_others)
    print("\n----- Metadata -----")
    if args.verbosity >= 3:
        print("Total paths: %d" % meta_total_paths)
        print("Listed packed paths: %d" % meta_packed_paths)
    if packages:
        print("Total software packages: %d" % meta_packages)
        print("Packed software packages: %d" % meta_packed_packages)
        if args.verbosity >= 3:
            print("Files from packed software packages: %d" %
                  meta_packed_packages_files)
            print("Files from unpacked software packages: %d" %
                  meta_unpacked_packages_files)
    if runs:
        print("Architecture: %s (current: %s)" % (meta_architecture,
                                                  current_architecture))
        print("Distribution: %s (current: %s)" % (
              meta_distribution, current_distribution or "(not Linux)"))
        print("Executions (%d):" % len(runs))
        for i, r in enumerate(runs):
            cmdline = ' '.join(shell_escape(a) for a in r['argv'])
            if len(runs) > 1:
                print("    %d: %s" % (i, cmdline))
            else:
                print("    %s" % cmdline)
            if args.verbosity >= 2:
                print("        input files: %s" %
                      ", ".join(r['input_files']))
                print("        output files: %s" %
                      ", ".join(r['output_files']))
                print("        wd: %s" % r['workingdir'])
                if 'signal' in r:
                    print("        signal: %d" % r['signal'])
                else:
                    print("        exitcode: %d" % r['exitcode'])

    # Unpacker compatibility: each registered unpacker may export a
    # test_compatibility hook (callable or constant, optionally with a message).
    print("\n----- Unpackers -----")
    unpacker_status = {}
    for name, upk in iteritems(unpackers):
        if 'test_compatibility' in upk:
            compat = upk['test_compatibility']
            if callable(compat):
                compat = compat(pack, config=config)
            if isinstance(compat, (tuple, list)):
                compat, msg = compat
            else:
                msg = None
            unpacker_status.setdefault(compat, []).append((name, msg))
        else:
            unpacker_status.setdefault(None, []).append((name, None))

    for s, n in [(COMPAT_OK, "Compatible"), (COMPAT_MAYBE, "Unknown"),
                 (COMPAT_NO, "Incompatible")]:
        # Only the "Compatible" bucket is shown at low verbosity.
        if s != COMPAT_OK and args.verbosity < 2:
            continue
        if s not in unpacker_status:
            continue
        upks = unpacker_status[s]
        print("%s (%d):" % (n, len(upks)))
        for upk_name, msg in upks:
            if msg is not None:
                print("    %s (%s)" % (upk_name, msg))
            else:
                print("    %s" % upk_name)
def showfiles(args):
"""Writes out the input and output files.
Works both for a pack file and for an extracted directory.
"""
pack = Path(args.pack[0])
if not pack.exists():
logging.critical("Pack or directory %s does not exist", pack)
sys.exit(1)
if pack.is_dir():
# Reads info from an unpacked directory
runs, packages, other_files = load_config_file(pack / 'config.yml',
canonical=True)
# The '.reprounzip' file is a pickled dictionary, it contains the name
# of the files that replaced each input file (if upload was used)
with pack.open('rb', '.reprounzip') as fp:
unpacked_info = pickle.load(fp)
input_files = unpacked_info.get('input_files', {})
print("Input files:")
for i, run in enumerate(runs):
if len(runs) > 1:
print(" Run %d:" % i)
for input_name, path in iteritems(run['input_files']):
print(" %s (%s)" % (input_name, path))
if input_files.get(input_name) is not None:
assigned = PosixPath(input_files[input_name])
else:
assigned = "(original)"
print(" %s" % assigned)
print("Output files:")
for i, run in enumerate(runs):
if len(runs) > 1:
print(" Run %d:" % i)
for output_name, path in iteritems(run['output_files']):
print(" %s (%s)" % (output_name, path))
else: # pack.is_file()
# Reads info from a pack file
runs, packages, other_files = load_config(pack)
print("Input files:")
for i, run in enumerate(runs):
if len(runs) > 1:
print(" Run %d:" % i)
for input_name, path in iteritems(run['input_files']):
print(" %s (%s)" % (input_name, path))
print("Output files:")
for i, run in enumerate(runs):
if len(runs) > 1:
print(" Run %d:" % i)
for output_name, path in iteritems(run['output_files']):
print(" %s (%s)" % (output_name, path))
def setup_info(parser, **kwargs):
"""Prints out some information about a pack
"""
parser.add_argument('pack', nargs=1,
help="Pack to read")
parser.set_defaults(func=print_info)
def setup_showfiles(parser, **kwargs):
"""Prints out input and output file names
"""
parser.add_argument('pack', nargs=1,
help="Pack or directory to read from")
parser.set_defaults(func=showfiles)
| null | reprounzip/reprounzip/pack_info.py | pack_info.py | py | 8,750 | python | en | code | null | code-starcoder2 | 51 |
245882454 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('investor', '0030_merge'),
]
operations = [
migrations.AlterField(
model_name='businessangel',
name='cap_origin',
field=models.CharField(default=b'PERSONNAL', max_length=20, verbose_name=b'Vos revenus proviennent principalement de', choices=[(b'PERSONNAL', b'Salaire'), (b'RETIREMENT', b'Pension-Retraite-Rente'), (b'ESTATE', b'Revenus Fonciers'), (b'OTHER', b'Autre')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='diversification',
field=models.CharField(blank=True, max_length=10, null=True, verbose_name=b'Diversification de votre patrimoine', choices=[(b'MISC', b'Diversification de votre patrimoine'), (b'INVESTMENT', b'Investissement \xc3\xa0 moyen / long terme'), (b'OPTIM', b'Optimisation fiscale'), (b'OTHER', b'Autres')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='estate_property',
field=models.NullBooleanField(default=False, verbose_name=b"Propri\xc3\xa9taire d'un autre bien immobilier", choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='exit_question',
field=models.NullBooleanField(default=False, verbose_name=b'Avez-vous conscience que vous aurez des difficult\xc3\xa9s \xc3\xa0 revendre vos titres ?', choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='finance_situation',
field=models.NullBooleanField(default=False, verbose_name=b"Situation professionelle dans le secteur financier de plus d'un an", choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='investor_status',
field=models.CharField(default=b'PHY', max_length=20, verbose_name=b"Statut d'investisseur", choices=[(b'PHY', b'Personne physique'), (b'MOR', b'Personne morale')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='is_pro',
field=models.NullBooleanField(default=False, verbose_name=b"\xc3\x8ates-vous un investisseur professionnel au sens de l'article 314-6 du r\xc3\xa8glement g\xc3\xa9n\xc3\xa9ral de l'AMF?", choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='isf',
field=models.NullBooleanField(default=False, verbose_name=b"Assujeti \xc3\xa0 l'ISF", choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='launder_money',
field=models.NullBooleanField(default=False, verbose_name=b"Les fonds que vous souhaitez investir proviennent-ils de sommes d\xc3\xa9pos\xc3\xa9es aupr\xc3\xa8s d'un \xc3\xa9tablissement de cr\xc3\xa9dit agr\xc3\xa9\xc3\xa9 en France?", choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='lose_question',
field=models.NullBooleanField(default=False, verbose_name=b'Avez-vous conscience que vous pouvez perdre \xc3\xa9ventuellement la totalit\xc3\xa9 de votre investissement ?', choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='products_operator',
field=models.NullBooleanField(default=False, verbose_name=b"Effectuer des op\xc3\xa9rations sur des instruments financiers d'un montant sup\xc3\xa9rieur \xc3\xa0 600\xe2\x82\xac par op\xc3\xa9ration, avec un minimum de 10 op\xc3\xa9rations par trimestre en moyenne sur les 12 derniers mois.", choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='products_owner',
field=models.NullBooleanField(verbose_name=b"\xc3\x8atre d\xc3\xa9tenteur d'instruments financiers d'une valeur sup\xc3\xa9rieure \xc3\xa0 500k", choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='raising_amount',
field=models.CharField(default=b'ALL', choices=[(b'ALL', b'Indiff\xc3\xa9rent'), (b'100-250', b'100k\xe2\x82\xac \xc3\xa0 250k\xe2\x82\xac'), (b'250-500', b'250k\xe2\x82\xac \xc3\xa0 500k\xe2\x82\xac'), (b'500+', b'plus de 500k\xe2\x82\xac')], max_length=10, blank=True, null=True, verbose_name=b'Raising amount'),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='residence_ownersip',
field=models.NullBooleanField(default=False, verbose_name=b'Propri\xc3\xa9taire de la r\xc3\xa9sidence principale', choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='businessangel',
name='sum_part',
field=models.NullBooleanField(verbose_name=b'Cette somme repr\xc3\xa9sente-t-elle moins de 10%% de votre patrimoine total ?', choices=[(True, 'Oui'), (False, 'Non')]),
preserve_default=True,
),
migrations.AlterField(
model_name='venturecapital',
name='phone_number',
field=phonenumber_field.modelfields.PhoneNumberField(help_text='Format : +<span class="number-text">33612547389</span>', max_length=128, verbose_name=b'Num\xc3\xa9ro de t\xc3\xa9l\xc3\xa9phone'),
preserve_default=True,
),
]
| null | api/investor/migrations/0031_auto_20141120_0432.py | 0031_auto_20141120_0432.py | py | 6,330 | python | en | code | null | code-starcoder2 | 51 |
237568197 | from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.image import Image
from screenfactory import ScreenFactory
GAME_PREFIX = "cellid"
class CellScreen(ScreenFactory):
def __init__(self, **kwargs):
super(CellScreen, self).__init__(GAME_PREFIX, **kwargs)
self.parse()
class CellButton(ButtonBehavior, Image):
"""Determines the behavior of a cell part."""
def __init__(self, **kwargs):
super(CellButton, self).__init__(**kwargs)
self.is_current = True
def on_press(self):
if self.is_current:
print("Correct")
self.toggle_current()
else:
print("Incorrect")
def toggle_current(self):
self.is_current = not self.is_current | null | minigame/cell.py | cell.py | py | 742 | python | en | code | null | code-starcoder2 | 51 |
302040004 | # -*- coding: utf-8 -*-
import scrapy
from tools.tools_r.smt.smt_getcid import get_cid,get_prama
from gm_work.items import GmWorkItem
import json
from scrapy_redis.spiders import RedisSpider
class SmtGoodsSpider(RedisSpider):
goods_num = 0
name = 'smt_goods'
allowed_domains = ['aliexpress.com']
start_urls = ['http://m.aliexpress.com']
redis_key = "smt_goods:start_url"
seeds_file = r"C:\Users\admin\Desktop\{smt_shopid_201910_有效}[店铺ID,卖家ID].txt"
def start_requests(self):
yield scrapy.Request(url="https://www.baidu.com",dont_filter=True)
def try_again(self,response,max_num=5,priority_adjust=0):
try_num = response.meta.get("try_num", 0) + 1
if try_num < max_num:
retryreq = response.request.copy()
retryreq.meta['try_num'] = try_num
retryreq.dont_filter = True
retryreq.priority = response.request.priority + priority_adjust
return retryreq
else:
print("错误大于5次")
def parse(self,response):
for i in self.from_file(self.seeds_file):
i = i.strip()
if "," in i:
shop_id = i.split(",",1)[0]
seller_id = i.split(",",1)[1]
page_id = get_prama(get_cid())
cookies = "aefeMsite=amp--wRru0loiCNZjcQEqYc1Ew; ali_apache_id=11.180.122.26.1575437527682.392996.5; isg=BDEx-5kOyCf7m2SmkQaxvTBcQL0LtqIM-G1_rBNGL_giOlOMW256Y8wcWIj58j3I"
num = 0
url = "https://m.aliexpress.com/api/search/products/items?pageId={}&searchType=storeSearch&sellerAdminSeq={}&storeId={}&infiniteScroll=true&start={}&shipToCountry=US&__amp_source_origin=https%3A%2F%2Fm.aliexpress.com"
Referer_str = "https://m.aliexpress.com/storesearch/list/.html?sortType=TC3_D&searchType=storeSearch&trace=store2mobilestoreNew&storeId={}"
Referer = Referer_str.format(shop_id)
url = url.format(page_id,seller_id,shop_id,num)
headers = self.get_headers()
headers["Cookie"] = cookies
headers["Referer"] = Referer
meta = {"page_id":page_id,
"seller_id":seller_id,
"shop_id":shop_id}
yield scrapy.Request(url=url,callback=self.get_detail,method="GET",headers=headers,meta=meta)
def get_detail(self, response):
meta = response.meta
json_str = response.text
req_url = response.url
seller_id = meta.get("seller_id")
shop_id = meta.get("shop_id")
page_id = meta.get("page_id")
if json_str.startswith('{"'):
item_s = GmWorkItem()
item_s["source_code"] = json_str
yield item_s
json_data = json.loads(json_str)
# success = json_data.get("success")
data = json_data.get("data")
# nextUrl = data.get("nextUrl")
items = data.get("items")
# if not items:
# print("item为空",shop_id,req_url)
trace = data.get("trace")
page = trace.get("page")
aem_count = int(page.get("aem_count")) if page.get("aem_count") else 0
if aem_count:
self.goods_num += aem_count
if self.goods_num%100000==1:
print(self.goods_num)
for i in range(20, aem_count, 20):
url = "https://m.aliexpress.com/api/search/products/items?pageId={}&searchType=storeSearch&sellerAdminSeq={}&storeId={}&infiniteScroll=true&start={}&shipToCountry=US&__amp_source_origin=https%3A%2F%2Fm.aliexpress.com"
Referer_str = "https://m.aliexpress.com/storesearch/list/.html?sortType=TC3_D&searchType=storeSearch&trace=store2mobilestoreNew&storeId={}"
cookies = "aefeMsite=amp--wRru0loiCNZjcQEqYc1Ew; ali_apache_id=11.180.122.26.1575437527682.392996.5; isg=BDEx-5kOyCf7m2SmkQaxvTBcQL0LtqIM-G1_rBNGL_giOlOMW256Y8wcWIj58j3I"
Referer = Referer_str.format(shop_id)
url = url.format(page_id,seller_id,shop_id,i)
headers = self.get_headers()
headers["Cookie"] = cookies
headers["Referer"] = Referer
meta = {"page_id": page_id,
"seller_id": seller_id,
"shop_id": shop_id}
yield scrapy.Request(url=url, callback=self.get_detail, method="GET", headers=headers, meta=meta)
for good in items:
item = GmWorkItem()
goods_url = good.get("action")
averageStarStr = good.get("averageStarStr")
imgUrl = good.get("imgUrl")
price = good.get("price")
price1 = price.get("price")
price_currency = price1.get("currency")
price_value = price1.get("value")
productId = good.get("productId")
subject = good.get("subject")
item["shop_id"] = shop_id
item["seller_id"] = seller_id
item["goods_url"] = goods_url
item["average_score"] = averageStarStr
item["img_url"] = imgUrl
item["currency"] = price_currency
item["price"] = price_value
item["goods_id"] = productId
item["subject"] = subject
item["shop_id"] = shop_id
item["aem_count"] = aem_count
item["pipeline_level"] = "smt商品列表"
yield item
else:
yield self.try_again(response)
def get_headers(self):
headers = {
"Host": "m.vi.aliexpress.com",
"Connection": "keep-alive",
"Accept": "application/json",
"AMP-Same-Origin": "true",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cookie": "isg=BLe25me9pv6r8iJyBJaO7Y-BRqvB1IxEwYs0IwllWgHzuNT6FU0TLoLWnlhDUGNW"
}
return headers
def from_file(self,file_name):
with open(file_name,"r",encoding="utf-8") as f:
for i in f:
yield i
| null | gm_work/gm_work/spiders/smt_goods.py | smt_goods.py | py | 6,532 | python | en | code | null | code-starcoder2 | 51 |
77928708 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: Mark
# datetime: 2020/9/24 9:40
# filename: _scorecard
# software: PyCharm
import math
import numpy as np
import pandas as pd
from pydantic import confloat
from ..base import BaseEstimator
from ..base import ModelMixin
from . import SKlearnLogisticRegression
from . import StatsLogisticRegression
class ScorecardModel(ModelMixin,
BaseEstimator):
"""
功能:评分卡模型
输入:训练集自变量表 X_train
训练集标签 y_train
控制:指明标签列: label_column
逻辑回归算法: is_sklearn_LR
基础分: basic_score
翻倍分: pdo
P0: po
P值筛选阈值:p_value_threshold
输出:评分卡模型数据
"""
label_column: str = 'label'
is_sklearn_LR: bool = False
basic_score: int = 600
pdo: int = 20
p0: confloat(ge=0, le=1) = 0.05
p_value_threshold: confloat(ge=0, le=1) = 0.05
p_value_df: pd.DataFrame = None
def _calculate_scorecard(self, woe_df, model_df):
# 合并参数矩阵列
cal_df = woe_df.merge(model_df.loc[:, ['column_name', 'coefficient']], on='column_name')
# 计算评分
cal_df['B'] = float(self.pdo) / math.log(2)
cal_df['A'] = float(self.basic_score) + cal_df['B'] * math.log(float(self.p0))
cal_df['score'] = round(
cal_df.loc[:, 'A'] / model_df.shape[0] - cal_df.loc[:, 'B'] * cal_df.loc[:, 'coefficient'] * cal_df.loc[:,
'encoding'], 0)
return cal_df
def fit(self, X_train, y_train):
"""
特征选择服务
:param X_train: 数据集
:return: 评分卡模型
"""
# 调用逻辑回归模型,获取系数矩阵
if self.is_sklearn_LR:
# 拟合模型, 获取参数矩阵
sklogistic = SKlearnLogisticRegression()
sklogistic.fit(x_train=X_train, y_train=y_train)
coefficient_matrix = sklogistic.model.coef_
# 组织数据
column_df = pd.DataFrame({'column_name': X_train.columns.tolist()})
coefficient_df = pd.DataFrame(coefficient_matrix).T
coefficient_df.columns = ['coefficient']
model_df = pd.concat([column_df, coefficient_df], axis=1).reset_index(drop=True)
else:
# 执行统计包逻辑回归拟合模型, 获取参数矩阵
# 通过P值筛选数据
# 嵌套循环用于实现有放回的 P 值校验
# 外层大循环,针对特征个数循环
filtered_col_list = X_train.columns.tolist()
first_level_num = len(filtered_col_list)
stop_flag = False
for step_1 in range(first_level_num):
# 内层循环,实现外层循环特征数量下,有放回的P值校验
# 加 1 是因为首次循环没有执行特征删除,加 1 后可执行所有特征删除遍历。
second_level_num = len(filtered_col_list) + 1
# 各特征 P 值均值series,在内循环中更新
p_values_series = pd.Series([0.0] * len(filtered_col_list), index=filtered_col_list)
delete_list = []
fit_cols_list = filtered_col_list.copy()
for step_2 in range(second_level_num):
# 拟合数据
statslogistic = StatsLogisticRegression()
statslogistic.fit(x_train=X_train[fit_cols_list], y_train=y_train)
# 模型系数及P值
coefficient_matrix = statslogistic.model.params
p_values = statslogistic.model.pvalues
# P值筛选截止条件:所有特征的 P 值均小于给定阈值
if step_2 == 0 and p_values.apply(lambda x: x <= self.p_value_threshold).all() and (coefficient_matrix.apply(lambda x: x >= 0).all() or coefficient_matrix.apply(lambda x: x < 0).all()):
stop_flag = True
break
else:
# 更新 P 值series
if step_2 == 0:
p_values_series = p_values_series.add(p_values)
else:
_col = (set(p_values_series.index.tolist()) - set(p_values.index.tolist())).pop()
fill_v = p_values_series.loc[_col]
p_values_series = p_values_series.add(p_values, fill_value=fill_v) / 2
# 删除 P 值最大,且没有被删除过的特征
sorted_col_list = p_values_series.sort_values(ascending=False).index.tolist()
del_col = ''
for col in sorted_col_list:
if col not in delete_list:
del_col = col
delete_list.append(col)
break
# 准备下次循环的特征集,有放回的删除本轮最大 P 值特征
if del_col:
fit_cols_list = filtered_col_list.copy()
fit_cols_list.remove(del_col)
if stop_flag:
break
else:
# 删除 P 均值最大的特征
sorted_col = p_values_series.sort_values(ascending=False).index.tolist()
if sorted_col:
filtered_col_list.remove(sorted_col[0])
if len(filtered_col_list) == 0:
raise Exception("No feature's P value is less than the p_value_threshold, please enlarge the threshold."
"\n没有特征能够满足 P 值筛选条件,请适当增大 P 值筛选阈值参数: p_value_threshold")
# 组织数据
model_df = pd.DataFrame()
for i in range(len(coefficient_matrix.index)):
model_df.loc[i, 'column_name'] = coefficient_matrix.index[i]
model_df.loc[i, 'coefficient'] = coefficient_matrix[i]
model_df.reset_index(drop=True, inplace=True)
# 保存各特征显著性数据:p值
self.p_value_df = p_values.copy(deep=True)
self.p_value_df = self.p_value_df.to_frame().reset_index()
self.p_value_df.columns = ['columns', 'p_value']
# 训练数据表变换
woe_df = pd.DataFrame()
for col in X_train[filtered_col_list].columns:
temp_woe = X_train[col].unique().tolist()
temp_woe_df = pd.DataFrame({'column_name': [col] * len(temp_woe), 'encoding': temp_woe})
woe_df = pd.concat([woe_df, temp_woe_df], axis=0).reset_index(drop=True)
# 计算评分卡
result_df = self._calculate_scorecard(woe_df, model_df)
# 特征列各特征值去掉后缀
result_df.loc[:, 'column_name'] = result_df.loc[:, 'column_name'].apply(
lambda x: '_'.join(str(x).split('_')[:-1]))
self.model = result_df
return self
def _in_area(self, area, value):
# 处理分箱为空的情况
none_list = ['', ' ', 'None', 'nan', 'NaN', 'NULL']
if str(area) in none_list:
if str(value) in none_list:
result = True
else:
result = False
# 处理发分箱特征值匹配
elif area.startswith('('):
area = area.replace('(', '').replace(')', '').replace('[', '').replace(']', '').replace(' ', '')
low_str, high_str = area.split(',')
low_boundary = -np.inf if low_str == '-inf' else float(low_str)
high_boundary = np.inf if high_str == 'inf' else float(high_str)
if low_boundary < float(value) <= high_boundary:
result = True
else:
result = False
# 处理类别特征匹配(未分箱数据)
else:
if area == str(value):
result = True
else:
result = False
return result
def _get_score(self, score_dict, value):
for interval, score in score_dict.items():
if self._in_area(interval, value):
return score
def predict(self, X_test):
"""
依据模型计算得分
:param X_test: 数据
:return: 最终得分
"""
score_card_df = self.model
# 过滤特征列
selected_cols = score_card_df['column_name'].unique().tolist()
# 处理空值情况
selected_cols = [item for item in selected_cols if item]
columns_dict = {}
for f_col in selected_cols:
for col in X_test.columns:
if f_col.startswith(col) or col.startswith(f_col):
columns_dict[col] = f_col
break
filter_feature_df = X_test[columns_dict]
for col in columns_dict.keys():
# 过滤特征得分分组
_score = score_card_df.loc[columns_dict[col] == score_card_df['column_name'], ['binning', 'score']]
# 分箱-得分字典
map_score_dict = dict(zip(_score['binning'].astype('str').tolist(), _score['score'].tolist()))
# 将原始数据替换为得分
filter_feature_df[col] = filter_feature_df[col].apply(lambda x: self._get_score(map_score_dict, x))
# 计算记录总分
filter_feature_df['score'] = filter_feature_df[columns_dict.keys()].sum(1)
filter_feature_df_final = filter_feature_df.loc[:, ['score']]
return filter_feature_df_final
| null | mldesigntoolkit/mldesigntoolkit/modules/modeling/_scorecard.py | _scorecard.py | py | 9,866 | python | en | code | null | code-starcoder2 | 51 |
191600124 | from subprocess import Popen
from os.path import exists
from time import sleep
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
def rename_hostname(hostname):
if 'ccd' not in hostname:
messagebox.showerror('', 'Invalid hostname')
return 0
mainframe.pack_forget()
detail_listbox = Listbox(mainframe, height=30, width=40)
detail_listbox.pack()
reg1 = r'HKLM\SYSTEM\CurrentControlSet\Control\ComputerName\ComputerName'
reg2 = r'HKLM\SYSTEM\CurrentControlSet\Control\ComputerName\ActiveComputerName'
reg3 = r'HKLM\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters'
t_reg1 = r'HKLM\TempHive\ControlSet001\Control\ComputerName\ComputerName'
t_reg2 = r'HKLM\TempHive\ControlSet001\Control\ComputerName\ActiveComputerName'
t_reg3 = r'HKLM\TempHive\ControlSet001\Services\Tcpip\Parameters'
# Set Hostname for current OS
detail_listbox.insert(END, "Changing hostname")
detail_listbox.see(END)
Popen(['reg', 'add', str(reg1), '/v', 'ComputerName', '/d', str(new_hostname), '/f'], shell=True).wait()
Popen(['reg', 'add', str(reg2), '/v', 'ComputerName', '/d', str(new_hostname), '/f'], shell=True).wait()
Popen(['reg', 'add', str(reg3), '/v', 'Hostname', '/d', str(new_hostname), '/f'], shell=True).wait()
Popen(['reg', 'add', str(reg3), '/v', "NV Hostname", '/d', str(new_hostname), '/f'], shell=True).wait()
# Get list of available drives (C: drive is omitted)
drives = ['%s:' % d for d in 'ABDEFGHIJKLMNOPQRSTUVWXYZ' if exists(r'%s:' % d)]
for d in drives: # Check if drive has windows OS installed, if True start setting new Hostname
if exists(d + r'\Windows\System32\config'):
detail_listbox.insert(END, "Loading %s drive registry" % d)
detail_listbox.see(END)
Popen(['reg', 'load', r'HKLM\TempHive', r'%s\Windows\System32\config\SYSTEM' % d], shell=True).wait()
sleep(3)
detail_listbox.insert(END, "Changing hostname at %s" % d)
detail_listbox.see(END)
Popen(['reg', 'add', str(t_reg1), '/v', 'ComputerName', '/d', str(new_hostname), '/f'], shell=True).wait()
Popen(['reg', 'add', str(t_reg2), '/v', 'ComputerName', '/d', str(new_hostname), '/f'], shell=True).wait()
Popen(['reg', 'add', str(t_reg3), '/v', 'Hostname', '/d', str(new_hostname), '/f'], shell=True).wait()
Popen(['reg', 'add', str(t_reg3), '/v', "NV Hostname", '/d', str(new_hostname), '/f'], shell=True).wait()
sleep(3)
Popen(['reg', 'unload', r'HKLM\TempHive'], shell=True)
root = Tk()
root.title('Rename Hostname')
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
mainframe = ttk.Frame(root).pack()
new_hostname = StringVar()
ttk.Label(mainframe).pack(fill=X, side=TOP, pady=20)
ttk.Label(mainframe).pack(fill=Y, side=LEFT, padx=20)
ttk.Label(mainframe).pack(fill=Y, side=RIGHT, padx=20)
ttk.Label(mainframe).pack(fill=X, side=BOTTOM, pady=20)
ttk.Label(mainframe, text='Input new hostname').pack()
ttk.Entry(mainframe, textvariable=new_hostname).pack()
ttk.Label(mainframe).pack(fill=X, pady=5)
ttk.Button(mainframe, text='GO', command=lambda: rename_hostname(new_hostname.get())).pack()
root.mainloop()
| null | Rename Hostname.py | Rename Hostname.py | py | 3,282 | python | en | code | null | code-starcoder2 | 51 |
417449304 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File Name: depth_oil
Author:
Date: 2018/9/30 0030
Description: 深度优先搜索分油解
From https://wenku.baidu.com/view/b22b422a580102020740be1e650e52ea5518cea1.html
"""
import copy
global num
class oil(object):
def __init__(self, capacity, water=0):
self.capacity = capacity
self.water = water
def __eq__(self, other):
return self.capacity == other.capacity and self.water == other.water
def __ne__(self, other):
return not self.__eq__(other)
def is_empty(self):
return self.water == 0
def is_full(self):
return self.capacity == self.water
def dump_in(self, water):
assert self.water + water <= self.capacity
self.water += water
def dump_out(self, water):
assert self.water >= water
self.water -= water
def __str__(self):
return str(self.water)
__repr__ == __str__ # ??? how to explain.
class Action(object):
def __init__(self, from_, to_, water):
self.from_ = from_
self.to_ = to_
self.water = water
class State(object):
def __init__(self, oil_list, action):
self.oil_list = copy.deepcopy(oil_list)
self.action = copy.deepcopy(action)
def do_dump(self):
self.oil_list[self.action.from_].dump_out(self.action.water)
self.oil.list[self.action.to_].dump_in(self.action.water)
def __eq__(self, other):
for bt_now, bt_end in zip(self.oil_list, other.oil_list):
if bt_now != bt_end:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class Algorithm(object):
def __init__(self, start, end):
self.start = start
self.end = end
assert len(start) == len(end)
self.oil_count = len(start)
def search(self, stack=None):
if stack is None:
stack = [State(self.start, None)]
curr = stack[-1]
if self.is_finished(curr):
self.print_result(stack)
return
for i in range(self.oil_count):
for j in range(self.oil_count):
self.do_action(stack, curr, i, j)
def do_action(self, stack, current, i, j):
new_state = self.dump_water(current.oil_list, i, j)
if new_state:
if not self.is_processed_state(stack, new_state):
stack.append(new_state)
self.search(stack)
stack.pop()
def dump_water(self, oil_list, i, j):
if i != j:
from_, to_ = oil_list[i], oil_list[j]
if from_.is_empty() or to_.is_full():
return None
water = to_.capacity - to_.water
if water > from_.water:
water = from_.water
new_state = State(oil_list, Action(i, j, water))
new_state.do_dump()
return new_state
return None
def is_finished(self, current):
for bt_1, bt_2 in zip(self.end, current.oil_list):
if bt_1 != bt_2:
return False
return True
def is_processes_state(self, stack, new_state):
for one in stack:
if one == new_state:
return True
return False
def print_result(self, stack):
num = 0
print("需要%d步" % (len(stack) - 1))
for state in stack:
num += 1
if state.action:
s = '%d号倒入%d号%d两' % (state.action.from_, state.action.to_, state.action.water)
else:
s = ''
print('%s<===%s' % (state.oil_list, s))
print('\n')
print('共有%d种解法' % (num))
if __name__ == '__main__':
start = [oil(10, 10), oil(7, 0), oil(3, 0)]
end = [oil(10, 5), oil(7, 5), oil(3, 0)]
alg = Algorithm(start, end)
st = alg.search()
| null | qq/depth_oil.py | depth_oil.py | py | 3,978 | python | en | code | null | code-starcoder2 | 51 |
650595375 | # encoding: utf-8
"""
Tämä on kesken
"""
import os
import sys
import traceback
import uuid
import hashlib
import random
import string
import re
import datetime
import csv
import alusta_tietokanta
import couch
from WhipAroundRegistry import WarApp
etunimet_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "etunimet.txt")
sukunimet_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sukunimet.txt")
etunimia = []
with open(etunimet_path) as f:
etunimia = [line.rstrip() for line in f]
sukunimia = []
with open(sukunimet_path) as f:
sukunimia = [line.rstrip() for line in f]
organisaatioita = [
{'nimi': u"Microapple", 'ytunnus': '12345678'},
{'nimi': u"Googlesoft", 'ytunnus': '12345679'},
{'nimi': u"Yahoogle", 'ytunnus': '12445679'},
{'nimi': u"Twitbook", 'ytunnus': '12545679'},
{'nimi': u"FaceIn", 'ytunnus': '12645679'},
{'nimi': u"Företaget Ab", 'ytunnus': '32345679'},
{'nimi': u"Yritys Oy", 'ytunnus': '92345679'},
{'nimi': u"Firma Oy", 'ytunnus': '82345679'},
{'nimi': u"Testiyhdistys Ry", 'ytunnus': '66345679'}
]
TIEDOSTO = 'luvat_muokattu.csv'
def kasittele_rivi(rivi, tietokanta, ytunnusdict, numero, kayttaja):
(
luettu_organisaatio,
luettu_alkupvm,
luettu_loppupvm
) = rivi
kerayksen_tyyppi = "yleishyodyllinen"
keraystavat = ["lipas", "tilisiirto"]
kerays = {
"_id": str(uuid.uuid4()),
"kerayksen_numero": numero,
"dokumenttityyppi": "kerays",
"luoja": kayttaja["_id"],
"vastuuhenkilot": [],
"nimi": u"Organisaation {} keräys".format(luettu_organisaatio),
"kerayksen_tyyppi": kerayksen_tyyppi,
"kuvaus": u"Tämä on esimerkkikeräys, joka on luotu vanhojen keräyslupien pohjalta",
"epaonnistuminen": "",
"luomisaika": datetime.datetime.now().isoformat(' '),
"alkamispvm": luettu_alkupvm,
"paattymispvm": luettu_loppupvm,
"julkaistu": True,
"keraystavat": keraystavat,
"organisaatio": {
"nimi": luettu_organisaatio,
"ytunnus": ytunnusdict[luettu_organisaatio]
},
"yhteistyotahot": None,
"rahallinen_tavoite": 0,
"kustannusten_osuus": 0,
"tilinumerot": [],
"linkki": "http://www.example.example/"
}
tietokanta.save_doc(kerays)
def luo_supertestikayttaja(tietokanta):
hetu = "000000-0000"
kayttaja_id = str(uuid.uuid4())
kayttaja = {
"_id": kayttaja_id,
"dokumenttityyppi": "kayttaja",
"vetuma_etunimi": "Testaaja",
"vetuma_sukunimi": "Test",
"vetuma_hetu": hashlib.sha1(hetu).hexdigest(),
"testi_hetu": hetu,
"kayttaja_sahkoposti": "NONE",
"organisaatiot": {},
"istuntotunnus": None,
"kirjautumisaika": None,
"sivunlatausaika": None
}
tietokanta.save_doc(kayttaja)
return kayttaja
def tee_ytunnusdict(tiedostonimi):
ytunnusdict = {}
with open(tiedostonimi, 'rb') as csvfile:
lukija = csv.reader(csvfile, delimiter=";")
i = 1
for rivi in lukija:
ytunnusdict[rivi[0]] = str(i)
i += 1
return ytunnusdict
def tyhjenna_vanha_ja_alusta():
tietokanta = couch.BlockingCouch("rahankeraysrekisteri")
tietokanta.delete_db()
tietokanta = alusta_tietokanta.alusta_tietokanta()
return tietokanta
def dummy_kayttajia(tietokanta):
kayttajat = []
for i in xrange(20):
# Randomisti ei organisaatioita tai 1-2 organisaatiota
organisaatiot = {}
if random.choice([True, False]):
organisaatio = random.choice(organisaatioita)
organisaatiot[organisaatio["ytunnus"]] = {
"nimi": organisaatio["nimi"],
"roolit": ["all"]
}
if random.choice([True, False]):
organisaatio = random.choice(organisaatioita)
if not organisaatio["ytunnus"] in organisaatiot:
organisaatiot[organisaatio["ytunnus"]] = {
"nimi": organisaatio["nimi"],
"roolit": ["all"]
}
hetu = str(random.randint(1, 28)).zfill(2) + \
str(random.randint(1, 12)).zfill(2) + \
str(random.randint(60, 90)) + "-" + \
str(random.randint(20, 600)) + \
random.choice(string.letters).upper()
etunimi = random.choice(etunimia) + "-" + random.choice(etunimia)
sukunimi = random.choice(sukunimia)
sahkoposti = etunimi.replace('-', '').lower() + "." + \
sukunimi.replace('-', '').lower() + "@sahkopostia.fi"
kayttaja = {
"_id": str(uuid.uuid4()),
"dokumenttityyppi": "kayttaja",
"vetuma_etunimi": etunimi,
"vetuma_sukunimi": sukunimi,
"vetuma_hetu": hashlib.sha1(hetu).hexdigest(),
"testi_hetu": hetu,
"kayttaja_sahkoposti": sahkoposti,
"organisaatiot": organisaatiot,
"istuntotunnus": None,
"kirjautumisaika": None,
"sivunlatausaika": None
}
kayttajat.append(kayttaja)
tietokanta.save_doc(kayttaja)
return kayttajat
def lisaa_kerayksia(tietokanta, numero, kayttajat):
    """Create demo fundraising campaigns (and the users owning them).

    :param tietokanta: CouchDB database handle
    :param numero: first free running campaign number; numero..numero+3
        are consumed by the four campaigns created here
    :param kayttajat: list of existing user documents to pick owners from
    """
    # Demo user for project review 3
    organisaatiot = {}
    organisaatiot["8892284757"] = {
        "nimi": u"Suomen laivayhdistys ry",
        "roolit": ["all"]
    }
    hetu = u"230172-253Z"
    etunimi = u"Pertti"
    sukunimi = u"Virtanen"
    sahkoposti = "NONE"
    kayttaja = {
        "_id": str(uuid.uuid4()),
        "dokumenttityyppi": "kayttaja",
        "vetuma_etunimi": etunimi,
        "vetuma_sukunimi": sukunimi,
        "vetuma_hetu": hashlib.sha1(hetu).hexdigest(),
        "testi_hetu": hetu,
        "kayttaja_sahkoposti": sahkoposti,
        "organisaatiot": organisaatiot,
        "istuntotunnus": None,
        "kirjautumisaika": None,
        "sivunlatausaika": None
    }
    tietokanta.save_doc(kayttaja)
    perttin_kerays = {
        "_id": str(uuid.uuid4()),
        "kerayksen_numero": numero,
        "dokumenttityyppi": "kerays",
        "luoja": kayttaja["_id"],
        "vastuuhenkilot": [kayttaja["_id"]],
        "nimi": u"Laivayhdistyksen yhdistystalo",
        "kerayksen_tyyppi": "joukkorahoitus",
        "kuvaus": u"Suomen Laivayhdistykselle kerätään rahaa uuden yhdistystalon rakentamiseen. Lahjoittakaa reippaasti!",
        "epaonnistuminen": "Emme saa epäonnistua!",
        "luomisaika": datetime.datetime.now().isoformat(' '),
        "alkamispvm": "2014-03-01",
        "paattymispvm": "2014-08-31",
        "julkaistu": True,
        "keraystavat": ["sms", "bitcoin", "paypal"],
        "organisaatio": {
            "nimi": u"Suomen laivayhdistys ry",
            "ytunnus": "8892284757"
        },
        "yhteistyotahot": None,
        "rahallinen_tavoite": 9000000,
        "kustannusten_osuus": 27,
        "tilinumerot": ["FI89 12345 6789012", "FI89 12345 6789013"],
        "linkki": "http://www.vikingline.fi"
    }
    tietokanta.save_doc(perttin_kerays)
    # Fundraising campaigns owned by randomly picked dummy users.
    kayttaja = tietokanta.get_doc(random.choice(kayttajat)["_id"])
    if not kayttaja["organisaatiot"]:
        kayttaja["organisaatiot"] = {}
    kayttaja["organisaatiot"]["09876544"] = {
        "nimi": "Koirapuistot For Life Ry",
        "roolit": ["all"]
    }
    tietokanta.save_doc(kayttaja)
    # NOTE(review): unlike the other campaigns, kerays1 has no
    # 'luomisaika' field -- confirm whether that is intentional.
    kerays1 = {
        "_id": str(uuid.uuid4()),
        "kerayksen_numero": numero+1,
        "dokumenttityyppi": "kerays",
        "luoja": kayttaja["_id"],
        "vastuuhenkilot": [kayttaja["_id"], random.choice(kayttajat)["_id"]],
        "nimi": u"Koirapuistojen kunnostusprojekti",
        "kerayksen_tyyppi": "yleishyodyllinen",
        "kuvaus": u"Keräyksestä saaduilla varoilla hankitaan välineitä\
ja tarvikkeita koirapuistojen kunnostustalkoisiin.",
        "epaonnistuminen": "",
        "alkamispvm": "2014-01-01",
        "paattymispvm": None,
        "julkaistu": True,
        "keraystavat": ["lipas"],
        "organisaatio": {
            "nimi": "Koirapuistot For Life Ry",
            "ytunnus": "09876544"
        },
        "yhteistyotahot": [u"TESTIkoirat"],
        "rahallinen_tavoite": 10000,
        "kustannusten_osuus": 10,
        "tilinumerot": ["FI12 34567 89012345", "FI98 12345 98765443"],
        "linkki": "http://www.facebook.com/KoirapuistotForLife"
    }
    tietokanta.save_doc(kerays1)
    kayttaja = tietokanta.get_doc(random.choice(kayttajat)["_id"])
    if not kayttaja["organisaatiot"]:
        kayttaja["organisaatiot"] = {}
    kayttaja["organisaatiot"]["1239875"] = {
        "nimi": "Radio TESTI",
        "roolit": ["all"]
    }
    tietokanta.save_doc(kayttaja)
    kerays2 = {
        "_id": str(uuid.uuid4()),
        "kerayksen_numero": numero+2,
        "dokumenttityyppi": "kerays",
        "luoja": kayttaja["_id"],
        "vastuuhenkilot": [kayttaja["_id"]],
        "nimi": u"Radio TESTI kuuntelijamaksu",
        "kerayksen_tyyppi": "joukkorahoitus",
        "kuvaus": u"Radio TESTI kerää kuuntelijoilta lahjoituksia, joilla rahoitetaan yrityksen toiminta.",
        "epaonnistuminen": "",
        "luomisaika": datetime.datetime.now().isoformat(' '),
        "alkamispvm": "2014-03-01",
        "paattymispvm": "2014-08-31",
        "julkaistu": True,
        "keraystavat": ["sms", "bitcoin", "paypal"],
        "organisaatio": {
            "nimi": "Radio TESTI",
            "ytunnus": "1239875"
        },
        "yhteistyotahot": None,
        "rahallinen_tavoite": 9000000,
        "kustannusten_osuus": 27,
        "tilinumerot": ["FI89 12345 6789012", "FI89 12345 6789013"],
        "linkki": "http://www.radiotesti.fi"
    }
    tietokanta.save_doc(kerays2)
    kayttaja = tietokanta.get_doc(random.choice(kayttajat)["_id"])
    if not kayttaja["organisaatiot"]:
        kayttaja["organisaatiot"] = {}
    kayttaja["organisaatiot"]["9879873"] = {
        "nimi": "Tietosanakirja",
        "roolit": ["all"]
    }
    tietokanta.save_doc(kayttaja)
    kerays3 = {
        "_id": str(uuid.uuid4()),
        "kerayksen_numero": numero+3,
        "dokumenttityyppi": "kerays",
        "luoja": kayttaja["_id"],
        "vastuuhenkilot": [kayttaja["_id"]],
        "nimi": u"Tietosanakirjan ylläpitokustannukset",
        "kerayksen_tyyppi": "yleishyodyllinen",
        "kuvaus": u"Keräämme rahaa internetissä toimivan tietosanakirjan ylläpitämiseen. Rahat menevät servereiden ja muun laitteiston ylläpitoon.",
        "epaonnistuminen": "",
        "luomisaika": datetime.datetime.now().isoformat(' '),
        "alkamispvm": "2011-01-01",
        "paattymispvm": None,
        "julkaistu": True,
        "keraystavat": ["paypal", "lipas"],
        "organisaatio": {
            "ytunnus": "9879873",
            "nimi": "Tietosanakirja"
        },
        "yhteistyotahot": None,
        "rahallinen_tavoite": 1000000000,
        "kustannusten_osuus": 20,
        "tilinumerot": ["FI89 12345 6789012"],
        "linkki": "http://www.tietosanakirja.example"
    }
    tietokanta.save_doc(kerays3)
def main():
    """Entry point: rebuild the test database from the CSV source file."""
    tietokanta = tyhjenna_vanha_ja_alusta()
    kayttaja = luo_supertestikayttaja(tietokanta)
    ytunnusdict = tee_ytunnusdict(TIEDOSTO)
    numero = 1000
    with open(TIEDOSTO, 'rb') as csvfile:
        for rivi in csv.reader(csvfile, delimiter=";"):
            try:
                kasittele_rivi(rivi, tietokanta, ytunnusdict, numero, kayttaja)
                numero += 1
            except ValueError:
                # Malformed rows are skipped without advancing the counter.
                pass
    dummy_kayttajat = dummy_kayttajia(tietokanta)
    lisaa_kerayksia(tietokanta, numero, dummy_kayttajat)


if __name__ == "__main__":
    main()
| null | WhipAroundRegistry/scripts/alusta_testitietokanta_csvsta.py | alusta_testitietokanta_csvsta.py | py | 11,806 | python | en | code | null | code-starcoder2 | 51 |
489008567 | from __future__ import print_function
import os
import sys
import time
import json
import logging
import logging.config
from collections import defaultdict
from argparse import ArgumentParser
from pkg_resources import resource_filename
from . import get_actions, notify
# Python 3 raises FileNotFoundError where Python 2 raised IOError;
# alias the right one so except clauses work on both interpreters.
# (The conditional expression is lazy, so the py3-only name is never
# evaluated on py2.)
FileNotFound = FileNotFoundError if sys.version_info.major > 2 else IOError
def run_forever():
    """
    Command-line interface.

    Every ``interval`` seconds, fetch the new Trello actions for every
    monitored board and send the corresponding HipChat notifications.
    Last-seen action times are persisted to ``last-actions.json`` in the
    state directory so a restart does not re-notify old actions.
    This function never returns; stop it with a signal / Ctrl-C.
    """
    # Parse command-line args
    parser = ArgumentParser()
    parser.add_argument('config_file', type=str,
                        help='Python file to load configuration from')
    parser.add_argument('-d', type=str, dest='directory', default='.',
                        help='Directory in which to save/read state')
    parser.add_argument('-i', type=int, dest='interval', default=60,
                        help='Number of seconds to sleep between rounds')
    parser.add_argument('--debug', action='store_true',
                        help=('Print actions and messages, and don\'t actually'
                              ' send to HipChat'))
    args = parser.parse_args()

    # Set up logging
    cfg_file = resource_filename(
        __name__, 'logging_debug.cfg' if args.debug else 'logging.cfg')
    logger = logging.getLogger(__name__)
    logging.config.fileConfig(cfg_file, disable_existing_loggers=False)

    # Load config file
    try:
        if sys.version_info.major > 2:
            # Import the submodule explicitly: ``import importlib`` alone
            # does not guarantee ``importlib.machinery`` is bound.
            import importlib.machinery
            config = importlib.machinery.SourceFileLoader(
                'config', args.config_file).load_module()
        else:
            import imp
            with open(args.config_file) as f:
                config = imp.load_module('config', f, args.config_file,
                                         ('.py', 'r', imp.PY_SOURCE))
    except (FileNotFound, SyntaxError):
        logger.error('Unable to import file %s', args.config_file)
        sys.exit(1)
    if not config.MONITOR:
        logger.error('Nothing to monitor!')
        sys.exit(2)

    interval = max(0, args.interval)
    state_file = os.path.join(args.directory, 'last-actions.json')
    # Don't check back in time more than 20 minutes ago.
    a_while_ago = time.time() - 20*60
    last_action_times = defaultdict(lambda: a_while_ago)
    try:
        # Context manager so the handle is closed even when json.load()
        # raises (the previous json.load(open(...)) leaked the handle).
        with open(state_file) as f:
            last_action_times.update(json.load(f))
    except (FileNotFound, ValueError):
        logger.warning('Warning: no saved state found.')

    while True:
        # First get all the actions, to avoid doing it multiple times for the
        # same board.
        new_actions = {}
        for parameters in config.MONITOR:
            board_id = parameters['board_id']
            if board_id not in new_actions:
                (actions, new_last_time) = get_actions(
                    config, last_action_times[board_id], board_id)
                new_actions[board_id] = actions
                last_action_times[board_id] = new_last_time
        # Then send all the HipChat notifications.
        for parameters in config.MONITOR:
            board_id = parameters['board_id']
            # Iterate over the actions, in reverse order because of chronology.
            # Defend against the most common type of failure: KeyError
            for action in reversed(new_actions[board_id]):
                try:
                    notify(config, action, debug=args.debug, **parameters)
                except KeyError:
                    # Logger.warn() is deprecated; warning() is equivalent.
                    logger.warning(
                        'unable to process action for notification: %r',
                        action)
        # Save state to a file.
        with open(state_file, 'w') as f:
            json.dump(last_action_times, f)
        time.sleep(interval)
| null | trello_hipchat/cli.py | cli.py | py | 3,851 | python | en | code | null | code-starcoder2 | 51 |
import os

# NLP 100 knock 12: split each tab-separated line of hightemp.txt into
# its first two columns and save them to col1.txt / col2.txt.
with open("hightemp.txt", "r") as src, \
        open("col1.txt", "w") as out1, \
        open("col2.txt", "w") as out2:
    for line in src:
        fields = line.split('\t')
        col1 = fields[0] + "\n"
        out1.write(col1)
        print(col1)
        col2 = fields[1] + "\n"
        out2.write(col2)
        print(col2)

print("確認:")
# Cross-check the result with the Unix cut command.
os.system("cut -f 1 col1.txt")
os.system("cut -f 2 col2.txt")
| null | bambi/chapter02/knock12.py | knock12.py | py | 479 | python | en | code | null | code-starcoder2 | 51 |
398307102 | from random import randint
from .errors import RedisKeyError
from .datatypes import RedisSortable, Comparable, SetOperatorMixin
class ZOrder(object):
    """
    Enum with the sort orders supported by ``ZSet``.

    ``ASC``/``DESC`` are plain class attributes.  The previous
    implementation declared them as properties and made ``__new__``
    return the class itself, so ``ZOrder.ASC`` evaluated to a
    ``property`` object instead of a value and only compared equal by
    object identity.  Plain integers keep all existing
    ``order == ZOrder.ASC`` comparisons working and also allow passing
    the literal values 0/1.
    """
    ASC = 0
    DESC = 1
class ZSet(RedisSortable, Comparable, SetOperatorMixin):
    """
    An Ordered-set datatype for Python. It's a mixture between Redis' ``ZSet``
    and a simple Set-type. Main difference is the concept of a score associated
    with every member of the set.
    """

    # NOTE(review): __slots__ is effectively inert here -- __init__ assigns
    # attributes (_withscores, tuple2scalar, tmp_keys, aggregate) that are
    # not listed, which only works because a base class still provides a
    # per-instance __dict__.  Confirm whether __slots__ should be kept.
    __slots__ = ("_key", "_client", "_pipe")

    def __init__(self, client, key, iter=[], type=str, withscores=True):
        super(ZSet, self).__init__(client, key, type=type)
        self._withscores = withscores
        if hasattr(iter, "__iter__") and len(iter):
            # TODO: What if the key already exists?
            for score, val in iter:
                # Bug fix: the destination key was missing from zadd()
                # (the old call was zadd(val, score), which made redis
                # treat the member as the key name).  The three-argument
                # form matches the other zadd() calls in this class.
                self._pipe.zadd(self.key, val, score)
            self._pipe.execute()
        self.tuple2scalar = lambda a: a[0]
        self.tmp_keys = []
        self.aggregate = 'sum'

    def type_convert_tuple(self, value):
        """Convert ``value`` back to the declared element type.

        ``value`` is either a bare member or a ``(member, score)`` pair
        as returned by redis with ``withscores=True``.
        """
        if isinstance(value, tuple):
            return (self.type_convert(value[0]), value[1])
        else:
            return self.type_convert(value)

    @property
    def data(self):
        # All (member, score) pairs, type-converted.
        return map(
            self.type_convert_tuple,
            self._client.zrange(
                self.key,
                0,
                -1,
                withscores=True)
        )

    @property
    def values(self):
        # All members without scores, type-converted.
        return map(
            self.type_convert_tuple,
            self._client.zrange(self.key, 0, -1)
        )

    @property
    def items(self):
        # All (member, score) pairs, type-converted.
        return map(
            self.type_convert_tuple,
            self._client.zrange(self.key, 0, -1, withscores=True)
        )

    def __len__(self):
        return self._client.zcard(self.key)

    def __contains__(self, value):
        # A member exists iff it has a score.
        return self._client.zscore(self.key, value) is not None

    def __iter__(self):
        # TODO: Is there a better way than getting ALL at once?
        if self._withscores:
            data = self.items
        else:
            data = self.values
        for item in data:
            yield item

    def __repr__(self):
        return str(self.data)

    def __getitem__(self, key):
        if isinstance(key, slice):
            # Translate Python slice semantics (exclusive stop) to redis
            # ZRANGE semantics (inclusive stop, -1 meaning "last").
            stop = key.stop
            if stop is None:
                stop = -1
            else:
                stop -= 1
            start = key.start
            if start is None:
                start = 0
            return map(
                self.type_convert_tuple,
                self._client.zrange(
                    self._key,
                    start,
                    stop,
                    withscores=self._withscores)
            )
        else:
            return self.type_convert_tuple(
                self._client.zrange(
                    self._key,
                    key,
                    key,
                    withscores=self._withscores)[0]
            )

    def __setitem__(self, key, value):
        value = self.type_prepare(value)
        if isinstance(key, slice):
            raise TypeError('Setting slice ranges not supported for zsets.')
        else:
            # NOTE(review): the tuple unpack assumes withscores=True;
            # with _withscores False this raises ValueError -- confirm.
            item, rank = self._client.zrange(
                self._key,
                key,
                key,
                withscores=self._withscores
            )[0]
            return self._client.zadd(self._key, value, rank)

    def add(self, el, score):
        """
        Add element ``el`` with ``score`` to this ``ZSet``
        """
        try:
            return self._client.zadd(self.key, str(el), int(score))
        except ValueError:
            return False

    def discard(self, member):
        """
        Remove ``member`` form this set;
        Do nothing when element is not a member
        """
        self._client.zrem(self.key, member)

    def copy(self, key):
        """
        Return copy of this ``ZSet`` as new ``ZSet`` with key ``key``
        """
        # A one-set union-store is redis' idiom for copying a zset.
        self._client.zunionstore(key, [self.key])
        return ZSet(self._client, key)

    def union(self, *others):
        """
        Return the union of this set and others as new set
        """
        data = set(self.data)
        for other in others:
            for element in other:
                data.add(element)
        return data

    def update(self, *others, **kwargs):
        """
        Store the union of this set and others into this set
        """
        aggregate = self.aggregate
        if 'aggregate' in kwargs:
            aggregate = kwargs['aggregate']
        redis_keys = self.parse_args(others)
        if redis_keys:
            self._pipe.zunionstore(
                self.key,
                redis_keys,
                aggregate=aggregate
            )
        self._delete_temporary()
        self._pipe.execute()

    def intersection(self, *others, **kwargs):
        """
        Return the intersection of this set and others as new set
        """
        aggregate = self.aggregate
        if 'aggregate' in kwargs:
            aggregate = kwargs['aggregate']
        redis_keys = self.parse_args(others)
        temporary_key = '__tmp__intersection'
        self.tmp_keys.append(temporary_key)
        if redis_keys:
            self._pipe.zinterstore(
                temporary_key,
                redis_keys,
                aggregate=aggregate
            )
        self._pipe.zrange(temporary_key, 0, -1, withscores=True)
        # i = number of temp-key deletions queued after the zrange above,
        # used to index the zrange result in the pipeline output.
        i = self._delete_temporary()
        return set(
            map(self.type_convert_tuple, self._pipe.execute()[-i - 1])
        )

    def intersection_update(self, *others, **kwargs):
        """
        Update this set with the intersection of itself and others
        Accepts both native python sets and redis ZSet objects as arguments
        Uses single redis pipe for the whole procedure (= very fast)
        """
        aggregate = self.aggregate
        if 'aggregate' in kwargs:
            aggregate = kwargs['aggregate']
        redis_keys = self.parse_args(others)
        if redis_keys:
            self._pipe.zinterstore(
                self.key,
                redis_keys,
                aggregate=aggregate
            )
        self._delete_temporary()
        self._pipe.execute()

    def difference(self, *others):
        """
        Return the difference between this set and other as new set
        """
        data = set(self.data)
        for other in others:
            data -= other
        return data

    def difference_update(self, *others):
        """
        Remove all elements of other sets from this set
        """
        pipe = self._pipe
        pipe.delete(self.key)
        for element in self.difference(*others):
            pipe.zadd(self.key, element[0], element[1])
        pipe.execute()

    def symmetric_difference(self, *others):
        """
        Return the symmetric difference of this set and others as new set
        """
        data = set(self.data)
        for other in others:
            data = data.symmetric_difference(other)
        return data

    def symmetric_difference_update(self, *others):
        """
        Update this set with the symmetric difference of itself and others
        """
        key_union = self.generate_temporary_key()
        key_inter = self.generate_temporary_key()
        self.tmp_keys = [key_union, key_inter]
        redis_keys = self.parse_args(others)
        if redis_keys:
            self._pipe.zinterstore(key_inter, redis_keys) \
                .zunionstore(key_union, redis_keys)
        self._pipe.zrange(key_union, 0, -1, withscores=True)
        self._pipe.zrange(key_inter, 0, -1, withscores=True)
        i = self._delete_temporary()
        values = self._pipe.execute()
        # symmetric difference == union minus intersection
        diff = set(values[-2 - i]) - set(values[-1 - i])
        self._pipe.delete(self.key)
        for element, score in diff:
            self._pipe.zadd(self.key, element, score)
        self._pipe.execute()
        return set()

    def clear(self):
        """
        Purge/delete all elements from this set
        """
        return self._client.delete(self.key)

    def pop(self):
        """
        Remove and return a random element from the sorted set.
        Raises ``RedisKeyError`` if set is empty.
        """
        length = self.__len__()
        if (length == 0):
            raise RedisKeyError("ZSet is empty")
        idx = randint(0, length - 1)
        # Fetch and remove the element atomically in one pipeline;
        # execute() returns [zrange result, zremrangebyrank result].
        value = self._pipe.zrange(
            self.key,
            idx,
            idx
        ).zremrangebyrank(self.key, idx, idx).execute()[0][0]
        return self.type(value)

    def incr_score(self, el, by=1):
        """
        Increment score of ``el`` by value ``by``
        """
        return self._client.zincrby(self.key, el, by)

    def rank_of(self, el, order=ZOrder.ASC):
        """
        Return the ordinal index of element ``el`` in the sorted set,
        whereas the sortation is based on scores and ordered according
        to the ``order`` enum.
        """
        if (order == ZOrder.ASC):
            return self._client.zrank(self.key, el)
        elif (order == ZOrder.DESC):
            return self._client.zrevrank(self.key, el)

    def score_of(self, el):
        """
        Return the associated score of element ``el`` in the sorted set.
        When ``el`` is not a member ``NoneType`` will be returned.
        """
        return self._client.zscore(self.key, el)

    def range_by_rank(self, min, max, order=ZOrder.ASC):
        """
        Return a range of elements from the sorted set by specifying ``min``
        and ``max`` ordinal indexes, whereas the sortation is based on
        scores and ordered according to the given ``order`` enum.
        """
        if (order == ZOrder.ASC):
            return self._client.zrange(self.key, min, max)
        elif (order == ZOrder.DESC):
            return self._client.zrevrange(self.key, min, max)

    def range_by_score(self, min, max):
        """
        Return a range of elements from the sorted set by specifying ``min``
        and ``max`` score values, whereas the sortation is based on scores
        with a descending order.
        """
        return self._client.zrangebyscore(self.key, min, max)

    def range_by_score_limit(self, limit=20, before=0, treshold=20):
        """
        Return a range of elements from the sorted set by specifying the
        upper ``before`` score and the maximum number of items returned.
        Note: only works for integer based scores.
        """
        # Bug fix: this method referenced nonexistent attributes
        # ``self.redis_key`` and ``self.treshold`` (AttributeError on any
        # call); use ``self.key`` and the ``treshold`` parameter instead.
        if not before:
            return self._client.zrevrange(
                self.key,
                0,
                limit,
                withscores=True
            )
        else:
            items = []
            while len(items) < limit:
                items += self._client.zrevrangebyscore(
                    self.key,
                    before - 1,
                    before - 1 - limit - treshold,
                    withscores=self._withscores)
                if before <= 0:
                    break
                before -= limit + treshold
            return map(self.type_convert_tuple, items[:limit])

    def grab(self):
        """
        Return a random element from the sorted set (without removing it)
        """
        length = self.__len__()
        if (length == 0):
            return None
        idx = randint(0, length - 1)
        # Bug fix: the old code subscripted the pipeline object
        # (``self._pipe.zrange(...)[0]``); queued pipeline commands return
        # the pipeline itself, not results, so query the client directly.
        return self._client.zrange(self.key, idx, idx)[0]

    def intersection_copy(self, dstKey, aggregate, *otherKeys):
        """
        Store the intersection of this set and ``otherKeys`` under ``dstKey``
        """
        # Bug fix: ``otherKeys`` is a tuple and has no ``append()``; build
        # a list including this set's own key instead.
        keys = list(otherKeys) + [self.key]
        return self._client.zinterstore(dstKey, keys, aggregate)

    def union_copy(self, dstKey, aggregate, *otherKeys):
        """
        Store the union of this set and ``otherKeys`` under ``dstKey``
        """
        # Bug fix: same tuple/append problem as in intersection_copy().
        keys = list(otherKeys) + [self.key]
        return self._client.zunionstore(dstKey, keys, aggregate)

    def remove_range_by_rank(self, min, max):
        """
        Remove a range of elements from the sorted set by specifying the
        constraining ordinal indexes ``min`` and ``max``.
        """
        return self._client.zremrangebyrank(self.key, min, max)

    def remove_range_by_score(self, min, max):
        """
        Remove a range of elements from the sorted set by specifying the
        constraining score values ``min`` and ``max``.
        """
        return self._client.zremrangebyscore(self.key, min, max)
| null | redis_natives/zset.py | zset.py | py | 12,655 | python | en | code | null | code-starcoder2 | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.