index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
984,100 | 2ba25eb4434731e840274e8ac98c2d0123e2cfa9 | ii = [('MarrFDI.py', 1), ('SadlMLP.py', 3), ('WilbRLW.py', 1), ('WilbRLW4.py', 1), ('MartHSI2.py', 1), ('LeakWTI3.py', 2), ('PettTHE.py', 1), ('ClarGE2.py', 1), ('KiddJAE.py', 4), ('ClarGE.py', 1), ('NewmJLP.py', 1), ('SoutRD2.py', 2), ('SoutRD.py', 2), ('WheeJPT.py', 1), ('MackCNH.py', 2), ('BentJRP.py', 2), ('RoscTTI.py', 1), ('AinsWRR2.py', 1), ('MereHHB2.py', 3), ('MartHSI.py', 1)] |
984,101 | 437e2f2fae64be6174a67ad819a19a988e25a8c0 | import os
from geo_pyspark.core.enums import FileDataSplitter
from geo_pyspark.core.geom_types import Envelope
from tests.tools import tests_path
input_location = os.path.join(tests_path, "resources/primaryroads-linestring.csv")
query_window_set = os.path.join(tests_path, "resources/zcta510-small.csv")
offset = 0
splitter = FileDataSplitter.CSV
grid_type = "rtree"
index_type = "rtree"
num_partitions = 5
distance = 0.01
query_polygon_set = os.path.join(tests_path, "resources/primaryroads-polygon.csv")
input_count = 3000
input_boundary = Envelope(minx=-123.393766, maxx=-65.648659, miny=17.982169, maxy=49.002374)
input_boundary_2 = Envelope(minx=-123.393766, maxx=-65.649956, miny=17.982169, maxy=49.002374)
match_count = 535
match_with_origin_with_duplicates_count = 875
transformed_envelope = Envelope(14313844.29433424, 16791709.85358734, 942450.5989896542, 8474779.278028419)
transformed_envelope_2 = Envelope(14313844.29433424, 16791709.85358734, 942450.5989896542, 8474645.488977494)
|
984,102 | 49e38e7e0cbc4f28bdab8a0a41ea7a8c36a6a3e5 | '''
Created on 3 feb 2013
@author: Gildur
'''
#coding=UTF-8
import sys
import argparse
import logging
import UserDialogue
import Replicator
import ReplicatorConfig
def main(argv):
    """Parse source/destination directory arguments and start replication."""
    arg_parser = argparse.ArgumentParser(description=ReplicatorConfig.program_description)
    arg_parser.add_argument("src", help="Source Directory")
    arg_parser.add_argument("dst", help="Destination Directory")
    parsed = arg_parser.parse_args(argv)
    replicate_directories(parsed.src, parsed.dst)
def replicate_directories(source_directory, destination_directory):
    """Replicate the directory structure of `source_directory` under
    `destination_directory`, after asking the user for confirmation.

    Bug fix: the original ended with `finally: sys.exit(1)`, which runs on
    every path (including success and the user declining), replacing
    `sys.exit(0)` with exit status 1. The `finally` clause is removed so the
    success path exits 0 and only error paths exit 1.
    """
    try:
        replicator = Replicator.Replicator()
        user_dialogue = UserDialogue.UserDialogue()
        if user_dialogue.query_yes_or_no(ReplicatorConfig.question_replicate_directory_structure, ReplicatorConfig.default_answer_to_replicate_directory_structure_query):
            replicator.replicate(source_directory, destination_directory)
            replicator.display_successful_replication_message(destination_directory)
        else:
            print("Terminating program.")
            sys.exit(0)
    except OSError as error:
        # Known, user-fixable errors are just printed; anything else is
        # logged and reported as a partial replication.
        if (str(error) == ReplicatorConfig.error_message_directory_already_exists
                or str(error) == ReplicatorConfig.error_message_directory_to_be_replicated_does_not_exist):
            print("OSError: {0}".format(str(error)))
        else:
            logging.warning("OSError: {0}".format(str(error)))
            replicator.display_successful_replication_message(destination_directory)
            print("There were OSErrors while replicating the directory structure, probably due to lack of access/permission rights to some of the folders.\nCheck errors.log for details.")
        sys.exit(1)
    except Exception as error:
        print("An unexpected exception was encountered: {0}".format(str(error)))
        sys.exit(1)
if __name__ == "__main__":
logging.basicConfig(format="%(asctime)s %(message)s", filename="errors.log", level=logging.WARNING)
sys.exit(main(sys.argv[1:]))
|
984,103 | 48822647c8db504ea258d47d02873fd923e7c92e | import pandas as pd
import talib
import ta
# def add_atr(dataset,param,first_header):
# field_name = 'atr' + str(param)
# df = dataset[first_header].copy()
#
# atr = talib.ATR(df['High'].values, df['Low'].values, df['Close'].values, timeperiod=param)
# column = pd.DataFrame(atr, index=df.index)
#
# dataset[first_header, field_name] = column
# return dataset
def add_aroon(dataset,param, first_header):
    """Append Aroon up/down columns ('aroon-up<param>', 'aroon-down<param>')
    under `first_header`.

    Assumes `dataset` has two-level columns with 'High'/'Low' under
    `first_header` -- TODO confirm against callers.
    """
    field_name_up = 'aroon-up' + str(param)
    field_name_down = 'aroon-down' + str(param)
    df = dataset[first_header].copy()
    # talib.AROON returns (aroon_down, aroon_up), in that order
    aroon_down, aroon_up = talib.AROON(df['High'].values, df['Low'].values, timeperiod=param)
    column_up = pd.DataFrame(aroon_up, index=df.index)
    column_down = pd.DataFrame(aroon_down, index=df.index)
    dataset[first_header, field_name_up] = column_up
    dataset[first_header, field_name_down] = column_down
    return dataset
def add_aroon_s(dataset,param, first_header):
    """Append an Aroon-oscillator column ('aroon_s<param>'), scaled to [-1, 1].

    Fix: the lookback was hard-coded to 14, silently ignoring `param`;
    it is now passed through, matching the other add_* helpers.
    """
    field_name_up = 'aroon_s' + str(param)
    df = dataset[first_header].copy()
    high = df['High'].values
    low = df['Low'].values
    aroon = talib.AROONOSC(high, low, timeperiod=param)/100
    col = pd.DataFrame(aroon, index=df.index)
    dataset[first_header, field_name_up] = col
    return dataset
def add_mfi(dataset,param, first_header):
    """Append a Money Flow Index column ('mfi<param>') under `first_header`,
    rescaled from [0, 100] to [-0.5, 0.5].
    """
    field_name_up = 'mfi' + str(param)
    df = dataset[first_header].copy()
    high = df['High'].values
    low = df['Low'].values
    close = df['Close'].values
    # talib.MFI requires a float volume array
    volume = df['Volume'].astype(float).values
    mfi = talib.MFI(high, low, close, volume, timeperiod=param)/100 - 0.5
    col = pd.DataFrame(mfi, index=df.index)
    dataset[first_header, field_name_up] = col
    return dataset
def add_fi(dataset,param, first_header):
    """Append a Force Index column ('fi<param>') under `first_header`."""
    sub = dataset[first_header].copy()
    fi_series = ForceIndex(sub['Close'], sub['Volume'], timeperiod=param)
    fi_frame = pd.DataFrame(fi_series, index=sub.index)
    dataset[first_header, 'fi' + str(param)] = fi_frame
    return dataset
def ForceIndex(close, volume, timeperiod):
    """Force Index: price change over `timeperiod` bars multiplied by volume."""
    momentum = close.diff(timeperiod)
    return pd.Series(momentum * volume)
def ssl(high, low, close, timeperiod):
    """Gann HiLo / SSL-style baseline.

    Tracks whether price last closed above the rolling mean of highs
    (state 1) or below the rolling mean of lows (state 0), and returns
    the opposite-side average for each bar as a plain list.
    """
    avg_high = pd.Series(high, index=high.index).rolling(window=timeperiod).mean().values
    avg_low = pd.Series(low, index=low.index).rolling(window=timeperiod).mean().values
    closes = pd.Series(close, index=low.index).values
    state = 0  # 1 = last signal was bullish, 0 = bearish (initial default)
    baseline = []
    for c, hi, lo in zip(closes, avg_high, avg_low):
        if c > hi:
            state = 1
        elif c < lo:
            state = 0
        baseline.append(lo if state else hi)
    return baseline
def add_ssl(dataset,param, first_header):
    """Append an SSL/Gann-HiLo baseline column ('ssl<param>') under
    `first_header`, computed by the module-level `ssl` helper.
    """
    field_name = 'ssl' + str(param)
    df = dataset[first_header].copy()
    ssl_list = ssl(df['High'], df['Low'], df['Close'], param)
    col = pd.DataFrame(ssl_list, index=df.index)
    dataset[first_header, field_name] = col
    return dataset
def add_ssl_s(dataset,param, first_header):
    """Append a signed SSL distance column ('ssl_s<param>'): close price
    minus the SSL baseline, so the sign indicates which side price is on.
    """
    field_name = 'ssl_s' + str(param)
    df = dataset[first_header].copy()
    close = df['Close']
    ssl_list = ssl(df['High'], df['Low'], df['Close'], param)
    col = pd.Series(ssl_list, index=df.index)
    dataset[first_header, field_name] = close-col
    return dataset
def add_rsi(dataset,param, first_header):
    """Append an RSI column ('rsi<param>') under `first_header`,
    rescaled from [0, 100] to [0, 1].
    """
    field_name = 'rsi' + str(param)
    df = dataset[first_header].copy()
    rsi = talib.RSI(df['Close'], timeperiod=param)/100
    col = pd.DataFrame(rsi, index=df.index)
    dataset[first_header, field_name] = col
    return dataset
def add_cmf(dataset,param, first_header):
    """Append a Chaikin Money Flow column ('cmf<param>') under `first_header`.

    Uses the `ta` library (not talib); NaNs are filled by the library
    (fillna=True).
    """
    field_name = 'cmf' + str(param)
    df = dataset[first_header].copy()
    high = df['High']
    low = df['Low']
    close = df['Close']
    volume = df['Volume']
    cmf = ta.volume.chaikin_money_flow(high, low, close, volume, n=param, fillna=True)
    col = pd.DataFrame(cmf, index=df.index)
    dataset[first_header, field_name] = col
    return dataset
def add_macd_diff(dataset,param, first_header):
    """Append a MACD-histogram column ('macd_diff<fast>_<slow>_<sign>').

    `param` must be a 3-sequence: [fast EMA, slow EMA, signal EMA].
    Raises Exception otherwise.
    """
    if len(param)!=3:
        raise Exception('This ratio requires three parameters, [0]:fast ema, [1]: slow ema, [2]:sign ema')
    field_name = 'macd_diff' + str(param[0]) + '_' + str(param[1]) + '_' + str(param[2])
    df = dataset[first_header].copy()
    close = df['Close']
    macd = ta.trend.macd_diff(close, n_fast=param[0], n_slow=param[1], n_sign=param[2], fillna=True)
    col = pd.DataFrame(macd, index=df.index)
    dataset[first_header, field_name] = col
    return dataset
def add_cmo(dataset,param, first_header):
    """Append a Chande Momentum Oscillator column ('cmo<param>'),
    rescaled from [-100, 100] to [-1, 1].

    Fix: the lookback was hard-coded to 14, silently ignoring `param`;
    it is now passed through, matching the other add_* helpers.
    """
    field_name = 'cmo' + str(param)
    df = dataset[first_header].copy()
    close = df['Close']
    cmo = talib.CMO(close, timeperiod=param)/100
    col = pd.DataFrame(cmo, index=df.index)
    dataset[first_header, field_name] = col
    return dataset
def add_sar(dataset,param, first_header):
    """Append a Parabolic SAR column ('sar<accel>_<max>').

    `param` must be a 2-sequence: [acceleration, maximum]. Raises
    Exception otherwise.
    """
    if len(param) != 2:
        raise Exception('This ratio requires two parameters, [0]:acceleration, [1]: maximum ')
    field_name = 'sar' + str(param[0]) + '_' + str(param[1])
    df = dataset[first_header].copy()
    high = df['High']
    low = df['Low']
    sar = talib.SAR(high, low, acceleration=param[0], maximum=param[1])
    col = pd.DataFrame(sar, index=df.index)
    dataset[first_header, field_name] = col
    return dataset
def add_sar_s(dataset,param, first_header):
    """Append a signed SAR distance column ('sar_s<accel>_<max>'):
    close price minus the Parabolic SAR value.

    `param` must be a 2-sequence: [acceleration, maximum].
    """
    if len(param) != 2:
        raise Exception('This ratio requires two parameters, [0]:acceleration, [1]: maximum ')
    field_name = 'sar_s' + str(param[0]) + '_' + str(param[1])
    df = dataset[first_header].copy()
    high = df['High']
    low = df['Low']
    close = df['Close']
    sar = talib.SAR(high, low, acceleration=param[0], maximum=param[1])
    col = pd.Series(sar, index=df.index)
    dataset[first_header, field_name] = close-col
    return dataset
# def add_dema()
def add_ema_slope(dataset,param, first_header):
    """Append an EMA-slope column ('ema_slope<param>'): the first difference
    of an EMA of Close, smoothed by a 5-bar rolling mean.
    """
    mean_periods = 5  # hard-coded smoothing window for the slope
    ema_name = 'ema' + str(param)
    field_name = 'ema_slope' + str(param)
    # Reuse a previously computed EMA column when present, else compute one.
    # NOTE(review): membership is checked on the sub-frame's column names --
    # assumes 'ema<param>' appears there when set; confirm with callers.
    if ema_name in dataset[first_header].columns.values.tolist():
        ema_col = dataset[first_header, ema_name]
    else:
        # print('Param is: ' + str(param))
        ema_col = dataset[first_header, 'Close'].ewm(span=int(param), adjust=False, min_periods=int(param)).mean()
    ema_slope_col = ema_col.diff().rolling(window=mean_periods).mean()
    dataset[first_header, field_name] = ema_slope_col
    return dataset
def add_ema_slope_small_dataset(dataset,param):
    """Single-level-column variant of add_ema_slope.

    `param` must be a 2-sequence: [EMA period, slope-smoothing window].
    Appends 'ema<period>_slope_mean<window>' to `dataset`.
    """
    if len(param) != 2:
        raise Exception('This ratio requires two parameters, [0]:period, [1]: mean parameter ')
    ema_name = 'ema' + str(param[0])
    field_name = ema_name + '_slope_mean' + str(param[1])
    # Reuse an existing EMA column when present, else compute one from Close
    if ema_name in dataset.columns.values.tolist():
        ema_col = dataset[ ema_name]
    else:
        ema_col = dataset['Close'].ewm(span=param[0], adjust=False, min_periods=param[0]).mean()
    ema_slope_col = ema_col.diff().rolling(window=param[1]).mean()
    dataset[ field_name] = ema_slope_col
    return dataset
984,104 | ee160a3508389063dbd83993499298f253ae18d9 | from django.conf.urls import url
from . import views
from .API.group import *
from .API.user import *
from .API.device import *
from django.contrib.auth import views as auth_views
from socket import gethostname
from django.conf import settings
app_name = 'reg'
# URL routes for the 'reg' app.
# Fixes: (1) the MAC-address character class was [0-9a-fA-f:]; the A-f range
# accidentally matched all of A-Z plus '[\]^_`' -- corrected to hex digits.
# (2) '^' anchors added where missing, so e.g. 'foo/ajax/' no longer matches
# the ajax route.
urlpatterns = [
    url(r'^$',
        views.group,
        name='index'),
    url(r'^group/(?P<group_id>[0-9]+)$',
        views.group,
        name='group'),
    url(r'^ajax/$',
        views.ajaxHandler,
        name='ajaxHandler'),
    url(r'^api/groups/$',
        DeviceGroupAPI.as_view(),
        name='APIGroupList'),
    url(r'^api/groups/(?P<pk>[0-9]+)$',
        DeviceGroupAPI.as_view(),
        name='APIGroupView'),
    url(r'^api/groups/(?P<action>all)$',
        DeviceGroupAPI.as_view(),
        name='APIGroupViewAll'),
    url(r'^api/groups/(?P<pk>[0-9]+)/(?P<action>detail|members|devices|admins)/(?P<item>.*)$',
        DeviceGroupAPI.as_view(),
        name='APIGroupViewDetail'),
    url(r'^api/users/$',
        UserAPI.as_view(),
        name='APIUserList'),
    url(r'^api/users/(?P<username>[0-9a-zA-Z]+)/$',
        UserAPI.as_view(),
        name='APIUserDetail'),
    url(r'^api/users/(?P<username>[0-9a-zA-Z]+)/(?P<action>groups)/$',
        UserAPI.as_view(),
        name='APIUserGroup'),
    url(r'^api/devices/$',
        DeviceAPI.as_view(),
        name='APIDeviceList'),
    # NOTE(review): duplicate of the name 'APIDeviceList' above makes
    # reverse() ambiguous; left as-is in case templates depend on it.
    url(r'^api/devices/(?P<action>detail|all/detail|all)/$',
        DeviceAPI.as_view(),
        name='APIDeviceList'),
    url(r'^api/devices/(?P<mac>[0-9a-fA-F:]+)/$',
        DeviceAPI.as_view(),
        name='APIDeviceDetail'),
    url(r'^logout/$',
        auth_views.logout,
        {'template_name': 'registration/logout.tpl'},
        name='logout'),
    url(r'^admin/',
        views.admin,
        name='admin'),
]
# Only expose the catch-all login route when running in development mode.
# (Idiom fix: truthiness test instead of comparing to True with ==.)
if settings.DEV:
    urlpatterns += [
        url(r'^.*login/$',
            auth_views.login,
            {'template_name': 'registration/login.tpl'},
            name='login'),
    ]
|
984,105 | 47756b3e29ecaceda30128ff5ed6bfcc12c262aa | class User:
"""
Dummy class for sake of example, to make the stubs.py actually run.
"""
def __init__(self, user_id):
self.user_id = user_id
def get_subscriptions(self):
if self.user_id == 123:
return ['silver']
else:
return ['bronze']
|
984,106 | 20771cfd02404d9fe5c2f227cce1e0bc51520f08 | #!/usr/bin/python
'''
100 Prisoners will all be executed
unless they pick their number from
100 boxes. One by one, in a sealed
room. You only get to open half of
the boxes. If you havent found the
number you need, you are dead mate
'''
from random import shuffle
numPrisoners = 100
iterations = 1000
def badStrategy(prisoner):
    """Open numPrisoners/2 boxes at random; True if `prisoner`'s number found.

    Fixes: (1) the original aliased the global `boxes` and shuffled it in
    place, corrupting shared state -- we shuffle a copy of the indices;
    (2) `numPrisoners/2` is a float under Python 3 and cannot slice -- use
    floor division.
    """
    boxesToOpen = list(range(numPrisoners))
    shuffle(boxesToOpen)
    for boxToOpen in boxesToOpen[:numPrisoners // 2]:
        if prisoner == boxes[boxToOpen]:
            return True
    return False
def goodStrategy(prisoner):
    """Cycle-following strategy: open box N, then the box numbered by its
    contents, and so on. True if the prisoner's own number turns up within
    numPrisoners/2 openings.

    Fix: `numPrisoners/2` is a float under Python 3; the comparison needs
    integer floor division to preserve the intended box budget.
    """
    # See if my number, N, is in the N:th box
    numberFound = boxes[prisoner]
    boxesOpened = 1
    while boxesOpened < numPrisoners // 2:
        if numberFound == prisoner:
            # I lived!
            return True
        numberFound = boxes[numberFound]
        boxesOpened += 1
    return False
def prisonersOpen(boxes):
    """Return 1 if every prisoner finds their number, else 0.

    Note: the strategies consult the module-level `boxes`; the parameter
    mirrors the global by convention.
    """
    # all() short-circuits on the first failure, like the original loop
    return int(all(goodStrategy(p) for p in prisoners))
# Assign all prisoners a number.
# Fixes: (1) `range(...)` is immutable under Python 3 and cannot be
# shuffled -- materialize a list; (2) `boxes = prisoners` aliased the same
# object, so shuffling boxes also reordered prisoners -- shuffle a copy;
# (3) `iter` shadowed the builtin -- renamed.
prisoners = list(range(numPrisoners))
successes = 0.0
for _ in range(iterations):
    # Put a random number in each box (fresh copy each round)
    boxes = list(prisoners)
    shuffle(boxes)
    successes += prisonersOpen(boxes)
print('Chance to survive: ' + str(successes / iterations))
984,107 | 3873d8c7dfc069bb261e99b18f367ebe7152ed58 | #%%
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pymaid_creds import url, name, password, token
from data_settings import pairs_path
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
from contools import Celltype, Celltype_Analyzer, Promat
import navis
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
pairs = Promat.get_pairs(pairs_path=pairs_path)
# %%
# load and plot some LNs
LNs = pymaid.get_skids_by_annotation('mw LNs to plot')
LNs = Promat.extract_pairs_from_list(LNs, pairList=pairs)[0]
neuropil = pymaid.get_volume('PS_Neuropil_manual')
neuropil.color = (250, 250, 250, .075)
LN_color = '#5D8B90'
n_rows = 3
n_cols = 5
alpha = 1
fig = plt.figure(figsize=(n_cols*2, n_rows*2))
gs = plt.GridSpec(n_rows, n_cols, figure=fig, wspace=0, hspace=0)
axs = np.empty((n_rows, n_cols), dtype=object)
for i, index in enumerate(LNs.index):
neurons = pymaid.get_neurons(LNs.loc[index, :].values)
inds = np.unravel_index(i, shape=(n_rows, n_cols))
ax = fig.add_subplot(gs[inds], projection="3d")
axs[inds] = ax
navis.plot2d(x=[neurons, neuropil], connectors_only=False, color=LN_color, alpha=alpha, ax=ax)
ax.azim = -90
ax.elev = -90
ax.dist = 6
ax.set_xlim3d((-4500, 110000))
ax.set_ylim3d((-4500, 110000))
fig.savefig(f'plots/morpho_LNs.png', format='png', dpi=300, transparent=True)
# %%
# load and plot some PNs
PNs = pymaid.get_skids_by_annotation('mw PNs to plot')
# Fix: `pm` was never imported -- Promat is imported directly from contools.
PNs = Promat.extract_pairs_from_list(PNs, pairList=pairs)[0]
neuropil = pymaid.get_volume('PS_Neuropil_manual')
neuropil.color = (250, 250, 250, .075)
PN_color = '#1D79B7'
n_rows = 3
n_cols = 5
alpha = 1
fig = plt.figure(figsize=(n_cols*2, n_rows*2))
gs = plt.GridSpec(n_rows, n_cols, figure=fig, wspace=0, hspace=0)
axs = np.empty((n_rows, n_cols), dtype=object)
for i, index in enumerate(PNs.index):
    # Fix: copy-paste bug pulled skids from LNs while iterating PNs.
    neurons = pymaid.get_neurons(PNs.loc[index, :].values)
    inds = np.unravel_index(i, shape=(n_rows, n_cols))
    ax = fig.add_subplot(gs[inds], projection="3d")
    axs[inds] = ax
    navis.plot2d(x=[neurons, neuropil], connectors_only=False, color=PN_color, alpha=alpha, ax=ax)
    ax.azim = -90
    ax.elev = -90
    ax.dist = 6
    ax.set_xlim3d((-4500, 110000))
    ax.set_ylim3d((-4500, 110000))
fig.savefig('plots/morpho_PNs.png', format='png', dpi=300, transparent=True)
# %%
# plot all LNs types
import math
neuropil = pymaid.get_volume('PS_Neuropil_manual')
neuropil.color = (250, 250, 250, .075)
LN = Celltype_Analyzer.get_skids_from_meta_annotation('mw brain LNs')
ipsi = pymaid.get_skids_by_annotation('mw ipsilateral axon')
bilateral = pymaid.get_skids_by_annotation('mw bilateral axon')
contra = pymaid.get_skids_by_annotation('mw contralateral axon')
LN_ipsi = np.intersect1d(LN, ipsi)
LN_bilat = np.intersect1d(LN, bilateral)
LN_contra = np.intersect1d(LN, contra)
# create dataframes with left/right neuron pairs; nonpaired neurons had duplicated skids in left/right column
LN_ipsi = Promat.load_pairs_from_annotation(annot='', pairList=pairs, return_type='all_pair_ids_bothsides', skids=LN_ipsi, use_skids=True)
LN_bilat = Promat.load_pairs_from_annotation(annot='', pairList=pairs, return_type='all_pair_ids_bothsides', skids=LN_bilat, use_skids=True)
LN_contra = Promat.load_pairs_from_annotation(annot='', pairList=pairs, return_type='all_pair_ids_bothsides', skids=LN_contra, use_skids=True)
LN_color = '#5D8B90'
# sort LN_ipsi with published neurons first
pub = [7941652, 7941642, 7939979, 8311264, 7939890, 5291791, 8102935, 8877971, 8274021, 10555409, 7394271, 8273369, 17414715, 8700125, 8480418, 15571194]
pub_names = ['Broad D1', 'Broad D2', 'Broad T1', 'Broad T2', 'Broad T3', 'picky 0', 'picky 1', 'picky 2', 'picky 3', 'picky 4', 'choosy 1', 'choosy 2', 'ChalOLP', 'GlulOLP', 'OLP4', 'APL']
pub = pub + [4985759, 4620453]
pub_names = pub_names + ['']*(len(LN_ipsi.index) - len(pub_names))
LN_ipsi = LN_ipsi.set_index('leftid', drop=False)
LN_ipsi = LN_ipsi.loc[pub + list(np.setdiff1d(LN_ipsi.index, pub)), :]
LN_ipsi.index = range(len(LN_ipsi.index))
# ipsi LNs
n_cols = 8
n_rows = math.ceil(len(LN_ipsi)/n_cols) # round up to determine how many rows there should be
alpha = 1
zoom = 5.5
fig = plt.figure(figsize=(n_cols*2, n_rows*2))
gs = plt.GridSpec(n_rows, n_cols, figure=fig, wspace=0, hspace=0)
axs = np.empty((n_rows, n_cols), dtype=object)
for i, index in enumerate(LN_ipsi.index):
neurons = pymaid.get_neurons(np.unique(LN_ipsi.loc[index, :].values))
inds = np.unravel_index(i, shape=(n_rows, n_cols))
ax = fig.add_subplot(gs[inds], projection="3d")
axs[inds] = ax
navis.plot2d(x=[neurons, neuropil], connectors_only=False, color=LN_color, alpha=alpha, ax=ax)
ax.azim = -90
ax.elev = -90
ax.dist = zoom
ax.set_xlim3d((-4500, 110000))
ax.set_ylim3d((-4500, 110000))
ax.text(x=(ax.get_xlim()[0] + ax.get_xlim()[1])/2 + ax.get_xlim()[1]*0.05, y=ax.get_ylim()[1]*4/5, horizontalalignment="center", z=0,
s=pub_names[i], transform=ax.transData, color=LN_color, alpha=1, fontsize=10)
fig.savefig(f'plots/morpho_ipsi_LNs.png', format='png', dpi=300, transparent=True)
# bilat LNs
n_cols = 8
n_rows = math.ceil(len(LN_bilat)/n_cols) # round up to determine how many rows there should be
alpha = 1
fig = plt.figure(figsize=(n_cols*2, n_rows*2))
gs = plt.GridSpec(n_rows, n_cols, figure=fig, wspace=0, hspace=0)
axs = np.empty((n_rows, n_cols), dtype=object)
for i, index in enumerate(LN_bilat.index):
neurons = pymaid.get_neurons(np.unique(LN_bilat.loc[index, :].values))
inds = np.unravel_index(i, shape=(n_rows, n_cols))
ax = fig.add_subplot(gs[inds], projection="3d")
axs[inds] = ax
navis.plot2d(x=[neurons, neuropil], connectors_only=False, color=LN_color, alpha=alpha, ax=ax)
ax.azim = -90
ax.elev = -90
ax.dist = zoom
ax.set_xlim3d((-4500, 110000))
ax.set_ylim3d((-4500, 110000))
fig.savefig(f'plots/morpho_bilat_LNs.png', format='png', dpi=300, transparent=True)
# contra LNs
n_cols = 8
n_rows = math.ceil(len(LN_contra)/n_cols) # round up to determine how many rows there should be
alpha = 1
fig = plt.figure(figsize=(n_cols*2, n_rows*2))
gs = plt.GridSpec(n_rows, n_cols, figure=fig, wspace=0, hspace=0)
axs = np.empty((n_rows, n_cols), dtype=object)
for i, index in enumerate(LN_contra.index):
neurons = pymaid.get_neurons(np.unique(LN_contra.loc[index, :].values))
inds = np.unravel_index(i, shape=(n_rows, n_cols))
ax = fig.add_subplot(gs[inds], projection="3d")
axs[inds] = ax
navis.plot2d(x=[neurons, neuropil], connectors_only=False, color=LN_color, alpha=alpha, ax=ax)
ax.azim = -90
ax.elev = -90
ax.dist = zoom
ax.set_xlim3d((-4500, 110000))
ax.set_ylim3d((-4500, 110000))
fig.savefig(f'plots/morpho_contra_LNs.png', format='png', dpi=300, transparent=True)
# %%
# plot many 2nd-order PNs
neuropil = pymaid.get_volume('PS_Neuropil_manual')
neuropil.color = (250, 250, 250, .075)
# Fix: `ct` and `pm` aliases were never imported (NameError); use the names
# imported directly from contools at the top of the script.
LN = Celltype_Analyzer.get_skids_from_meta_annotation('mw brain LNs')
MBINs = Celltype_Analyzer.get_skids_from_meta_annotation('mw brain MBINs')
MBONs = Celltype_Analyzer.get_skids_from_meta_annotation('mw brain MBONs')
KCs = Celltype_Analyzer.get_skids_from_meta_annotation('mw brain KCs')
uPNs = pymaid.get_skids_by_annotation('mw uPN')
mPNs = pymaid.get_skids_by_annotation('mw mPN')
outputs = Celltype_Analyzer.get_skids_from_meta_annotation('mw brain outputs')
exclude = LN + MBINs + MBONs + KCs + uPNs + mPNs + outputs + [5327961, 11184236] + [12740290, 2432564] #potential LN that didn't pass threshold and a quasi-dSEZ neuron
PN_guste = pymaid.get_skids_by_annotation('mw gustatory-external 2nd_order')
PN_guste = np.setdiff1d(PN_guste, exclude)
PN_guste, _, _nonpaired = Promat.extract_pairs_from_list(PN_guste)
PN_gustp = pymaid.get_skids_by_annotation('mw gustatory-pharyngeal 2nd_order')
PN_gustp = np.setdiff1d(PN_gustp, exclude)
PN_gustp, _, _nonpaired = Promat.extract_pairs_from_list(PN_gustp)
PN_ent = pymaid.get_skids_by_annotation('mw enteric 2nd_order')
PN_ent = np.setdiff1d(PN_ent, exclude)
PN_ent, _, _nonpaired = Promat.extract_pairs_from_list(PN_ent)
PNs = pd.concat([PN_guste.loc[0:9, :], PN_gustp.loc[0:8, :], PN_ent.loc[0:8, :]]).reset_index(drop=True)
# plot the selected 2nd-order PN pairs in a grid
n_cols = 4
n_rows = math.ceil(len(PNs)/n_cols) # round up to determine how many rows there should be
alpha = 1
PN_color = '#1D79B7'
fig = plt.figure(figsize=(n_cols*2, n_rows*2))
gs = plt.GridSpec(n_rows, n_cols, figure=fig, wspace=0, hspace=0)
axs = np.empty((n_rows, n_cols), dtype=object)
for i, index in enumerate(PNs.index):
    neurons = pymaid.get_neurons(np.unique(PNs.loc[index, :].values))
    inds = np.unravel_index(i, shape=(n_rows, n_cols))
    ax = fig.add_subplot(gs[inds], projection="3d")
    axs[inds] = ax
    navis.plot2d(x=[neurons, neuropil], connectors_only=False, color=PN_color, alpha=alpha, ax=ax)
    ax.azim = -90
    ax.elev = -90
    ax.dist = 6
    ax.set_xlim3d((-4500, 110000))
    ax.set_ylim3d((-4500, 110000))
fig.savefig('identify_neuron_classes/plots/morpho_non-uPN-mPN_PNs.png', format='png', dpi=300, transparent=True)
# %%
|
984,108 | dbabde4bc59d7dfa2b3f1af4133cf3e57b8f02c7 | def tambölenleribulma(sayı):
tam_bölenler= []
for i in range(2,sayı):
if (sayı % i== 0):
tam_bölenler.append(i)
return tam_bölenler
# Interactive loop: print proper divisors of each entered number; 'q' quits.
while True:
    sayı = input("sayı")
    if sayı == "q":
        print("program sonlandırılıyor..")
        break
    print("tam bölenleri:", tambölenleribulma(int(sayı)))
984,109 | 3076f5645718c2f5af2d39d2054083542530d7e4 | # -*- coding: utf-8 -*-
""" DESCRIPTION:
This fMRI experiment displays different sentences with motion or non-motion verbs.
The script awaits a trigger pulse from the scanner with the value "t"
/Roberta Rocca & Mikkel Wallentin 2018 (with some of the code adapted from Jonas LindeLoev: https://github.com/lindeloev/psychopy-course/blob/master/ppc_template.py)
Structure:
- Import modules
- Set monitor variables
- Get participants info using GUI
- INITIALIZE WINDOW
- PREPARE STIMULI
- RESPONSES AND OTHER COMMANDS
- OUTPUT LOGFILE
- FUNCTIONS FOR EXPERIMENTAL LOOP
- Define the experimental loop!
- SET UP INSTRUCTIONS
- RUN THE EXPERIMENT
"""
############# IMPORT MODULES ################################
from __future__ import division
from psychopy import core, visual, event, gui, data, monitors
from itertools import product
from random import sample
import pandas as pd
import os
############# SET MONITOR VARIABLES ###########################
# Monitor parameters
MON_distance = 60 # Distance between subject's eyes and monitor
MON_width = 34 # Width of your monitor in cm
MON_size = [1440, 900] # Pixel-dimensions of your monitor, width / height
MON_frame_rate = 60 # Hz: usually 60, but you can always check with the function getActualFrameRate
# Create monitor
my_monitor = monitors.Monitor('testMonitor', width=MON_width, distance=MON_distance) # Create monitor object from the variables above. This is needed to control size of stimuli in degrees.
my_monitor.setSizePix(MON_size)
############# GET PARTICIPANT INFO USING GUI ###########################
# Initialize dictionary with required variables
EXP_info = {'ID':'',
'age':'',
'gender':['female','male'],
'Scanner':['Prisma','Skyra'],
'Scan day': ['Tuesday','Wednesday']}
# DlgFromDict creates dialog with fields specified in dictionary to gather required info
if not gui.DlgFromDict(EXP_info, title = 'Semantics experiment', order=['ID', 'age', 'gender','Scanner','Scan day']).OK: # dialog box; order is a list of keys
core.quit()
############# INITIALIZE WINDOW ###########################
# Initialize window for stimulus presentation
win = visual.Window(monitor = my_monitor,
units = 'deg',
fullscr = False, # Set to True
allowGUI = False,
color = 'black') # Initiate psychopy Window as the object "win", using the myMon object from last line. Use degree as units!
############# PREPARE STIMULI ################################
# Experiment details
# 12 min. exp. = 720 sec. -> 144 trials with stimulus duration 1500 ms onset asyncrony of 5 s (i.e. 3.5 s fixation cross in between on average.
# Specify conditions
condition = 'sentence_exp' # Here just one. If there are more conditions, add here or call in GUI.
# Define sentence components
STIM_pros = ('We ','You ','They ') #Initial pronoun
STIM_verbs = ('walk ','run ','jump ','stand ','sit ','lie ') #three motion verbs, three non-motion verbs
STIM_preps = ('in ', 'into ') # static / dynamic prepositions
STIM_locs = ('the house', 'the garden','the culture','the system') #one concrete one abstract
# Timing details
STIM_dur = int(1.5 * MON_frame_rate) # duration in seconds multiplied by 60 Hz and made into integer
STIM_delays = (140, 180, 240, 280) # different time intervals between stimuli mean 3.5 sec x 60 hz refresh rate = 210, in order to make less predictable and increase power.
STIM_repetitions = 1
STIM_trials = 144
# The image size and position using ImageStim, file info added in trial list below.
STIM_image = visual.ImageStim(win,
mask=None,
pos=(0.0, 0.0), # Should be default
size=(14.0, 10.5),
ori=1)
# Prepare Fixation cross, i.e. just the character "+".
STIM_fix = visual.TextStim(win, '+')
############# RESPONSES AND OTHER COMMANDS ################################
# Relevant keys
KEYS_quit = ['escape'] # Keys that quits the experiment
KEYS_trigger = ['t'] # The MR scanner sends a "t" to notify that it is starting
KEYS_response = ['y', 'b'] # Yellow and blue buttons on response box
############# OUTPUT LOGFILE ################################
# Define output folder
OUTPUT_folder = 'sentence_exp_data' # Log is saved to this folder. The folder is created if it does not exist (see ppc.py).
# If folder does not exist, do create it
if not os.path.exists(OUTPUT_folder):
os.makedirs(OUTPUT_folder)
# Set filename
date = data.getDateStr()
OUTPUT_filename = OUTPUT_folder + '/log_' + str(EXP_info['ID']) + '_' + date + '.csv'
############# FUNCTIONS FOR EXPERIMENTAL LOOP ########################
##### Create all combos and store in a dataframe
def make_trial_list(delays_list, nr_trials, duration): # Add condition as argument and to combinations function if you have many
    """Build the global trial dataframe STIM_comb_df.

    Crosses all sentence components, shuffles the rows, attaches a shuffled
    per-trial delay, and adds participant-info plus empty response columns.
    """
    cols_sent = ['pronoun', 'verb', 'preposition', 'location']
    # Create a df with all combinations of sentence components
    global STIM_comb_df
    STIM_comb_df = pd.DataFrame(list(product(STIM_pros, STIM_verbs, STIM_preps, STIM_locs)), columns = cols_sent)
    STIM_comb_df = STIM_comb_df.sample(frac = 1).reset_index(drop=True)
    # create list with delays, and shuffle
    STIM_comb_df['delay'] = int(nr_trials/len(delays_list)) * delays_list
    # NOTE(review): random.sample over a pandas Series relies on positional
    # Series indexing, which newer pandas removed -- confirm pandas version.
    STIM_comb_df['delay'] = sample(STIM_comb_df['delay'], nr_trials)
    # Add info on duration (do as in delay or shift this part to the experimental loop if you have different durations)
    STIM_comb_df['duration_frames'] = duration
    # Add default to other relevant columns
    cols_ID = ['ID', 'age', 'gender', 'Scanner', 'Scan_day']
    cols_resp = ['onset','offset', 'duration_measured','response', 'key_t', 'rt', 'correct_resp']
    STIM_comb_df = pd.concat([STIM_comb_df, pd.DataFrame(columns = cols_ID + cols_resp)], axis = 1)
    # Fill with ID info
    # NOTE(review): assumes EXP_info's insertion order matches cols_ID -- verify
    STIM_comb_df[cols_ID] = EXP_info.values()
    # Fill with default for response info
    STIM_comb_df[cols_resp] = ''
    # Add trial number
    # NOTE(review): hard-coded 145 assumes nr_trials == 144 -- confirm
    STIM_comb_df['trial_nr'] = range(1, 145)
##### Define the experimental loop!
def run_condition(condition):
    """
    Runs a block of trials. This is the presentation of stimuli,
    collection of responses and saving the trial
    """
    # Make global changes to the dataframe
    global STIM_comb_df
    # Loop over trials
    for i in range(STIM_trials):
        # Clear keyboard record
        event.clearEvents(eventType='keyboard')
        # Prepare image
        stim_sent = STIM_comb_df['pronoun'][i] + STIM_comb_df['verb'][i] + STIM_comb_df['preposition'][i] + STIM_comb_df['location'][i]
        # Display image and monitor time
        time_flip = core.monotonicClock.getTime() # onset of stimulus
        for frame in range(STIM_dur):
            # NOTE(review): a new TextStim is built every frame; building it
            # once per trial before this loop would reduce frame-drop risk.
            stim_sentence = visual.TextStim(win=win, text=stim_sent, pos=[0,0], height=1, alignHoriz='center')
            stim_sentence.draw()
            win.flip()
        # Display fixation cross
        offset = core.monotonicClock.getTime() # offset of stimulus
        for frame in range(STIM_comb_df['delay'][i]):
            STIM_fix.draw()
            win.flip()
        # Get actual duration at offset
        #Log time variables
        STIM_comb_df['onset'][i]= time_flip - exp_start
        STIM_comb_df['offset'][i] = offset - exp_start
        STIM_comb_df['duration_measured'][i] = offset - time_flip
        # Check responses
        keys = event.getKeys(keyList=('y','b','escape'), timeStamped=True) # timestamped according to core.monotonicClock.getTime() at keypress. Select the first and only answer.
        # Log first response only
        if keys:
            key = keys[0][0]
            time_key = keys[0][1]
            # Log info on responses
            STIM_comb_df['response'][i] = key
            STIM_comb_df['key_t'][i] = time_key - exp_start
            STIM_comb_df['rt'][i] = time_key - time_flip
            # Check if responses are correct
            if STIM_comb_df['response'][i] == 'y':
                STIM_comb_df['correct_resp'][i] = 1 if STIM_comb_df['verb'][i] in STIM_verbs[0:3] else 0
            if STIM_comb_df['response'][i] == 'b':
                STIM_comb_df['correct_resp'][i] = 1 if STIM_comb_df['verb'][i] not in STIM_verbs[0:3] else 0
            # Check if escape key was pressed
            # NOTE(review): this check must stay inside `if keys:` because
            # `key` is undefined on trials with no keypress -- confirm the
            # original indentation intended this.
            if key in KEYS_quit:
                STIM_comb_df.to_csv(OUTPUT_filename)
                win.close()
                core.quit()
############# SET UP INSTRUCTIONS ########################
def msg(txt):
    """Render *txt* centred on the window and flip it into view."""
    prompt = visual.TextStim(win, pos =[0,0], text = txt, height = 0.75, alignHoriz = 'center')
    prompt.draw()  # draw into the back buffer, then reveal it
    win.flip()
introText = '''
Welcome.
In this experiment you have to read sentences.
Press button with INDEX finger if meaning CLEAR.
Press button with MIDDLE finger if meaning STRANGE.
The experiment will start in a few moments
'''
outroText = '''
This is the end of the experiment.
Thank you for your participation!
'''
############# RUN THE EXPERIMENT ########################
# Setup the dataframe
make_trial_list(STIM_delays, STIM_trials, STIM_dur)
# Display instructions
msg(introText)
#Wait for scanner trigger "t" to continue
event.waitKeys(keyList = KEYS_trigger)
# Start timer
exp_start = core.monotonicClock.getTime()
# Run the experimental loop
run_condition('sentence_exp')
# save logfile
STIM_comb_df.to_csv(OUTPUT_filename)
# outro
msg(outroText)
# stop experiment
win.close()
core.quit()
|
984,110 | 5d0a595a7d014e2dec88daed14e8cf54b56aef60 | num=int(input("enter any number"))
count=0
while(num>=1):
num=num//10
count=count+1
print(count)
Result:
enter any number123456
6
|
984,111 | b07c2026fb748960254d950699abbd1ffc080330 | import re
from sys import stdout
from learn.embedding import *
def tactic_simplify(tactic):
    """
    simplify the tactic, e.g. apply H0. -> apply
    :param tactic:
    :return: name of the tactic
    """
    # Split on the first ';', '.', space, or parenthesis; the head token is
    # the tactic name. (Escapes inside a character class are redundant.)
    head, *_ = re.split(r"[;. ()]", tactic)
    return head
def render_natural(goals, tactics, filename=None):
    """Write goals and their simplified tactics in a plain text format.

    :param goals: sequence of CoqGoalSingle objects.
    :param tactics: tactic strings, positionally parallel to ``goals``.
    :param filename: output path; when None, write to stdout.
    """
    if filename is not None:
        f = open(filename, 'w')
    else:
        f = stdout
    try:
        f.write("%d\n" % len(goals))
        # enumerate() instead of goals.index(goal): index() was O(n) per
        # goal and paired duplicated goals with the wrong tactic.
        for idx, goal in enumerate(goals):
            assert isinstance(goal, CoqGoalSingle)
            f.write("%d\n" % len(goal.hypothesis))
            for hypo in goal.hypothesis:
                assert isinstance(hypo, CoqHypothesis)
                lsthypo = hypo.ids + term2seq(hypo.type)
                f.write(str(lsthypo))
                f.write("\n")
            f.write(str(term2seq(goal.type)))
            f.write("\n")
            f.write(tactic_simplify(tactics[idx]))
            f.write("\n")
    finally:
        # Close only what we opened; never close stdout.
        if f is not stdout:
            f.close()
def render(dataset, filename=None):
    """Serialize each goal and its hypotheses as embedding vectors.

    :param dataset: sequence of CoqGoalSingle objects.
    :param filename: output path; when None, write to stdout.
    """
    if filename is not None:
        # NOTE(review): the file is never closed on this path -- relies on
        # interpreter cleanup; consider a with-block.
        f = open(filename, 'w')
    else:
        f = stdout
    f.write("%d\n" % len(dataset))
    for goal in dataset:
        assert isinstance(goal, CoqGoalSingle)
        # hypothesis count first, then one serialized vector per hypothesis
        f.write("%d\n" % len(goal.hypothesis))
        for hypo in goal.hypothesis:
            assert isinstance(hypo, CoqHypothesis)
            vec_hypo = serialize(hypo.type)
            f.write(vec2text(vec_hypo))
        # finally the serialized goal term itself
        f.write(vec2text(serialize(goal.type)))
f.write("\n") |
984,112 | 29a8a2b7e5d227a46a98f29e8c20978e5d6ccc1f | import numpy as np
from scipy.signal import StateSpace, dlsim
import matplotlib.pyplot as plt
# --- constants -------------------------------------------------------------
# Channel gain matrix: G[i][j] is the gain from transmitter j to receiver i.
G = np.asarray([[1.0, 0.2, 0.1],
                [0.1, 2.0, 0.1],
                [0.3, 0.1, 3.0]])
r = 3      # target SINR
a = 1.2    # safety margin on the target
e = 0.1    # noise amplitude (noise power is e*e)
# --- build the discrete-time power-update system ---------------------------
# Off-diagonal entries couple each link's power to the interferers' powers.
A = np.asarray([[0., a*r*G[0][1]/G[0][0], a*r*G[0][2]/G[0][0]],
                [a*r*G[1][0]/G[1][1], 0., a*r*G[1][2]/G[1][1]],
                [a*r*G[2][0]/G[2][2], a*r*G[2][1]/G[2][2], 0.]])
B = np.asarray([[a*r/G[0][0]],
                [a*r/G[1][1]],
                [a*r/G[2][2]]])
C = np.asarray([0., 0., 0.])
D = np.asarray([0.])
plane_sys = StateSpace(A, B, C, D, dt = 1)
# simulation horizon: 30 unit steps
t = np.arange(0, 30, 1)
# Constant excitation at the noise power e*e.
# (renamed from `input`, which shadowed the built-in input())
noise_input = e * e * np.ones(len(t))
_, y, x = dlsim(plane_sys, noise_input, t, x0=[0.1, 0.1, 0.1])
# p[i] is the power trajectory of link i
p = x.T
#Calculate S
def calculateS(p1, p2, p3, G1, G2, G3, noise_power=0.1 ** 2):
    """Return the SINR trajectory of one link.

    s[i] = G1*p1[i] / (noise_power + G2*p2[i] + G3*p3[i])

    :param p1: own-link power trajectory.
    :param p2, p3: interfering links' power trajectories (same length).
    :param G1: own-channel gain; G2, G3: interference gains.
    :param noise_power: receiver noise power; the default 0.1**2 equals
        the module-level e*e this function previously read implicitly,
        so existing callers see identical results.
    """
    s = np.zeros(len(p1))
    for i in range(len(p1)):
        s[i] = G1 * p1[i] / (noise_power + G2 * p2[i] + G3 * p3[i])
    return s
# Per-link SINR: own gain over noise plus the two interference terms.
S1 = calculateS(p[0], p[1], p[2], G[0][0], G[0][1], G[0][2])
S2 = calculateS(p[1], p[0], p[2], G[1][1], G[1][0], G[1][2])
S3 = calculateS(p[2], p[0], p[1], G[2][2], G[2][0], G[2][1])
S = np.asarray([S1, S2, S3])
# Figure 1: power trajectories of the three links.
plt.figure(1)
for i in range(3):
    plt.plot(t, p[i], label = 'p' + str(i+1))
plt.ylabel('power level')
plt.xlabel('t [s]')
plt.legend()
plt.show()
# Figure 2: SINR of each link against the desired level a*r.
plt.figure(2)
for i in range(3):
    plt.plot(t, S[i], label = 'S' + str(i+1))
plt.plot(t, a*r*np.ones(len(t)), label = 'Desired SINR')
plt.ylabel('ratio')
plt.xlabel('t [s]')
plt.legend()
plt.show()
984,113 | e80660925a09d43be723cd425b26db666ecec666 | """
This is my hailstone unit test suite.
It uses the unittest framework. Admittedly, it does not do much.
"""
import unittest
from hail1 import f
class TestHailStones(unittest.TestCase):
    """Test case for the hailstone sequence-length function ``f``."""

    def test_f(self):
        """Check f(n) for n = 1..10 against known sequence lengths."""
        expected = [0, 0, 1, 7, 2, 5, 8, 16, 3, 19, 6]
        for n in range(1, 11):
            print(n)
            self.assertEqual(f(n), expected[n])
def foo( ):
    """
    An independent, deliberately empty function.

    It exists only to illustrate the docstring of a function that is not
    a member of a class.
    """
pass |
984,114 | 569651d9998fc4753bbd18cf2b6215e650be042d | from ..environment_variables import monkey_available
# Import each optional backend only when its environment flag is set.
# NOTE(review): the import itself presumably has registration side
# effects -- confirm in the backend modules.
if monkey_available('AMQP_BACKEND'):
    from . import amqp  # noqa
    # harmless attribute access: marks the module as "used" for linters
    amqp.__package__
if monkey_available('RPC_BACKEND'):
    from . import rpc  # noqa
    rpc.__package__
|
984,115 | ac0db50cfb24d3dcc2b1f250122ba08cd1c01e2b | import algorithms.a_star_path_finding as pf
import astar_utils as autils
# reload() is the Python 2 builtin; this script is presumably run
# interactively (e.g. ipython --pylab, which also supplies figure/clf/show
# used below -- TODO confirm) so edits to astar_utils are picked up.
reload(autils)
a = pf.AStar()
# (x, y) cells that are blocked in the 6x6 grid
walls = ((0, 5), (1, 0), (1, 1), (1, 5), (2, 3),
         (3, 1), (3, 2), (3, 5), (4, 1), (4, 4), (5, 1))
start = (0,0)
end= (5,5)
a.init_grid(6, 6, walls, start, end)
path = a.solve()
# plot the grid and the found path
fig=figure(1)
clf()
autils.plotAstar(a,path)
show()
|
984,116 | ad4bc29dfe26bc097c2d5d8b06e11867e3a33ae5 | for i in xrange(input()):
if input() <= 8000:
print("Inseto!")
else:
print("Mais de 8000!")
|
984,117 | bfae340a168aa2a69d843eb7d9a9e0779875e032 | from flask import Blueprint
passport_blu = Blueprint("passport_blu",__name__,url_prefix="/passport")
from .views import passport_blu |
984,118 | 75f369c1d855704373255ab7cadbfcd25cf83899 | #!/usr/bin/python3
#coding=utf-8
############################################################
#
# 本脚本提前执行一次,用以保存登陆状态
#
############################################################
import os,sys
if not sys.version_info[0] == 3:
print("当前脚本只能在python3.x下运行,请更换您的python版本!")
sys.exit()
import api
import json
def main():
    """Log in interactively and persist the session for other scripts.

    Prompts for credentials, logs in through the live API (no captcha
    needed), then stores the cookies next to this script and records the
    username in username.config.
    """
    root_path = os.path.dirname(os.path.realpath(sys.argv[0]))
    client = api.Client()
    username = input('请输入您的账号:')
    password = input('请输入您的密码:')
    cookies_file = os.path.join(root_path, username + ".cookies")
    # Old web-login flow (needed a captcha image) -- kept for reference:
    # chapter_file = os.path.join(root_path, "captcha.png")
    # if client.login(username, password, chapter_file):
    # The live API needs no captcha, so the web login above was dropped.
    if client.live_login(username, password):
        # persist the session cookies
        client.save_cookies(cookies_file)
        # record the username so other scripts can find the cookie file
        config_path = os.path.join(root_path, 'username.config')
        json_content = {"username": username}
        f = open(config_path, 'w')
        f.write(json.dumps(json_content))
        f.close()
        # greet the user to confirm the login worked
        client.get_account_info()
        print('欢迎您:', client.userdata['uname'])
        print('登陆状态已储存,您现在可以使用其他功能脚本啦')
    else:
        sys.exit()
if __name__ == '__main__':
main() |
984,119 | b1c1e975db2e2a76c48253135a752bb35a3ac8be | """
Attenzione: Questo programma rispetta quello originale ma ne è stato fatto il porting per python3
"""
# Moduli
from sys import argv
import string
# Global data structures
vocali = ('a', 'e', 'i', 'o', 'u')
mesi = ('a', 'b', 'c', 'd', 'e', 'h', 'l', 'm', 'p', 'r', 's', 't')
comuni = {'udine': 'l483', 'verona': 'l781', 'legnago': 'e512', 'thiene': 'l157', 'cina': 'z210'}
# Check-character tables
alfabeto = string.ascii_lowercase
# Even positions: digits map to their value, letters to their alphabet index.
regole_pari = {}
for valore, lettera in enumerate(alfabeto):
    regole_pari[lettera] = valore
for cifra in range(10):
    regole_pari[str(cifra)] = cifra
# Odd positions: official lookup values for '0'-'9' and 'a'-'j' ...
temp_tuple = (1, 0, 5, 7, 9, 13, 15, 17, 19, 21)
regole_dispari = {}
for i, valore in enumerate(temp_tuple):
    regole_dispari[str(i)] = valore
    regole_dispari[alfabeto[i]] = valore
# ... and for the remaining letters 'k'-'z'.
temp_tuple2 = (2, 4, 18, 20, 11, 3, 6, 8, 12, 14, 16, 10, 22, 25, 24, 23)
regole_dispari.update(zip(alfabeto[10:], temp_tuple2))
# Remainder (mod 26) of the position-weighted sum selects the check letter.
regole_resto = list(alfabeto)
# ------------------------------
def estrai_nome_cognome(aString, vocali=('a', 'e', 'i', 'o', 'u')):
    """Return the 3-letter code extracted from a (lowercase) name.

    Consonants are taken first, in order; if fewer than three, the
    earliest not-yet-used characters are appended; finally the result is
    padded with 'x' (official rule for short names).  The *vocali*
    parameter defaults to the module-level vowel tuple, so existing
    callers are unaffected.
    """
    temp_string = ''
    for aChar in aString:
        if not aChar in vocali:
            temp_string += aChar
        if len(temp_string) >= 3:
            break
    index = 0
    # Bug fix: bound the scan by the input length -- the old loop raised
    # IndexError on names shorter than three distinct letters.
    while len(temp_string) < 3 and index < len(aString):
        if not aString[index] in temp_string:
            temp_string += aString[index]
        index += 1
    # Official padding rule for very short names.
    while len(temp_string) < 3:
        temp_string += 'x'
    return temp_string
def genera_mese(unMese):
    """Map a month number string '01'..'12' to its codice-fiscale letter."""
    return mesi[int(unMese)-1]
def codice_comune(comune):
    """Return the cadastral code of *comune* (raises KeyError if unknown)."""
    return comuni[comune]
def genera_giorno(unGiorno, unSesso):
    """Return the day-of-birth field of the codice fiscale.

    Men keep the day as given; women add 40 to it.  Returns None for an
    out-of-range day or an unknown sex code (preserving the original
    behaviour for invalid input).
    """
    # Bug fix: range(1, 31) excluded day 31; valid days are 1..31.
    if int(unGiorno) in range(1, 32):
        if unSesso == 'm':
            return unGiorno
        elif unSesso == 'f':
            return str(int(unGiorno)+40)
def genera_codice_controllo(aCodiceFiscale):
    """Compute the final check letter of a 15-character codice fiscale.

    Characters at odd positions (1st, 3rd, ... -- even 0-based indexes)
    are looked up in ``regole_dispari``, the others in ``regole_pari``;
    the summed values modulo 26 select the letter in ``regole_resto``.
    """
    totale = 0
    for posizione, carattere in enumerate(aCodiceFiscale):
        if posizione % 2 == 0:
            totale += int(regole_dispari.get(carattere))
        else:
            totale += int(regole_pari.get(carattere))
    return regole_resto[totale % 26]
def main(argv):
    """Build a codice fiscale from command-line arguments.

    argv layout: [script, cognome, nome, data_nascita(gg/mm/aaaa),
    comune, sesso(m/f)].  Returns the (lowercase) codice fiscale.
    """
    # Interactive prompts from the pre-port version, kept for reference:
    # nome = raw_input("Nome: ").lower()
    # cognome = raw_input("Cognome: ").lower()
    # data_nascita = raw_input("Data di nascita (gg/mm/aaaa): ").lower()
    # comune = raw_input("Comune di nascita: ").lower()
    # sesso = raw_input("Sesso (m/f): ").lower()
    cognome = argv[1].lower()
    nome = argv[2].lower()
    data_nascita = argv[3].lower()
    comune = argv[4].lower()
    sesso = argv[5].lower()
    # three-letter codes for name and surname
    nomeCF = estrai_nome_cognome(nome)
    cognomeCF = estrai_nome_cognome(cognome)
    data_nascitaCF = data_nascita.split("/")
    # last two digits of the year
    anno_nascitaCF = data_nascitaCF[2][2:]
    mese_nascitaCF = genera_mese(data_nascitaCF[1])
    giorno_nascitaCF = genera_giorno(data_nascitaCF[0], sesso)
    # 15-character body: surname + name + year + month + day + comune code
    codice_fiscale = cognomeCF + nomeCF + anno_nascitaCF + mese_nascitaCF + giorno_nascitaCF + codice_comune(comune.lower())
    codiceCF = genera_codice_controllo(codice_fiscale)
    codice_fiscale += codiceCF
    return codice_fiscale
if __name__ == '__main__': # pragma: no cover
main(argv)
|
984,120 | 5e4636fb0c20bf98dae9c5259dc0c0d1386d94e8 | from misc import queries
import re
from modules.users import auxiliary
async def _change_debts_dictionary(debtor_uid, creditor_uid, money):
    """
    Record a new credit in the debts table, netting opposite debts.

    If the creditor already owes the debtor, the two debts are offset
    against each other first.

    :param debtor_uid: user id of the debtor (who now owes money).
    :param creditor_uid: user id of the creditor (who gave the money).
    :param money: amount of the new credit.
    """
    # how much the creditor currently owes the debtor (for netting)
    debt_of_creditor = int((await queries.get_debt(creditor_uid, debtor_uid))['value'])
    # creditor owes nothing: just add the full amount to the debtor's debt
    if debt_of_creditor == 0:
        await queries.update_debt(money, debtor_uid, creditor_uid)
    # creditor's debt fully cancelled; debtor owes the remainder
    elif debt_of_creditor <= money:
        await queries.update_debt(money - debt_of_creditor, debtor_uid, creditor_uid)
        await queries.update_debt(0, creditor_uid, debtor_uid)
    # creditor still owes something after subtracting the new credit
    elif debt_of_creditor > money:
        current_debt_of_creditor = int((await queries.get_debt(creditor_uid, debtor_uid))['value'])
        await queries.update_debt(current_debt_of_creditor - money, creditor_uid, debtor_uid)
        await queries.update_debt(0, debtor_uid, creditor_uid)
async def give(message):
    """
    Record debts for the users listed in a "/give <amount> @user..." message.

    :param message: incoming message dict (Telegram-style).
    :return: reply text for the sender.
    """
    # sender identity
    uid = message['from']['id']
    name = message['from']['first_name']
    if not re.fullmatch(r'/give [1-9]+[0-9]*( @\w+)+', message['text']):
        # message fails to parse
        return 'Message does not match the required format. Check rules in /help.'
    # amount of money given
    money = int(message['text'].split(' ')[1])
    # aliases mentioned after the amount
    aliases_raw = message['text'].replace('@', '').split(' ')[2:]
    # validated aliases plus those that failed validation
    aliases, fail_verification, fail_verification_str = await auxiliary.check_presence_users(aliases_raw)
    if message['from']['username'] in aliases:
        return 'You cannot lend money to yourself twice!'
    if len(fail_verification) != 0:
        # some aliases fail validation
        return 'User with alias(es) ' + fail_verification_str + 'do(es) not registered in bot.'
    # each participant's share; the +1 counts the giver himself
    # NOTE(review): confirm the giver is meant to be included in the split.
    share_give = money / (len(aliases) + 1)
    # accumulate "@alias " mentions for the reply while recording debts
    aliases_str = ''
    for alias_ in aliases:
        aliases_str += '@' + alias_ + ' '
        # resolve the debtor's user id
        debtor_uid = (await queries.get_user_by_alias(alias_))['uid']
        # record the debt (with netting)
        await _change_debts_dictionary(debtor_uid, uid, share_give)
    message_give = name + ', you have given ' + str(share_give) + ' to ' + aliases_str
    return message_give
async def share(message):
    """
    Split an amount of money evenly across every registered bot user.

    :param message: incoming message dict (Telegram-style).
    :return: reply text for the sender.
    """
    sender = message['from']
    creditor_uid = sender['id']
    creditor_name = sender['first_name']
    # validate "/share <positive integer>"
    if not re.fullmatch(r'/share [1-9]+[0-9]*', message['text']):
        return 'Message does not match the required format. Check rules in /help.'
    amount = int(message['text'].split(' ')[1])
    # every registered user takes part in the split, sender included
    all_users = await queries.get_users()
    per_user = amount / len(all_users)
    # record a (netted) debt for everyone except the sender
    for participant in all_users:
        if participant['uid'] == creditor_uid:
            continue
        await _change_debts_dictionary(participant['uid'], creditor_uid, per_user)
    return creditor_name + ', ' + str(amount) + ' was shared among all users of the bot.'
|
984,121 | 095dbe05df6c55cc61a6665afe171faa5d7d17db | import tempfile
import os
from pathlib import Path, PurePosixPath
import binascii
import pytest
import pygit2
import yaml
import gitpathlib
from gitpathlib import testutil
from gitpathlib import hex_oid
from gitpathlib.gp_pygit import PygitBackend
from gitpathlib.gp_subprocess import SubprocessBackend
@pytest.fixture(scope='session')
def testrepo(tmpdir_factory):
    """Session-wide git repository built from the YAML description below.

    Three commits are created (via testutil.make_repo); the HEAD tree
    contains regular files, relative/absolute/broken/looping symlinks and
    an executable that the tests in this module exercise.
    """
    contents = yaml.safe_load("""
        - tree:
            same:
                file: |
                    Here are the contents of a file
            same2:
                file: |
                    Here are the contents of a file
            extra:
                file: |
                    Here are the contents of a file
                extra:
                    Here are the contents of a file
            diff-filename:
                different: |
                    Here are the contents of a file
            diff-content:
                file: |
                    Here are different contents
        - tree:
            dir:
                file: |
                    Here are old contents of a file
        - tree:
            dir:
                file: |
                    Here are the contents of a file
                link-up: [link, ..]
                link-dot: [link, .]
                link-self-rel: [link, ../dir]
                link-self-abs: [link, /dir]
                subdir:
                    file: contents
                    link-back: [link, ../..]
            file-utf8: ċóňťëñŧş ☺
            file-utf16: [binary, [255, 254, 11, 1, 243, 0, 72, 1, 101,
                                  1, 235, 0, 241, 0, 103, 1, 95, 1, 32,
                                  0, 58, 38]]
            file-binary: [binary, [115, 111, 109, 101, 0, 100, 97, 116,
                                   97, 255, 255]]
            file-lines: "unix\\nwindows\\r\\nmac\\rnone"
            link: [link, dir/file]
            broken-link: [link, nonexistent-file]
            link-to-dir: [link, dir]
            abs-link: [link, /dir/file]
            abs-link-to-dir: [link, /dir/]
            abs-broken-link: [link, /nonexistent-file]
            self-loop-link: [link, self-loop-link]
            abs-self-loop-link: [link, /self-loop-link]
            loop-link-a: [link, loop-link-b]
            loop-link-b: [link, loop-link-a]
            executable: [executable, '#!/bin/sh']
    """)
    path = os.path.join(str(tmpdir_factory.mktemp('repos')), 'testrepo')
    testutil.make_repo(path, contents)
    return pygit2.Repository(path)
@pytest.fixture(params=['pygit2', '/usr/bin/git'])
def get_path(request, testrepo):
    """Parametrized factory fixture: run every test against both backends.

    Yields ``_get_path(*args, **kwargs)`` which builds a GitPath rooted
    at the session test repo using the selected backend.
    """
    if request.param == 'pygit2':
        backend = PygitBackend()
    elif request.param == '/usr/bin/git':
        backend = SubprocessBackend()
        # collects assertions the subprocess backend defers until teardown
        backend._assertions = {}
    else:
        raise ValueError(request.param)
    def _get_path(*args, **kwargs):
        kwargs.setdefault('backend', backend)
        return gitpathlib.GitPath(testrepo.path, *args, **kwargs)
    yield _get_path
    # teardown: replay the deferred assertions recorded above
    if request.param == '/usr/bin/git':
        for assertion, paths in backend._assertions.items():
            print('Assertion:', assertion.__name__)
            for func, path in set(paths):
                print('  ', path.root[:7], path.parts[1:],
                      'in', func.__name__)
            # NOTE(review): only the last `path` of the loop above is
            # checked here -- confirm whether each path was intended.
            assertion(path)
@pytest.fixture
def part0(testrepo):
    """First element of GitPath.parts: '<real repo path>:<root tree hex>'."""
    root_tree_hex = testrepo.head.peel(pygit2.Tree).hex
    repo_dir = os.path.realpath(testrepo.path)
    return '{}:{}'.format(repo_dir, root_tree_hex)
@pytest.fixture
def cloned_repo(tmpdir, testrepo):
    # Fresh clone of the session repo; used to check that equal objects
    # in *different* repositories still compare equal.
    path = os.path.join(str(tmpdir), 'clonedrepo')
    return pygit2.clone_repository(testrepo.path, path)
def test_head(testrepo, get_path):
    """A GitPath built with no rev defaults to HEAD's root tree."""
    head_tree = testrepo.head.peel(pygit2.Tree)
    assert hex_oid(get_path()) == head_tree.hex
def test_parent(testrepo, get_path):
path = get_path('HEAD^')
parent = testrepo.head.peel(pygit2.Commit).parents[0]
assert hex_oid(path) == parent.peel(pygit2.Tree).hex
def test_components(testrepo, get_path):
path = get_path('HEAD', 'dir', 'file')
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
def test_parts_empty(get_path, part0):
path = get_path('HEAD')
assert path.parts == (part0, )
def test_parts(get_path, part0):
path = get_path('HEAD', 'dir', 'file')
assert path.parts == (part0, 'dir', 'file')
def test_parts_slash(get_path, part0):
path = get_path('HEAD', 'dir/file')
assert path.parts == (part0, 'dir', 'file')
def test_parts_slashdot(get_path, part0):
path = get_path('HEAD', 'dir/./file')
assert path.parts == (part0, 'dir', 'file')
def test_dotdot(get_path, part0):
path = get_path('HEAD', 'dir/../dir/file')
assert path.parts == (part0, 'dir', '..', 'dir', 'file')
def test_hash(get_path):
path1 = get_path('HEAD')
path2 = get_path('master')
assert hash(path1) == hash(path2)
def test_eq(get_path):
path1 = get_path('HEAD')
path2 = get_path('master')
assert path1 == path2
def test_eq_dir(get_path):
path1 = get_path('HEAD', 'dir')
path2 = get_path('HEAD', 'dir')
assert path1 == path2
def test_ne(get_path):
path1 = get_path('HEAD', 'dir')
path2 = get_path('HEAD', 'dir', 'file')
assert path1 != path2
def test_eq_across_repos(testrepo, cloned_repo):
path1 = gitpathlib.GitPath(testrepo.path)
path2 = gitpathlib.GitPath(cloned_repo.path)
assert path1 == path2
def test_ne_different_roots(get_path):
path1 = get_path('HEAD', 'dir', 'file')
path2 = get_path('HEAD:dir', 'file')
assert path1 != path2
def test_slash(testrepo, get_path):
path = get_path() / 'dir'
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir').hex
def test_slash_multiple(testrepo, get_path):
path = get_path() / 'dir' / 'file'
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
def test_slash_combined(testrepo, get_path):
path = get_path() / 'dir/file'
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
def test_slash_pathlib(testrepo, get_path):
path = get_path() / Path('dir/file')
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
def test_slash_absolute_str(testrepo, get_path):
path = get_path('HEAD', 'dir') / '/dir/file'
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
def test_slash_absolute_path(testrepo, get_path):
path = get_path('HEAD', 'dir') / Path('/dir/file')
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
def test_no_open(testrepo, get_path):
with pytest.raises(TypeError):
open(get_path('HEAD', 'dir', 'file'))
def test_str_and_repr(testrepo, get_path):
path = get_path('HEAD', 'dir', 'file')
repo = os.path.realpath(testrepo.path)
hex = testrepo.revparse_single('HEAD:').hex
expected = "gitpathlib.GitPath('{repo}', '{hex}', 'dir', 'file')".format(
repo=repo, hex=hex)
assert str(path) == expected
assert repr(path) == expected
def test_no_bytes(get_path):
with pytest.raises(TypeError):
path = get_path('HEAD', 'dir', 'file')
bytes(path)
def test_drive(get_path, testrepo):
path = get_path('HEAD', 'dir', 'file')
assert path.drive == os.path.realpath(testrepo.path)
def test_root(testrepo, get_path):
path = get_path('HEAD', 'dir', 'file')
assert path.root == testrepo.revparse_single('HEAD:').hex
def test_anchor(testrepo, get_path):
path = get_path('HEAD', 'dir', 'file')
repodir = os.path.realpath(testrepo.path)
tree = testrepo.revparse_single('HEAD:').hex
assert path.anchor == repodir + ':' + tree
def test_parents(get_path):
root = get_path()
path = root / 'dir' / 'file'
parents = path.parents
assert parents == (root / 'dir', root)
def test_parents_dotdot(get_path):
root = get_path()
path = root / 'dir' / '..' / 'file'
parents = path.parents
assert parents == (root / 'dir' / '..', root / 'dir', root)
def test_parent(get_path):
root = get_path()
path = root / 'dir'
assert path.parent == root
def test_parent_dotdot(get_path):
root = get_path()
path = root / 'dir' / '..' / 'file'
assert path.parent == root / 'dir' / '..'
def test_name(get_path):
path = get_path() / 'dir'
assert path.name == 'dir'
def test_name_root(get_path):
path = get_path()
assert path.name == ''
def test_suffix_and_friends_0(get_path):
path = get_path('HEAD', 'archive')
assert path.suffix == ''
assert path.suffixes == []
assert path.stem == 'archive'
def test_suffix_and_friends_1(get_path):
path = get_path('HEAD', 'archive.tar')
assert path.suffix == '.tar'
assert path.suffixes == ['.tar']
assert path.stem == 'archive'
def test_suffix_and_friends_2(get_path):
path = get_path('HEAD', 'archive.tar.gz')
assert path.suffix == '.gz'
assert path.suffixes == ['.tar', '.gz']
assert path.stem == 'archive.tar'
def test_suffix_and_friends_3(get_path):
path = get_path('HEAD', 'archive.tar.gz.xz')
assert path.suffix == '.xz'
assert path.suffixes == ['.tar', '.gz', '.xz']
assert path.stem == 'archive.tar.gz'
def test_as_posix_not_callable(get_path):
path = get_path()
with pytest.raises(TypeError):
path.as_posix()
def test_as_uri_not_callable(get_path):
path = get_path()
with pytest.raises(ValueError):
path.as_uri()
def test_is_absolute(get_path):
path = get_path()
assert path.is_absolute()
def test_is_reserved(get_path):
path = get_path()
assert not path.is_reserved()
def test_joinpath(testrepo, get_path):
path = get_path().joinpath('dir')
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir').hex
def test_joinpath_multiple(testrepo, get_path):
path = get_path().joinpath('dir', 'file')
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
def test_joinpath_combined(testrepo, get_path):
path = get_path().joinpath('dir/file')
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
def test_joinpath_pathlib(testrepo, get_path):
path = get_path().joinpath(Path('dir/file'))
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
def test_joinpath_absolute_str(testrepo, get_path):
path = get_path('HEAD', 'dir').joinpath('/dir/file')
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
def test_joinpath_absolute_path(testrepo, get_path):
path = get_path('HEAD', 'dir').joinpath(Path('/dir/file'))
assert hex_oid(path) == testrepo.revparse_single('HEAD:dir/file').hex
@pytest.mark.parametrize(
'pattern',
[
'file', '*le', 'dir/*le', '*',
'/dir/file', '/dir/*le', '*/file', '/dir/file/', '*/*', '/*/*'
]
)
def test_match_positive(get_path, pattern):
path = get_path('HEAD', 'dir', 'file')
assert path.match(pattern)
@pytest.mark.parametrize(
'pattern',
[
'bogus', 'dir', 'dir/',
'/dir/fi', '/*/*/*',
]
)
def test_match_negative(get_path, pattern):
path = get_path('HEAD', 'dir', 'file')
assert not path.match(pattern)
@pytest.mark.parametrize(
['path', 'expected'],
[
('dir', 'file'),
('/dir', 'file'),
('/', 'dir/file'),
('', 'dir/file'),
('dir/file', '.'),
]
)
def test_relative_to_positive(get_path, path, expected):
path1 = get_path('HEAD', 'dir', 'file')
path2 = get_path('HEAD', path)
assert path1.relative_to(path2) == PurePosixPath(expected)
@pytest.mark.parametrize(
['rev', 'path'],
[
('HEAD', 'dir/file'),
('HEAD:dir', 'file'),
('HEAD', 'diff'),
('HEAD:dir', '.'),
]
)
def test_relative_to_negative(get_path, rev, path):
path1 = get_path('HEAD', 'dir')
path2 = get_path(rev, path)
with pytest.raises(ValueError):
path1.relative_to(path2)
def test_with_name_positive(get_path, part0):
path = get_path('HEAD', 'dir', 'file')
path = path.with_name('otherfile')
assert path.parts == (part0, 'dir', 'otherfile')
def test_with_name_noname(get_path):
path = get_path('HEAD')
with pytest.raises(ValueError):
path = path.with_name('otherfile')
@pytest.mark.parametrize('badname', ['', 'bad/name', 'bad\0name'])
def test_with_name_badname(get_path, badname):
path = get_path('HEAD', 'dir', 'file')
with pytest.raises(ValueError):
path = path.with_name(badname)
def test_with_suffix_positive(get_path, part0):
path = get_path('HEAD', 'dir', 'file.txt')
path = path.with_suffix('.py')
assert path.parts == (part0, 'dir', 'file.py')
def test_with_suffix_noname(get_path):
    """with_suffix() on a nameless (root) path must raise ValueError.

    Renamed from ``test_with_name_noname``: that name collided with the
    with_name test above, so this function silently shadowed it and only
    one of the two ever ran.
    """
    path = get_path('HEAD')
    with pytest.raises(ValueError):
        path.with_suffix('.py')
@pytest.mark.parametrize('badsuffix', ['', 'py', './py', '.\0?', '.'])
def test_with_name_badsuffix(get_path, badsuffix):
path = get_path('HEAD', 'dir', 'file')
with pytest.raises(ValueError):
path = path.with_suffix(badsuffix)
def test_cwd(get_path):
path = get_path('HEAD')
assert path.cwd() == Path.cwd()
def test_home(get_path):
path = get_path('HEAD')
assert path.home() == Path.home()
def check_stat(meth, mode, expected_hex, size, exception):
    """Assert that a stat-like callable behaves as expected.

    If *exception* is truthy, ``meth()`` must raise it.  Otherwise the
    returned stat result is checked field by field (both by attribute and
    by index): mode, size, the inode (derived from the raw object id),
    and the fixed dev/nlink/uid/gid/time values gitpathlib reports.
    """
    if exception:
        with pytest.raises(exception):
            meth()
        return
    stat = meth()
    # (removed a leftover debug print of st_mode)
    assert stat.st_mode == stat[0] == mode
    assert stat.st_ino == stat[1]
    assert stat.st_ino.to_bytes(20, 'little') == expected_hex
    assert stat.st_dev == stat[2] == -1
    assert stat.st_nlink == stat[3] == 1
    assert stat.st_uid == stat[4] == 0
    assert stat.st_gid == stat[5] == 0
    assert stat.st_size == stat[6] == size
    assert stat.st_atime == stat[7] == 0
    assert stat.st_mtime == stat[8] == 0
    assert stat.st_ctime == stat[9] == 0
@pytest.mark.parametrize(
['path', 'mode', 'size', 'exception', 'expected_hex'],
[
('/', 0o40000, 12, None, None),
('/dir', 0o40000, 6, None, None),
('/dir/file', 0o100644, 32, None,
'97bb8d0a5bebd62bdeb53110b239a87d9942d2aa'),
('/executable', 0o100755, 9, None, None),
('/link', 0o100644, 32, None,
'97bb8d0a5bebd62bdeb53110b239a87d9942d2aa'),
('/link-to-dir', 0o40000, 6, None,
'cafc64d830ca1d2f3dcbf23af25a4e03167b538f'),
('/broken-link', None, None, gitpathlib.ObjectNotFoundError, None),
('/loop-link-a', None, None, RuntimeError, None),
('/nonexistent-file', None, None,
gitpathlib.ObjectNotFoundError, None),
]
)
def test_stat(testrepo, get_path, path, mode, size, exception, expected_hex):
path = get_path('HEAD', path)
if exception:
expected_hex = None
else:
if expected_hex:
expected_hex = binascii.unhexlify(expected_hex)
else:
expected_hex = testrepo[hex_oid(path)].id.raw
check_stat(path.stat, mode, expected_hex, size, exception)
@pytest.mark.parametrize(
['path', 'mode', 'size', 'exception', 'expected_hex'],
[
('/', 0o40000, 12, None, None),
('/dir', 0o40000, 6, None, None),
('/dir/file', 0o100644, 32, None,
'97bb8d0a5bebd62bdeb53110b239a87d9942d2aa'),
('/executable', 0o100755, 9, None, None),
('/link', 0o120000, 8, None,
'dea97c3520a755e4db5694d743aa8599511bbe9c'),
('/link-to-dir', 0o120000, 3, None,
'87245193225f8ff56488ceab0dcd11467fe098d0'),
('/broken-link', 0o120000, 16, None,
'b3394ad552da18d1b3d6a5c7e603520408d35425'),
('/loop-link-b', 0o120000, 11, None,
'2b5652f1154a7aa2f62054230d116332d959d009'),
('/nonexistent-file', None, None, gitpathlib.ObjectNotFoundError,
None),
]
)
def test_lstat(testrepo, get_path, path, mode, size, exception, expected_hex):
path = get_path('HEAD', path)
if exception:
expected_hex = None
else:
if expected_hex:
expected_hex = binascii.unhexlify(expected_hex)
else:
expected_hex = testrepo[hex_oid(path)].id.raw
check_stat(path.lstat, mode, expected_hex, size, exception)
@pytest.mark.parametrize(
'meth_name',
['chmod', 'mkdir', 'rename', 'replace', 'rmdir', 'symlink_to', 'touch',
'unlink', 'write_bytes', 'write_text', 'lchmod'])
def test_mutate(get_path, meth_name):
path = get_path('HEAD')
meth = getattr(path, meth_name)
with pytest.raises(PermissionError):
meth()
with pytest.raises(PermissionError):
meth(0)
with pytest.raises(PermissionError):
meth('/foo')
with pytest.raises(PermissionError):
meth(b'foo')
@pytest.mark.parametrize(
'meth_name',
['is_socket', 'is_fifo', 'is_block_device', 'is_char_device'])
@pytest.mark.parametrize(
'path',
['/', '/dir', '/link', '/dir/file', '/nonexistent-file',
'/broken-link'])
def test_exotic(get_path, meth_name, path):
path = get_path('HEAD', path)
meth = getattr(path, meth_name)
assert meth() == False
@pytest.mark.parametrize('strict', (True, False))
@pytest.mark.parametrize(
['path', 'expected'],
[
('.', '/'),
('/', '/'),
('/.', '/'),
('/./.', '/'),
('/dir', '/dir'),
('/dir/file', '/dir/file'),
('/dir/.', '/dir'),
('/dir/..', '/'),
('/dir/../.', '/'),
('/dir/./..', '/'),
('/dir/../dir', '/dir'),
('/dir/./.././dir', '/dir'),
('/dir/link-up', '/'),
('/dir/./link-up/.', '/'),
('/dir/link-dot', '/dir'),
('/dir/link-self-rel', '/dir'),
('/dir/link-self-abs', '/dir'),
('/link', '/dir/file'),
('/link-to-dir', '/dir'),
('/link-to-dir/.', '/dir'),
('/link-to-dir/file', '/dir/file'),
('/abs-link', '/dir/file'),
('/abs-link-to-dir', '/dir'),
('/abs-link-to-dir/.', '/dir'),
('/abs-link-to-dir/file', '/dir/file'),
])
def test_resolve_good(get_path, path, expected, strict):
path = get_path('HEAD', path)
expected_path = get_path('HEAD', expected)
assert path.resolve(strict) == expected_path
@pytest.mark.parametrize('strict', (True, False))
@pytest.mark.parametrize(
['path', 'expected'],
[
('/broken-link', '/nonexistent-file'),
('/broken-link/more/stuff', '/nonexistent-file/more/stuff'),
('/broken-link/more/../stuff', '/nonexistent-file/stuff'),
('/link-to-dir/../broken-link/stuff', '/nonexistent-file/stuff'),
('/abs-broken-link', '/nonexistent-file'),
('/abs-broken-link/more', '/nonexistent-file/more'),
('/dir/nonexistent/..', '/dir'),
('/dir/nonexistent/.', '/dir/nonexistent'),
#('/dir/file/..', '/dir'), # XXX - what to do here?
])
def test_resolve_ugly(get_path, path, expected, strict):
path = get_path('HEAD', path)
expected_path = get_path('HEAD', expected)
if strict:
with pytest.raises(gitpathlib.ObjectNotFoundError):
path.resolve(strict)
else:
assert path.resolve(strict) == expected_path
@pytest.mark.parametrize('strict', (True, False))
@pytest.mark.parametrize(
    'path',
    [
        '/self-loop-link',
        '/self-loop-link/more',
        '/abs-self-loop-link',
        '/abs-self-loop-link/more',
        # NOTE(review): the next four entries duplicate two cases;
        # confirm whether distinct variants were intended instead.
        '/loop-link-a',
        '/loop-link-a',
        '/loop-link-b/more',
        '/loop-link-b/more',
    ])
def test_resolve_bad(get_path, path, strict):
    # Symlink loops must raise RuntimeError from resolve() in both
    # strict and non-strict mode.
    path = get_path('HEAD', path)
    with pytest.raises(RuntimeError):
        path.resolve(strict)
@pytest.mark.parametrize('path', ['/dir', '/dir/file', 'bla/bla'])
def test_expaduser(get_path, path):
path = get_path('HEAD', path)
assert path.expanduser() == path
@pytest.mark.parametrize(
'path',
[
'/',
'/dir',
'/dir/file',
'/link',
'/link-to-dir/file',
'/dir/file/..',
])
def test_exists(get_path, path):
path = get_path('HEAD', path)
assert path.exists()
@pytest.mark.parametrize(
'path',
[
'/nonexistent-file',
'/broken-link',
'/dir/nonexistent-file',
'/dir/../nonexistent-file',
'/dir/nonexistent/..',
])
def test_not_exists(get_path, path):
path = get_path('HEAD', path)
assert not path.exists()
@pytest.mark.parametrize(
['directory', 'contents'],
[
('/', {'dir', 'link', 'broken-link', 'link-to-dir', 'abs-link',
'abs-link-to-dir', 'abs-broken-link', 'self-loop-link',
'abs-self-loop-link', 'loop-link-a', 'loop-link-b',
'executable'}),
('/dir', {'file', 'link-up', 'link-dot', 'link-self-rel',
'link-self-abs', 'subdir'}),
])
def test_iterdir(get_path, directory, contents):
path = get_path('HEAD', directory)
expected = set(
get_path('HEAD', directory, content)
for content in contents
)
assert set(path.iterdir()) == set(expected)
@pytest.mark.parametrize(
['path', 'exception'],
[
('/dir/file', gitpathlib.NotATreeError),
('/link', gitpathlib.NotATreeError),
('/nonexistent-file', gitpathlib.ObjectNotFoundError),
('/broken-link', gitpathlib.ObjectNotFoundError),
])
def test_iterdir_fail(get_path, path, exception):
path = get_path('HEAD', path)
with pytest.raises(exception):
assert set(path.iterdir())
@pytest.mark.parametrize(
['path', 'expected'],
[
('/', True),
('/dir', True),
('/dir/file', False),
('/link', False),
('/link-to-dir', True),
('/nonexistent-file', False),
('/broken-link', False),
('/dir/nonexistent/..', False),
('/dir/file/..', True), # XXX - what to do here?
])
def test_is_dir(get_path, path, expected):
path = get_path('HEAD', path)
assert path.is_dir() == expected
@pytest.mark.parametrize(
['path', 'expected'],
[
('/', False),
('/dir', False),
('/dir/file', True),
('/link', True),
('/link-to-dir', False),
('/nonexistent-file', False),
('/broken-link', False),
('/dir/nonexistent/..', False),
('/dir/file/..', False),
])
def test_is_file(get_path, path, expected):
path = get_path('HEAD', path)
assert path.is_file() == expected
@pytest.mark.parametrize(
['path', 'expected'],
[
('/', False),
('/dir', False),
('/dir/file', False),
('/link', True),
('/link-to-dir', True),
('/nonexistent-file', False),
('/broken-link', True),
('/dir/nonexistent/..', False),
('/dir/file/..', False),
('/link-to-dir/subdir/..', False),
])
def test_is_symlink(get_path, path, expected):
path = get_path('HEAD', path)
assert path.is_symlink() == expected
@pytest.mark.parametrize(
['directory', 'pattern', 'matches'],
[
('/', 'dir', {'dir'}),
('/', '*link', {'link', 'broken-link', 'abs-link', 'abs-broken-link',
'self-loop-link', 'abs-self-loop-link'}),
('/', '**/file', {'dir/file', 'dir/subdir/file',
'link-to-dir/file', 'link-to-dir/subdir/file',
'abs-link-to-dir/file', 'abs-link-to-dir/subdir/file',
}),
('/', '**', {'/', 'dir', 'dir/subdir',
'link-to-dir', 'abs-link-to-dir',
'link-to-dir/subdir', 'abs-link-to-dir/subdir'}),
('/', '**/..', {'/..', 'dir/..', 'dir/subdir/..',
'link-to-dir/..', 'abs-link-to-dir/..',
'link-to-dir/subdir/..', 'abs-link-to-dir/subdir/..'}),
('/file', '*', {}),
('/dir', '../ex*e', {'dir/../executable'}),
])
def test_glob(get_path, directory, pattern, matches):
path = get_path('HEAD', directory)
expected = {
get_path('HEAD', match)
for match in matches
}
assert set(path.glob(pattern)) == expected
@pytest.mark.parametrize(
['directory', 'pattern', 'exception'],
[
('/', '', ValueError),
('/', '/', NotImplementedError),
])
def test_glob_bad(get_path, directory, pattern, exception):
path = get_path('HEAD', directory)
with pytest.raises(exception):
list(path.glob(pattern))
@pytest.mark.parametrize(
['directory', 'pattern', 'matches'],
[
('/', 'file', {'dir/file', 'dir/subdir/file',
'link-to-dir/file', 'link-to-dir/subdir/file',
'abs-link-to-dir/file', 'abs-link-to-dir/subdir/file',
}),
('/', '', {'/', 'dir', 'dir/subdir',
'link-to-dir', 'abs-link-to-dir',
'link-to-dir/subdir', 'abs-link-to-dir/subdir'}),
('/', '.', {'/', 'dir', 'dir/subdir',
'link-to-dir', 'abs-link-to-dir',
'link-to-dir/subdir', 'abs-link-to-dir/subdir'}),
('/', '..', {'/..', 'dir/..', 'dir/subdir/..',
'link-to-dir/..', 'abs-link-to-dir/..',
'link-to-dir/subdir/..', 'abs-link-to-dir/subdir/..'}),
])
def test_rglob(get_path, directory, pattern, matches):
path = get_path('HEAD', directory)
expected = {
get_path('HEAD', match)
for match in matches
}
assert set(path.rglob(pattern)) == expected
@pytest.mark.parametrize(
['directory', 'pattern', 'exception'],
[
('/', '/', NotImplementedError),
('/', '/dir', NotImplementedError),
])
def test_rglob_bad(get_path, directory, pattern, exception):
path = get_path('HEAD', directory)
with pytest.raises(exception):
list(path.rglob(pattern))
def test_group(get_path):
path = get_path('HEAD')
with pytest.raises(KeyError):
path.group()
def test_owner(get_path):
path = get_path('HEAD')
with pytest.raises(KeyError):
path.owner()
@pytest.mark.parametrize(
['path', 'expected'],
[
('/dir/file', b'Here are the contents of a file\n'),
('/link', b'Here are the contents of a file\n'),
])
def test_read_bytes(get_path, path, expected):
path = get_path('HEAD', path)
assert path.read_bytes() == expected
@pytest.mark.parametrize(
['path', 'exception'],
[
('/dir', gitpathlib.NotABlobError),
('/link-to-dir', gitpathlib.NotABlobError),
('/nonexistent-file', gitpathlib.ObjectNotFoundError),
('/broken-link', gitpathlib.ObjectNotFoundError),
])
def test_read_bytes_exc(get_path, path, exception):
path = get_path('HEAD', path)
with pytest.raises(exception):
path.read_bytes()
@pytest.mark.parametrize(
['path', 'expected'],
[
('/dir/file', 'Here are the contents of a file\n'),
('/link', 'Here are the contents of a file\n'),
])
def test_read_text(get_path, path, expected):
path = get_path('HEAD', path)
assert path.read_text() == expected
@pytest.mark.parametrize(
['path', 'exception'],
[
('/dir', gitpathlib.NotABlobError),
('/link-to-dir', gitpathlib.NotABlobError),
('/nonexistent-file', gitpathlib.ObjectNotFoundError),
('/broken-link', gitpathlib.ObjectNotFoundError),
])
def test_read_text_exc(get_path, path, exception):
path = get_path('HEAD', path)
with pytest.raises(exception):
path.read_text()
def test_open(get_path):
path = get_path('HEAD', 'dir/subdir/file')
with path.open() as f:
assert f.read() == 'contents'
def test_open_rt(get_path):
path = get_path('HEAD', 'dir/subdir/file')
with path.open(mode='rt') as f:
assert f.read() == 'contents'
def test_open_utf8(get_path):
path = get_path('HEAD', 'dir/subdir/file-utf8')
with path.open() as f:
assert f.read() == 'ċóňťëñŧş ☺'
def test_open_utf8_explicit(get_path):
path = get_path('HEAD', 'dir/subdir/file-utf8')
with path.open(encoding='utf-8') as f:
assert f.read() == 'ċóňťëñŧş ☺'
def test_open_utf8_bad(get_path):
path = get_path('HEAD', 'dir/subdir/file-utf16')
with pytest.raises(UnicodeDecodeError):
with path.open() as f:
f.read()
def test_open_utf8_errors(get_path):
path = get_path('HEAD', 'dir/subdir/file-utf16')
expected = '��\x0b\x01�\x00H\x01e\x01�\x00�\x00g\x01_\x01 \x00:&'
with path.open(errors='replace') as f:
assert f.read() == expected
def test_open_utf16(get_path):
path = get_path('HEAD', 'dir/subdir/file-utf16')
with path.open(encoding='utf-16') as f:
assert f.read() == 'ċóňťëñŧş ☺'
@pytest.mark.parametrize(
'mode', ['', 'w', 'x', 'a', 'b', 't', '+', 'U', 'rr', 'rbt', 'bt',
'r+', 'rw', 'rx', 'ra', '?'])
def test_open_bad_mode(get_path, mode):
path = get_path('HEAD', 'dir/file')
with pytest.raises(ValueError):
path.open(mode=mode)
def test_open_binary(get_path):
path = get_path('HEAD', 'dir/subdir/file-binary')
with path.open('rb') as f:
assert f.read() == b'some\x00data\xff\xff'
def test_open_binary_encoding(get_path):
path = get_path('HEAD', 'dir/subdir/file-binary')
with pytest.raises(ValueError):
path.open('rb', encoding='utf-8')
def test_open_binary_errors(get_path):
path = get_path('HEAD', 'dir/subdir/file-binary')
with pytest.raises(ValueError):
path.open('rb', errors='strict')
def test_open_binary_newline(get_path):
path = get_path('HEAD', 'dir/subdir/file-binary')
with pytest.raises(ValueError):
path.open('rb', newline='')
@pytest.mark.parametrize(
['newline', 'expected'],
[
(None, ['unix\n', 'windows\n', 'mac\n', 'none']),
('', ['unix\n', 'windows\r\n', 'mac\r', 'none']),
('\n', ['unix\n', 'windows\r\n', 'mac\rnone']),
('\r\n', ['unix\nwindows\r\n', 'mac\rnone']),
('\r', ['unix\nwindows\r', '\nmac\r', 'none']),
])
def test_open_newline(get_path, newline, expected):
path = get_path('HEAD', 'dir/subdir/file-lines')
with path.open('rb') as f:
assert f.read() == b'unix\nwindows\r\nmac\rnone'
with path.open(newline=newline) as f:
print(f)
assert f.readlines() == expected
@pytest.mark.parametrize(
['rev1', 'path1', 'rev2', 'path2', 'expected'],
[
('HEAD^^', 'same/file', 'HEAD', 'dir/file', True),
('HEAD^^', 'same/file', 'HEAD^^', 'same2/file', True),
('HEAD', 'dir/file', 'HEAD', 'dir', False),
('HEAD^^', 'same', 'HEAD^^', 'same2', True),
('HEAD^^', 'same', 'HEAD', 'dir', False),
('HEAD^^', 'same', 'HEAD^^', 'extra', False),
('HEAD^^', 'same', 'HEAD^^', 'diff-filename', False),
('HEAD^^', 'same', 'HEAD^^', 'diff-content', False),
('HEAD', 'dir/file', 'HEAD', 'link', True),
('HEAD', 'link-to-dir', 'HEAD', 'dir', True),
('HEAD', 'link', 'HEAD', 'link', True),
])
def test_samefile(get_path, rev1, path1, rev2, path2, expected):
path1 = get_path(rev1, path1)
path2 = get_path(rev2, path2)
assert path1.samefile(path2) == expected
@pytest.mark.parametrize(
['path', 'exception'],
[
('nonexistent-file', gitpathlib.ObjectNotFoundError),
('broken-link', gitpathlib.ObjectNotFoundError),
('self-loop-link', RuntimeError),
])
def test_samefile_bad_path(get_path, path, exception):
path1 = get_path('HEAD', 'dir')
path2 = get_path('HEAD', path)
with pytest.raises(exception):
path1.samefile(path2)
@pytest.mark.parametrize(
'other',
[
'a string',
Path('/dir'),
Path('dir'),
3j-8,
])
def test_samefile_otherobject(get_path, other):
path = get_path('HEAD', 'dir')
assert path.samefile(other) == False
|
984,122 | e176cff410da058422dd2316f7f67374271b7e30 | # 1~ 20 합을 구하기
s = 0
count = 0
while count < 10:
print(count)
s += (count+1)
count += 1
print(s)
#break
for n in range(10):
if n > 5:
break
print(n , end = '')
i = 0
while i < 10:
if i > 5:
break
print(i, end = '')
i += 1
print("\n----------------------------------------")
# continue
i = 0
while i < 10:
if i <=5:
i += 1
continue
print(i, end='')
i += 1
print('\n===============================')
# 무한루프
i = 0
while True:
print(i, end='')
if i > 5:
break
i += 1 |
984,123 | 5d84a506db3b6c1a6cc1ff312e5ad5c02b8f613d | #!/usr/bin/env python
import subprocess
def log(data):
print("[+] " + data)
class Git:
def __init__(self, base_path, log_path = './faveo_release.log'):
self.basePath = base_path
self.logPath = log_path
def execute(self, *args):
"""
Performs git operations based on the params passed
:param args:
:return:
"""
default_params = ["git", "-C", self.basePath]
all_params = default_params + list(args)
subprocess.call(all_params, stdout=open(self.logPath, 'a'), stderr=open(self.logPath, 'a'))
def checkout(self, branch_name):
log("checking out to "+branch_name)
self.execute("stash")
self.execute("clean", "-fd")
self.execute("checkout", branch_name, "-f")
self.execute("fetch")
self.execute("reset", "--hard", "origin/"+branch_name)
log("checked out to "+branch_name)
def sync_remote_branch_with_current_branch(self, remote_branch):
log("force pushing current branch code to " + remote_branch)
self.execute("push", "origin", "HEAD:" + remote_branch, "-f")
log("force pushed current branch code to " + remote_branch)
def commit_and_publish(self, branch):
log("committing all changes")
self.execute("add", ".")
self.execute("commit", "-m", "product configuration updated", "-n")
log("committed all changes")
self.sync_remote_branch_with_current_branch(branch)
def export(self, branch, path):
absolute_path = path+'/'+branch+".zip"
log('exporting source code to '+absolute_path)
subprocess.call(['mkdir', '-p', path])
self.execute('archive', '--format', 'zip', '--output', absolute_path, branch)
log('exported successfully')
|
984,124 | 2cf20a0cdf39d0a857cfdca72bd9430c7d4b28ff | import csv
file = open('dict.csv','w')
f1 = open('snp500_formatted.txt','r')
f2 = open('../nlp/snp500_formatted.txt','r')
c = csv.writer(file)
c1 = csv.reader(f1)
c2 = csv.reader(f2)
c1 = list(c1)
c2 = list(c2)
for i in range(len(c1)):
data = []
data.extend((c1[i][0],c2[i][0]))
c.writerow(data)
|
984,125 | f504bb7fca7efcb0cbd78939d97c6c1d9f26f3b5 | # Generated by Django 2.2 on 2020-08-04 11:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('search', '0002_auto_20200504_0155'),
]
operations = [
migrations.AlterField(
model_name='plant',
name='country',
field=models.CharField(choices=[('Slovensko', 'Slovakia'), ('Cesko', 'Czechia'), ('Rakusko', 'Austria'), ('Polsko', 'Poland'), ('Nemecko', 'Germany')], default=('Slovensko', 'Slovakia'), max_length=30),
),
]
|
984,126 | 7fcbf6d554c5a14916c66f9972799f22b66e7884 | from thread_queue import Queue
import concurrent.futures
import logging
import random
import time
def producer(pipeline):
"""Pretend we're getting a message from the network."""
for index, iteration in enumerate(range(10)):
obj = random.randint(1, 101)
logging.info("Producer %s: produced object %s", index, obj)
logging.debug("Producer %s: about to acquire lock", index)
pipeline._lock.acquire()
pipeline.shift(obj, "Producer", index)
logging.info("Producer %s: added object %s to queue", index, obj)
pipeline._lock.release()
logging.debug("Producer %s: released lock", index)
time.sleep(1)
logging.info("Producer %s: complete", index)
def consumer(pipeline, index=0):
"""Pretend we're saving a number in the database."""
pipeline._lock.acquire()
while pipeline.count() != 0:
logging.info("Consumer %s: about to check queue for next object", index)
obj = pipeline.unshift("Consumer", index)
pipeline._lock.release()
index += 1
time.sleep(5)
logging.info("Consumer %s: consumed object %s", index, obj)
consumer(pipeline, index)
else:
logging.info("Consumer: pipeline is empty")
pipeline._lock.release()
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.DEBUG,
datefmt="%H:%M:%S")
# logging.getLogger().setLevel(logging.DEBUG)
pipeline = Queue()
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
executor.submit(producer, pipeline)
executor.submit(consumer, pipeline) |
984,127 | feb32e77ef31cbbdbc50538f3b1c4e32cbbe7a35 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
from my_allennlp.allennlp.modules.elmo import Elmo
import torch
import argparse
import numpy as np
from tqdm import tqdm
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--inputs', default='bin_path/')
parser.add_argument('--om_output')
parser.add_argument('--option_file', default='elmo_2x4096_512_2048cnn_2xhighway_options.json')
parser.add_argument('--weight_file', default='elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5')
opt = parser.parse_args()
elmo = Elmo(opt.option_file, opt.weight_file, 1)
elmo.eval()
om_output_path = opt.om_output
similarity = 0
nums = len(os.listdir(opt.inputs))
cos = torch.nn.CosineSimilarity(dim=0)
for idx, i in enumerate(tqdm(range(nums))):
input_file = np.fromfile(opt.inputs + '{0}.bin'.format(i),
dtype='int32').reshape((1, 8, 50))
input_file = torch.from_numpy(input_file)
om_output_file = np.fromfile(os.path.join(om_output_path, '{0}_0.bin'.format(i)),
dtype='float32').reshape((1, 8, 1024))
om_output_file = torch.Tensor(om_output_file.flatten().astype(dtype='float64'))
output = elmo.forward(input_file)
output = output['elmo_representations'][0].detach().flatten()
cosine_sim = float(cos(om_output_file, output))
similarity += cosine_sim
print('average similarity: ', similarity / nums)
if __name__ == '__main__':
main()
|
984,128 | c371d82a2339a33d5ddba1da790d5422054fa579 | from django import forms
class SearchForm(forms.Form):
sim_id = forms.CharField(label='sim_id', max_length=100)
|
984,129 | b1298c9efe877ce27e9abb7dad534d0714d5f4f8 | import re
from bs4 import BeautifulSoup
import requests
from django.db.models import Q
def normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
def build_query(query_string, search_fields):
''' Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search fields.
'''
query = None # Query to search for every search term
terms = normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query:
or_query = or_query |q
else:
or_query = q
if query:
query = query | or_query
else:
query = or_query
return query
def search(request,model,fields,query_param="q" ):
"""
"""
query_string = request.GET.get(query_param,"").strip()
if not query_string:
return model.objects.filter(user=request.user)
entry_query = build_query(query_string, fields)
#print('entry_query=%s'%entry_query)
found_entries = model.objects.filter(entry_query,user=request.user)
return found_entries
def get_metas(html):
field=u'property'
soup = BeautifulSoup(html)
metas = soup('meta',property=re.compile('og:'))
if (len(metas)==0):
field=u'name'
metas=soup('meta',attrs={'name': re.compile('og:')})
ret={}
for x in [m.attrs for m in metas]:
ret[x[field]]=unicode(x[u'content'])
#print('metas=%s'%ret)
return ret
|
984,130 | 848e1f63af30a191db25054b691ac8352121d00f | import sys
def solution():
n = int(sys.stdin.readline().rstrip())
def dfs(n):
global count
if n == 0:
count += 1
elif n < 0:
return
else:
dfs(n - 1)
dfs(n - 2)
dfs(n - 3)
dfs(n)
print(count)
if __name__ == "__main__":
T = int(input())
for _ in range(T):
count = 0
solution() |
984,131 | d45bd3f9134c87ae78c80d3991670e26d0e8dced | from requests_html import HTMLSession
import pandas as pd
# 建立一个会话(session),即让Python作为一个客户端,和远端服务器交谈
url = 'https://www.jianshu.com/p/85f4624485b9'
session = HTMLSession()
# 利用 session 的 get 功能,把这个链接对应的网页整个儿取回来
r = session.get(url)
# 打印出返回的HTML中的文字部分
# print(r.html.text)
# 打印出返回的HTML中的绝对链接部分
# print(r.html.absolute_links)
# 定位我们具体要获取的元素HTML相对位置
sel = 'body > div.note > div.post > div.article > div.show-content > div > p > a'
# 获取链接和文字的步骤, 作为一个重复性的动作通过定义一个函数来实现
def get_text_link_from_sel(sel):
mylist = []
try:
results = r.html.find(sel)
for result in results:
mytext = result.text
mylink = list(result.absolute_links)[0]
mylist.append((mytext, mylink))
return mylist
except:
return None
# 通过Pandas把数据转换为数据表
df = pd.DataFrame(get_text_link_from_sel(sel))
# 定义数据表的表头
df.columns = ['Text', 'Link']
# 导出到Excel文件, utf_8_sig以防中文乱码
df.to_csv('Output/Sample.csv', encoding='utf_8_sig', index=False)
|
984,132 | 9a955506f18c63931dd9270539312be726d0d66c | # -*- coding: utf-8 -*-
import os, re
from nltk.tokenize.stanford import StanfordTokenizer
from nltk.tokenize import sent_tokenize, word_tokenize
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def absref( lines ): #txt中是否有referenc 或者 bibliography(参考文献)
cnt = 0
for line in lines:
if not line: continue
line = line.lower()
if line.find('reference') != -1 or line.find('bibliography') != -1: cnt += 1
if cnt == 0:
( filepath, filename ) = os.path.split(dest_file)
print "No references: " + filename
return False
return True
def paralength( lines ): #txt段落长度是否足够(需平均大于60且300字符以上超过10段)
total = 0
lcnt = 0
para = 0
for line in lines:
if not line: continue
lcnt += 1
length = len(line)
total += length
if length > 300:
para += 1
if lcnt == 0:
( filepath, filename ) = os.path.split(dest_file)
print 'Empty txt: ' + filename
return False
else:
average = float(total)/float(lcnt)
if average < 60:
if para <= 10:
( filepath, filename ) = os.path.split(dest_file)
print "Low paralength: " + filename
return False
return True
import string
def check_text_format(text): # 判断是否包含过多非法字符
if not text:
return False
valid_chars = 0
for i in text:
if i in string.printable:
valid_chars += 1
# TODO: log valid_chars and len(text)
return float(valid_chars)/len(text) > 0.6
def checkpdfbytxt( lines ): #通过txt大致判断是否是合格论文
if not check_text_format( lines ): return False
if not paralength( lines ): return False
if not absref( lines ): return False
return True
def wordnum( line, minn ): #该句line中词数需大于minn
words = line.split(' ')
cnt = 0
for word in words:
if word != '':
cnt += 1
if cnt <= minn:
# print line
return False
return True
def notended( line ): #判断该句是否结束
length = len(line)
if line[length-1].isalpha() or line[length-1].isdigit(): #以数字或字母结尾
# print line
return False
if line[length-2] == ' ': #倒数第二个字符是空格(正常.?!会紧跟单词)
# print line
return False
cnt1 = 0
for i in range(length): #匹配左右括号
if line[i] == '(' or line[i] == '[' or line[i] == '{': cnt1 += 1
elif line[i] == ')' or line[i] == ']' or line[i] == '}': cnt1 -= 1
if cnt1 < 0: return False
if cnt1 != 0:
# print line
return False
return True
def averageword( line , minl, maxl ): #平均单词长度需要大于minl小于maxl
words = line.split(' ')
total = 0
cntwd = 0
for word in words:
if word == '': continue
total += len(word)
cntwd += 1
average = float(total) / float(cntwd) if cntwd else 0
if average < minl:
# print average
# print line
return False
if average > maxl:
# print average
# print line
return False
return True
def formulas( line, ratio ): #非字母比例不得超过ratio
total = 0
cnt = 0
length = len(line)
for i in range(length):
if line[i] == ' ': continue
total += 1
if line[i].isalpha() != True: cnt += 1
average = float(cnt) / float(total)
if average > ratio:
# print average
# print line
return False
return True
def bigCharas( line, ratio ): #大写字母比例不得超过ratio
total = 0
cnt = 0
length = len(line)
for i in range(length):
if line[i] == ' ': continue
if line[i].isalpha() != True: continue
total += 1
if line[i].isupper(): cnt += 1
average = float(cnt) / float(total)
if average > ratio:
# print average
# print line
return False
return True
def startwithlower(line): #句子不得以小写字母或符号开头
if line[0].islower():
# print line
return False
# if line[0].isalpha() == False and line[0].isdigit() == False:
# print line
# return False
return True
def refine_para(line): #判断段落是否合格
if averageword( line, 3, 10 ) == False: return False
if formulas( line, 0.2 ) == False: return False
if bigCharas( line, 0.2 ) == False: return False
return True
def refine_sent(line): #判断句子是否合格
if averageword( line, 2, 10 ) == False: return False
if wordnum( line, 3 ) == False: return False
if formulas( line, 0.2 ) == False: return False
if bigCharas( line, 0.4 ) == False: return False
if notended( line ) == False: return False
if startwithlower(line) == False: return False
return True
def sentsfromparas( paras ): #从很多段落中提取句子
sents = []
for line in paras:
if not line: continue
line = line.strip('\n')
line = line.strip('\r')
sentences = sent_tokenize(line)
for sentence in sentences:
if refine_sent(sentence) == False: continue
sents.append(sentence)
return sents
def getbigparas( lines ): #从txt中提取段落
paras = []
for line in lines:
if len(line) > 300:
if refine_para(line) == False: continue
paras.append(line)
return paras
def writeto( sents, paperid, to_folder ): #把获得的句子按格式写入to_folder文件夹中
name = str(paperid) + '.txt'
dest_file = os.path.join( to_folder, name )
with open( dest_file, 'w' ) as f:
for sent in sents:
# tokens = StanfordTokenizer().tokenize(sent) #效率过低
tokens = word_tokenize(sent)
for token in tokens:
f.write( token + ' ' )
f.write('\n')
def _toRemove(line):
if line == '': return True
if not (line[0].isupper() or line[0].isdigit()) or \
line.startswith('ACM Classification :') or \
line.startswith('Keywords :') or \
line.startswith('To copy otherwise ,') or \
line.startswith('Permission to make digital or hard copies') or \
('<' in line and '>' in line): # HTML tags
return True
m = re.match(r'Copyright \d{4} ACM', line)
if m:
return True
m = re.search(r'http:\\/\\/', line)
if m:
return True
m = re.search(r'(Figure|Fig|Table|Tab) \d+ :', line)
if m:
return True
m = re.search(r'[^\.\?!\'"].Permission', line)
if m:
return True
if len(line.split()) <= 4:
return True
return False
def refine_lines(sentences):
lineList=[filter(lambda c: ord(c) < 128, s.replace(u'\xa0', u' ')) for s in sentences]
resultLineList=[]
for line in lineList:
if len(line) > 0:
line = re.sub(r'-LRB-.*?-RRB-', '', line)
line = re.sub(r'-LRB-', '', line)
line = re.sub(r'-RRB-', '', line)
line = re.sub(r'-LSB-.*?-RSB-', '', line)
line = re.sub(r'-LSB-', '', line)
line = re.sub(r'-RSB-', '', line)
line = re.sub(r'\(.*?\)', '', line)
line = re.sub(r'\(', '', line)
line = re.sub(r'\)', '', line)
line = re.sub(r'\[.*?\]', '', line)
line = re.sub(r'\[', '', line)
line = re.sub(r'\]', '', line)
if _toRemove(line):
if line != '\n':
# print line[0:-1]
pass
else:
resultLineList.append(line)
return resultLineList
def changestatus( paperid, status ): #更改服务器中paper状态为status #待填补
return
def refine(text):
lines = [l for l in text.split('\n') if l]
paras = getbigparas(lines)
sents = sentsfromparas(paras)
sents = refine_lines(sents)
# writeto( sents, paperid, to_folder )
return ''.join([l + '\n' for l in sents if l])
def start(root,out):
import sys
for name in os.listdir(root):
with open(os.path.join(root, name)) as fin:
#print fin
text = fin.read()
refined = refine(text)
print name, len(refined.split())
if not refined:
continue
with open(os.path.join(out, name), 'w') as fout:
fout.write(refined)
start('extracted','refined')
|
984,133 | a4953681bd8aa8272ef84377830f6160af69226b | import os
import shutil
from git import Repo
from .Builder import Builder
# Possible configuration
# repository.uri (optional) => The uri of the git repository to clone
# repository.tag (mandatory) => The tag to checkout to before building
class Vulkan(Builder):
default_repository_uri = 'https://github.com/KhronosGroup/Vulkan-Docs.git'
def _clone(self):
self.logger.info('Vulkan: Clone main repository')
repo = None
if not os.path.isdir('vulkan'):
repo = Repo.clone_from(self.config['repository']['uri'], 'vulkan')
else:
repo = Repo('vulkan')
repo.git.checkout(self.config['repository']['tag'])
return True
def _copy_files(self):
vulkan_root_path = os.path.join(self.args.path, 'vulkan')
vulkan_include_path = os.path.join(vulkan_root_path, 'include')
self.logger.info('Vulkan: Create directories')
if not os.path.isdir(vulkan_include_path):
os.makedirs(vulkan_include_path)
self.logger.info('Vulkan: Copy include files')
real_include_path = os.path.join(vulkan_include_path, 'vulkan')
if not os.path.isdir(real_include_path):
shutil.copytree('vulkan/src/vulkan', real_include_path)
return True
|
984,134 | 4280b4580541c4bfb1c36043b5634995623a5244 | from time import ctime
from bs4 import BeautifulSoup
import itchat
import urllib.request
from pandas import Series
@itchat.msg_register(itchat.content.TEXT)
def getcity(msg):
print(msg["Text"])
pinyin = msg['Text']
results = getTour(pinyin)
itchat.send(results, msg['FromUserName'])
# 登录网页版微信
itchat.login()
Help = """友情提示:
请输入经典拼音获取经典信息
注意:
陕西——请输入shaanxi
吉林市——请输入jilinshi
抚州请输入——jiangxifuzhou
"""
itchat.send(Help, toUserName='filehelper')
urllib.request.urlopen('https://lvyou.baidu.com')
|
984,135 | da14e0e02aaeaf8aa19f934432c90e55728726f0 | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
"""The explainer module for the CASPR library.
Modules:
:module1_name: A description of this specific module.
"""
|
984,136 | ca92ef93d6c74561aef289b81d343d8f0a8151a3 | import bs4
import random
import math
red_box = bs4.BeautifulSoup(open("figure//red_box"), "xml")
red_cylinder=bs4.BeautifulSoup(open("figure//red_cylinder"),"xml")
red_sphere=bs4.BeautifulSoup(open("figure//red_sphere"),"xml")
green_box = bs4.BeautifulSoup(open("figure//green_box"), "xml")
green_cylinder=bs4.BeautifulSoup(open("figure//green_cylinder"),"xml")
green_sphere=bs4.BeautifulSoup(open("figure//green_sphere"),"xml")
blue_box = bs4.BeautifulSoup(open("figure//blue_box"), "xml")
blue_cylinder=bs4.BeautifulSoup(open("figure//blue_cylinder"),"xml")
blue_sphere=bs4.BeautifulSoup(open("figure//blue_sphere"),"xml")
R=4.0
class figures:
ox=0
oy=0
name=""
figure_list=[]
def create_figure(red_box,soup,quantity_red_box,string,coordinate):
model_red_box = []
for model in red_box.find_all('model'):
if model.attrs["name"] == "unit_"+string:
model_red_box.append(model)
global_red_box_list = []
red_box_list = []
while (quantity_red_box != 0):
temp = model_red_box[1]
global_red_box = model_red_box[0]
temp.attrs["name"] = "unit_"+string + "_" + str(quantity_red_box)
for child in temp.link.descendants:
if child.name == "pose":
# print(child)
rand_x = random.uniform(-9.5, 31.5)
rand_y = random.uniform(-8.5, 31.5)
coordinates = child.string
coo_cyl = coordinates.split(" ")
coo_cyl[0] = rand_x
coo_cyl[1] = rand_y
figure=figures()
figure.ox,figure.oy,figure.name=coo_cyl[0],coo_cyl[1],string
figure_list.append(figure)
if len(figure_list) > 1:
for form1 in figure_list:
count = 0
while(True):
for form in figure_list:
diff_oy=form.oy-form1.oy
diff_ox=form1.ox-form.oy
hyp= math.hypot(diff_ox,diff_oy)
if(hyp!=0):
if(hyp>=R):
count+=1
else:
while(hyp<R):
coo_cyl[0] = random.uniform(-9.5, 31.5)
coo_cyl[1] = random.uniform(-8.5, 31.5)
form.ox, form.oy = coo_cyl[0], coo_cyl[1]
diff_oy = form.oy - form1.oy
diff_ox = form1.ox - form.oy
hyp = math.hypot(diff_ox, diff_oy)
count+=1
# print(str(count)+" "+str(len(figure_list)))
if count > len(figure_list)-1:
break
coordinate.write(str(figure.ox)+" "+str(figure.oy)+" "+figure.name+"\n")
child.string = ""
temp.pose.string = ""
global_red_box.pose.string = ""
for i in coo_cyl:
child.string += str(i) + " "
temp.pose.string += str(i) + " "
global_red_box.pose.string += str(i) + " "
red_box_list.append(str(temp))
global_red_box.attrs["name"] = "unit_"+string + "_" + str(quantity_red_box)
global_red_box_list.append(str(global_red_box))
quantity_red_box -= 1
full_text = str(soup).split("\n")
full_text_list = full_text
i = 0
while (i < len(red_box_list)):
count = 0
for word in full_text_list:
if word == "<sim_time>13 781000000</sim_time>":
full_text.insert(count, red_box_list[i])
break;
count += 1
i += 1
i = 0
while (i < len(global_red_box_list)):
count = 0
for word in full_text_list:
if word == "<gravity>0 0 -9.8</gravity>":
full_text.insert(count, global_red_box_list[i])
break;
count += 1
i += 1
# print(len(figure_list))
return full_text
def create_soup(full_text):
f = open('result_world', "w")
for word in full_text:
f.write(word)
f.write("\n")
f.close()
soup=bs4.BeautifulSoup(open("result_world"),"xml")
return soup
|
984,137 | d5331a86ed840f438709df8abc5b0362cd5109da | """.. Ignore pydocstyle D400.
======================
Command: elastic_index
======================
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from django.core.management.base import BaseCommand
from resolwe.elastic.builder import index_builder
class Command(BaseCommand):
"""Build ElasticSearch indexes."""
help = "Build ElasticSearch indexes."
def handle(self, *args, **options):
"""Command handle."""
index_builder.build(push=False)
index_builder.push()
|
984,138 | 26c9667d69025d170e5d1d1842328228c45e24e7 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 21:53:12 2018
@author: ASUS
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
#MNIST数据集的相关的常数
#输入层的节点数,对于MNIST数据集,这个等于图片的像素
INPUT_NODE = 784
#输出层的节点数,这个等于类别的数目。因为在MNIST数据集中需要区分的是0-9这10个数字。
OUTPUT_NODE = 10
#设置神经网络的参数
#隐藏层节点数,这里只使用一个隐藏层的网络结构作为样例。这个隐藏层有500个节点。
LAYER1_NODE = 500
#一个训练batch中的训练数据个数。数字越小时,训练过程接近随机梯度下降;数据越大时,训练接近梯度下降
BACTH_SIZE = 100
#基础的学习率
LEARNING_RATE_BASE = 0.8
#学习率的衰减率
LEARNING_RATE_DECAY = 0.99
#描述模型复杂度的正则化项在损失函数中的系数
REGULARIZATION_RATE = 0.001
#训练次数
TRAINING_STEPS = 3000
#滑动平均衰减
MOVING_AVERAGE_DECAY = 0.99
#一个辅助函数,给定神经网络的输入和所有参数,计算神经网络的前向传播结果。在这里定义一个ReLU激活函数的三层全链接神经网络。
#通过加入隐藏层实现多层网络结构,通过ReLU激活函数实现去线性化。在这个函数中也支持传入用于计算参数均值的类。这样方便在测试时使用滑动平均模型
def inference(input_tensor,avg_class,weights1,biases1,weights2,biases2):
#当没有提供滑动平均类是,直接使用参数当前的取值
if avg_class == None:
#计算隐藏层的前向传播结果,这里使用了ReLU激活函数
layer1 = tf.nn.relu(tf.matmul(input_tensor,weights1)+biases1)
#计算输出层的前向传播结果,因为在计算损失函数时会一并计算softmax函数,所以这里不需要加入激活函数。而且不加入softmax不会影响预测结果。
#因为预测时使用的是不用于对应节点输出值的相对大小,有没有softmax层对最后的分类结果的计算没有影响。于是在计算整个神经网络的前向传播时
#可以不加最后的softmax层。
return tf.matmul(layer1,weights2)+biases2
#否则,使用滑动平均值
else:
#首先使用avg_class.average函数来计算得出变量的滑动平均值。
#然后再计算相应的神经网络前向传播的结果。
layer1 = tf.nn.relu(tf.matmul(input_tensor,weights1)+avg_class.average(biases1))
return tf.matmul(layer1,avg_class.average(weights2))+avg_class.average(biases2)
#定义训练过程
def train(mnist):
#占位符,定义x,y_变量
x = tf.placeholder(tf.float32,[None,INPUT_NODE],name = 'x-input')
y_ = tf.placeholder(tf.float32,[None,OUTPUT_NODE],name = 'y-input')
#生成隐藏层的参数
weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE,LAYER1_NODE],stddev = 0.1))
biases1 = tf.Variable(tf.constant(0.1,shape=[LAYER1_NODE]))
#生成输出层的参数
weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE,OUTPUT_NODE],stddev = 0.1))
biases2 = tf.Variable(tf.constant(0.1,shape=[OUTPUT_NODE]))
#计算在当前参数下神经网络前向传播的结果。这里给出的用于计算滑动平均的类为None,所以函数不会使用参数滑动平均
y = inference(x,None,weights1,biases1,weights2,biases2)
#定义存储训练的变量。这个变量不需要计算滑动平均,所以这里指定这个变量为不可训练的变量(trainable = False)。在使用TensorFlow训练神经网络时,
#一般会将代表训练轮数的变量指定为不可训练的参数
global_step = tf.Variable(0,trainable = False)
#给定滑动平均衰减率和训练轮数的变量,初始化滑动平均类。
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
#在所有代表神经网络参数的变量上使用滑动平均,其他的辅助变量(比如global_step)就不需要了。tf.trainable_variables返回的就是图像
#GraphKeys.TRAINABLE_VARIABLES中的元素。这个集合的元素就是所有没有指定trainable = False的参数
variable_averages_op = variable_averages.apply(tf.trainable_variables())
#计算使用了滑动平均之后的前向传播的结果,滑动平均不会改变变量本身的取值,而是会维护一个影子变量来记录其滑动平均值。所以当需要使用这个滑动
#平均值时,需要明确调用average函数
average_y = inference(x,variable_averages,weights1,biases1,weights2,biases2)
#计算交叉熵作为刻画预测值和真实值之间差距的损失函数。函数第一个参数是神经网络不包括softmax层的前向传播结果,第二层是训练数据的正确答案。
#因为答案是一个长度为10的数组,而该函数是需要提供一个正确答案的数字,所以使用tf.argmax()来得到正确答案对应的类别编号。
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels =tf.arg_max(y_,1))
#计算在当前batch中所有样例的交叉熵的平均值
cross_entropy_mean = tf.reduce_mean(cross_entropy)
#计算L2正则化的损失函数
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
#一般只计算权重的正则化损失,而不是用偏置项
regularizaton = regularizer(weights1) +regularizer(weights2)
#总损失等于交叉熵损失与正则化损失的和
loss = cross_entropy_mean + regularizaton
#设置指数衰减的学习率
learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,global_step,mnist.train.num_examples/BACTH_SIZE,
LEARNING_RATE_DECAY)
#使用GD梯度下降优化算法优化损失函数
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step = global_step)
#在训练神经网络模型时,每过一遍数据急需要通过反向传播来更新神经网络的参数,又要更新每一个参数的滑动平均值。为了一次完成操作:
#tf.control_dependencies()\tf.group()两种机制均能实现
#train_OP = tf.group(train_step,variable_averages_op)
with tf.control_dependencies([train_step,variable_averages_op]):
train_op = tf.no_op(name = 'train')
#检验使用了滑动均值模型的神经网络前向传播结果是否正确。tf.argmax(average_y,1)计算每一个样例的预测答案。其中average_y是一个batch_size*10的二维
#数组,每一行表示一个样例的前向传播结果,tf.argmax的第二个参数‘1’表示选取最大值的操作在第一个维度中进行,也就是说,只在每一行选择最大值对应的下标。
#于是得到的结果是一个长度为batch的一维数组,这个一维数组中的值就表示了每一个样例对应的数字识别的结果。
#tf.equal()判断两个张量是否相等。
correct_prediction = tf.equal(tf.arg_max(average_y,1),tf.argmax(y_,1))
#首先讲bool值转化为数值,然后局算平均值
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
#初始会话,并开始训练过程
with tf.Session() as sess:
#将所有的参数变量初始化
tf.initialize_all_variables().run()
#验证数据
validate_feed = {x:mnist.validation.images,y_:mnist.validation.labels}
#测试数据
test_feed= {x:mnist.test.images,y_:mnist.test.labels}
#迭代训练神经网络
for i in range(TRAINING_STEPS):
if i%1000 == 0:
validate_acc = sess.run(accuracy,feed_dict=validate_feed)
print('循环次数:%d,正确率:%g'%(i,validate_acc))
#产生这一轮使用的一个batch数据,并运行训练过程
xs,ys = mnist.train.next_batch(BACTH_SIZE)
sess.run(train_op,feed_dict = {x:xs,y_:ys})
'''
#上述是处理好的batch,不用写循环。
for i in range(Steps):
#每次选择batch_size个样本进行训练
#初始定位为整个数据集batch_size的倍数,且一定是小于dataset_size的数
start = (i*batch_size)%dataset_size
#print(start)
##结束位置一般是加上一个batch_size,另外如果取到最后一个batch的时候,刚好是最后一个数据集的位置,两者和等于最后位置时,取最后的位置。
end = min(start+batch_size,dataset_size)
#通过选取的样本训练神经网络并更新参数
sess.run(train_step,feed_dict={x:X[start:end],y_:Y[start:end]})
if i%1000==0:
#每隔一段时间计算所有数据的交叉熵并输出
total_cross_entropy = sess.run(cross_entropy,feed_dict={x:X,y_:Y})
print('循环:%d,交叉熵:%g'%(i,total_cross_entropy))
'''
test_acc = sess.run(accuracy,feed_dict=test_feed)
print('正确率:%g'%test_acc)
#SSS = train(mnist)
def main(argv = None):
    """tf.app.run entry point: download/load the MNIST dataset and train.

    `argv` is unused; tf.app.run passes parsed flags here by convention.
    """
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    train(mnist)
if __name__ == '__main__':
    tf.app.run()
|
984,139 | 84979950c3b7ec8ea81b28abf4975d7553f56a08 | import playsound
import schedule
import time
def song():
    """Log and play the scheduled track once."""
    print("song")
    playsound.playsound("OnTheHouseTop.mp3")

# Fire the song every day at 09:00.
schedule.every().day.at("09:00").do(song)

# Polling scheduler loop: check for due jobs once per second.
while True:
    schedule.run_pending()
    time.sleep(1)
984,140 | 0c1ad4e5fd091ef050912e60295c4ac7b853c23d | from core import serializers
from core.models import User
from rest_framework import viewsets
from rest_framework.response import Response
from django.contrib.auth import get_user_model
from rest_framework.authentication import TokenAuthentication
from core.permissions import UpdateOwnProfileOnly
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
app_name = 'user'
class UserProfileViewSet(viewsets.ModelViewSet):
    """CRUD API for user profiles.

    Token-authenticated; UpdateOwnProfileOnly restricts writes to the
    requesting user's own profile. Supports ?search=... filtering on
    the name and email fields.
    """
    serializer_class = serializers.UserSerializer
    queryset = User.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (UpdateOwnProfileOnly,)
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email',)
class UserLoginView(ObtainAuthToken):
    """Issues auth tokens on login.

    Renderer classes are set explicitly because ObtainAuthToken does not
    pick up DRF's defaults, which keeps the endpoint browsable.
    """
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
|
984,141 | 14a3d874dc9c00a4a94c1678b7e5036f1b41d989 | from __future__ import unicode_literals
import itertools
import hashlib
import logging
import os
import os.path
import sqlite3
import sys
from mopidy import local
from mopidy.exceptions import ExtensionError
from mopidy.local import translator
from mopidy.models import Ref, SearchResult
import uritools
from playhouse.sqlite_ext import SqliteExtDatabase
from . import Extension
from .utils import to_track, to_album, to_artist, check_track
import db
logger = logging.getLogger(__name__)
class MoppinaLibrary(local.Library):
    """SQLite-backed local library provider for Mopidy.

    Stores tracks/albums/artists through the `db.Database` helper and
    answers browse/lookup/search requests with Mopidy model objects.
    (Python 2 code path: uses itertools.imap.)
    """

    name = 'moppina'

    def __init__(self, config):
        self._config = ext_config = config[Extension.ext_name]
        self._data_dir = Extension.get_data_dir(config)
        try:
            self._media_dir = config['local']['media_dir']
        except KeyError:
            raise ExtensionError('Mopidy-Local not enabled')
        self._dbpath = os.path.join(self._data_dir, 'moppina.db')
        # WAL journal + 64MB page cache; enforce FKs and check constraints.
        self._connection = SqliteExtDatabase(self._dbpath, pragmas={
            'journal_mode': 'wal',
            'cache_size': -1 * 64000,  # 64MB
            'foreign_keys': 1,
            'ignore_check_constraints': 0
        })
        self._db = db.Database(self._connection)
        logger.info('The Moppina library has started successfully')

    def add(self, track, tags=None, duration=None):
        """Insert or update a single track; failures are logged, not raised."""
        try:
            logger.debug('Try to add the track %s to the Moppina library',
                         track)
            self._db.upsert_track(check_track(track))
        except Exception:
            # bug fix: format string had a %s with no argument bound to it
            logger.exception('Failed to add %s to the Moppina library', track)

    def begin(self):
        """Start a scan: return an iterator over all known tracks."""
        logger.debug('Begin scan local library with Moppina')
        return itertools.imap(to_track, self._db.tracks())

    def browse(self, uri):
        """Return child Refs for a browse URI; [] on any error."""
        logger.info('Browse Moppina library for uri %s', uri)
        try:
            if uri == self.ROOT_DIRECTORY_URI:
                return [
                    Ref.directory(uri='local:artists', name='Artists'),
                    Ref.directory(uri='local:albums', name='Albums'),
                    Ref.directory(uri='local:tracks', name='Tracks')
                ]
            # NOTE: the plural prefixes must be tested before the singular
            # ones, since 'local:artist' is a prefix of 'local:artists'.
            elif uri.startswith('local:artists'):
                return [Ref.artist(uri=a.uri, name=a.name)
                        for a in self._db.artists()]
            elif uri.startswith('local:albums'):
                return [Ref.album(uri=a.uri, name=a.name)
                        for a in self._db.albums()]
            elif uri.startswith('local:tracks'):
                return [Ref.track(uri=t.uri, name=t.name)
                        for t in self._db.tracks()]
            elif uri.startswith('local:artist'):
                return [Ref.album(uri=a.uri, name=a.name)
                        for a in self._db.albums_by_artist(uri)]
            elif uri.startswith('local:album'):
                return [Ref.track(uri=t.uri, name=t.name)
                        for t in self._db.tracks_by_album(uri)]
            else:
                raise ValueError('Invalid browse URI')
        except Exception as e:
            logger.error('Error while browsing Moppina library for %s: %s',
                         uri, e)
            return []

    def clear(self):
        """Drop all library data; returns True per the Library contract."""
        logger.info('Clear the Moppina library database')
        self._db.clear()
        return True

    def close(self):
        logger.info('Close the Moppina library database')
        self._db.close()

    def flush(self):
        # Writes are committed per operation, so flushing is a no-op.
        return True

    def get_distinct(self, field, query=None):
        """Return distinct values of `field`, optionally filtered by query."""
        logger.info('Moppina library get distinct for %s: %s', field, query)
        return self._db.get_distinct(field, query or {})

    def load(self):
        """Return the number of tracks currently in the library."""
        logger.debug('Load the Moppina library')
        track_count = self._db.tracks_count()
        logger.info('%s tracks has been loaded by Moppina library',
                    track_count)
        return track_count

    def lookup(self, uri):
        """Resolve a track/album/artist URI to a list of Track models."""
        logger.info('Lookup Moppina library for %s', uri)
        if uri.startswith('local:album'):
            return list(itertools.imap(to_track,
                self._db.tracks_by_album(uri)
            ))
        elif uri.startswith('local:artist'):
            return list(itertools.imap(to_track,
                self._db.tracks_by_artist(uri)
            ))
        elif uri.startswith('local:track'):
            return list(itertools.imap(to_track,
                self._db.track_by_uri(uri)
            ))
        else:
            logger.error('Error looking up the Moppina library: '
                         'invalid lookup URI %s', uri)
            return []

    def remove(self, uri):
        # bug fix: format string had a %s with no argument bound to it
        logger.info('Remove %s from the Moppina library', uri)
        self._db.delete_track(uri)

    def search(self, query, limit=100, offset=0, exact=False, uris=None):
        """Search the library; an empty query pages through all tracks.

        `uris` is accepted for interface compatibility but unused.
        """
        logger.info('Search the Moppina library for %s: %s', query, exact)
        if not query:
            tracks = self._db.tracks().limit(limit).offset(offset)
            mopidy_tracks = itertools.imap(to_track, tracks)
            return SearchResult(uri='local:search', tracks=mopidy_tracks)
        if exact:
            artists, albums, tracks = self._db.search(query, limit, offset)
        else:
            artists, albums, tracks = self._db.fts_search(query, limit, offset)
        mopidy_artists = list(itertools.imap(to_artist, artists))
        mopidy_albums = list(itertools.imap(to_album, albums))
        mopidy_tracks = list(itertools.imap(to_track, tracks))
        return SearchResult(uri='local:search',
                            artists=mopidy_artists,
                            albums=mopidy_albums,
                            tracks=mopidy_tracks)
|
984,142 | de470854c8bae5d942ef2d8ccd77d1e59edd6862 | from globals import *
import life as lfe
import historygen
import judgement
import survival
import speech
import groups
import combat
import camps
import sight
import brain
import zones
import bad_numbers
import logging
import random
MAX_INFLUENCE_FROM = 80
MAX_INTROVERSION = 10
MAX_CHARISMA = 9
def init(life):
life['stats'] = historygen.create_background(life)
#life['stats']['charisma'] = random.randint(1, MAX_CHARISMA)
def desires_job(life):
#TODO: We recalculate this, but the answer is always the same.
_wont = brain.get_flag(life, 'wont_work')
if life['job'] or _wont:
if _wont:
_wont = brain.flag(life, 'wont_work', value=_wont-1)
return False
if not life['stats']['lone_wolf']:
return True
brain.flag(life, 'wont_work', value=1000)
return False
def desires_life(life, life_id):
if not lfe.execute_raw(life, 'judge', 'factors', life_id=life_id):
return False
return True
def desires_interaction(life):
if not lfe.execute_raw(life, 'talk', 'desires_interaction'):
return False
return True
def desires_first_contact_with(life, life_id):
#print life['name'], LIFE[life_id]['name'],brain.knows_alife_by_id(life, life_id)['alignment']
if not brain.knows_alife_by_id(life, life_id)['alignment'] == 'neutral':
return False
if life['group'] and not groups.is_leader(life, life['group'], life['id']):
#Don't talk if we're in a group and near our leader.
#TODO: #judgement Even then, we should consider having group members avoid non-members regardless.
#TODO: #judgement How do group types play into this?
_leader = brain.knows_alife_by_id(life, groups.get_leader(life, life['group']))
if _leader:
#TODO: #judgement Placeholder for future logic.
if bad_numbers.distance(life['pos'], _leader['life']['pos']) < 100:
return False
if life['stats']['motive_for_crime']>=4:
return True
if life['stats']['sociability']>=6:
return True
return False
def desires_conversation_with(life, life_id):
_knows = brain.knows_alife_by_id(life, life_id)
if not _knows:
logging.error('FIXME: Improperly Used Function: Doesn\'t know talking target.')
return False
if not lfe.execute_raw(life, 'talk', 'desires_conversation_with', life_id=life_id):
return False
if not judgement.can_trust(life, life_id):
return False
return True
def desires_to_create_group(life):
if life['group']:
return False
if not lfe.execute_raw(life, 'group', 'create_group'):
return False
return True
def wants_to_abandon_group(life, group_id, with_new_group_in_mind=None):
    """Return True if hostile-aligned members outnumber trusted ones.

    Bug fix: desires_group() calls this with a `with_new_group_in_mind`
    keyword, which previously raised TypeError. The parameter is accepted
    (currently unused) with a backward-compatible default.
    """
    _trusted = 0
    _hostile = 0
    for member in groups.get_group(life, group_id)['members']:
        if life['id'] == member:
            continue
        _knows = brain.knows_alife_by_id(life, member)
        if _knows['alignment'] == 'hostile':
            _hostile += 1
        else:
            _trusted += 1
    return _hostile > _trusted
def desires_group(life, group_id):
    """Whether `life` wants to join/switch to group `group_id`."""
    if life['group']:
        # bug fix: wants_to_abandon_group() does not take a
        # 'with_new_group_in_mind' keyword, so the original call raised
        # TypeError whenever life already had a group.
        return wants_to_abandon_group(life, life['group'])
    if judgement.judge_group(life, group_id) > get_minimum_group_score(life):
        return True
    return False
def desires_to_create_camp(life):
if not 'CAN_GROUP' in life['life_flags']:
return False
if life['group'] and not groups.get_camp(life['group']) and groups.is_leader(life, life['group'], life['id']):
if len(groups.get_group(life, life['group'])['members'])>1:
return True
return False
def desires_help_from(life, life_id):
return judgement.can_trust(life, life_id) and judgement.get_tension_with(life, life_id)<=judgement.get_max_tension_with(life, life_id)
def desires_shelter(life):
if not lfe.execute_raw(life, 'discover', 'desires_shelter'):
return False
#TODO: Why?
if life['state'] == 'needs':
return False
return True
def desires_to_join_camp(life, camp_id):
    """Whether `life` wants to join camp `camp_id`.

    Declines when already in a group/camp, when a lone wolf, when the
    known founder cannot be trusted, or when a join was already asked.
    """
    if life['group']:
        return False
    if life['camp']:
        # cleanup: leftover Python 2 debug print replaced with logging
        logging.debug('%s already has a camp (knows founder: %s)',
                      ' '.join(life['name']),
                      camps.knows_founder(life, life['camp']))
        return False
    if life['stats']['lone_wolf']:
        return False
    _memories = lfe.get_memory(life, matches={'text': 'heard_about_camp', 'camp': camp_id, 'founder': '*'})
    if _memories:
        _memory = _memories.pop()
        if not judgement.can_trust(life, _memory['founder']):
            logging.debug('%s does not trust the camp founder.',
                          ' '.join(life['name']))
            return False
    if lfe.get_memory(life, matches={'text': 'ask_to_join_camp', 'camp': camp_id}):
        return False
    return True
def desires_weapon(life):
    """True when the ALife has no weapon currently equipped."""
    if combat.get_equipped_weapons(life):
        #if life['stats']['firearms'] >= 5:
        return False
    return True
def battle_cry(life):
_battle_cry = lfe.execute_raw(life, 'talk', 'battle_cry')
if _battle_cry == 'action':
_battle_cry_action = lfe.execute_raw(life, 'talk', 'battle_cry_action')
lfe.say(life, _battle_cry_action, action=True)
def get_melee_skill(life):
return bad_numbers.clip((life['stats']['melee'])/10.0, 0.1, 1)
def get_firearm_accuracy(life):
return bad_numbers.clip((life['stats']['firearms'])/10.0, 0.35, 1)
def get_recoil_recovery_rate(life):
return bad_numbers.clip(life['stats']['firearms']/10.0, 0.4, 1)*.2
def get_antisocial_percentage(life):
return life['stats']['introversion']/float(MAX_INTROVERSION)
def get_minimum_group_score(life):
if life['group']:
return judgement.judge_group(life, life['group'])
return 0
def get_employability(life):
#TODO: Placeholder
return 50
def get_group_motive(life):
if life['stats']['motive_for_crime'] >= 6:
if life['stats']['motive_for_wealth'] >= 5:
return 'wealth'
return 'crime'
if life['stats']['motive_for_wealth'] >= 5:
return 'wealth'
return 'survival'
def get_minimum_camp_score(life):
if life['group'] and groups.is_leader(life, life['group'], life['id']):
return len(groups.get_group(life, life['group'])['members'])
return 3
def wants_group_member(life, life_id):
if not life['group']:
return False
if groups.is_member(life, life['group'], life_id):
return False
if not groups.is_leader(life, life['group'], life['id']):
return False
if not lfe.execute_raw(life, 'group', 'wants_group_member', life_id=life_id):
return False
_know = brain.knows_alife_by_id(life, life_id)
if not _know:
return False
if not judgement.can_trust(life, life_id):
return False
return True
def will_obey(life, life_id):
_know = brain.knows_alife_by_id(life, life_id)
if not _know:
return False
if judgement.can_trust(life, life_id):
return True
return False
def can_talk_to(life, life_id):
if LIFE[life_id]['asleep'] or LIFE[life_id]['dead']:
return False
if not lfe.execute_raw(life, 'talk', 'can_talk_to', life_id=life_id):
return False
return True
def can_camp(life):
if not lfe.execute_raw(life, 'camp', 'can_camp'):
return False
return True
def can_create_camp(life):
if not lfe.execute_raw(life, 'camp', 'can_create_camp'):
return False
return True
def can_bite(life):
_melee_limbs = lfe.get_melee_limbs(life)
if not _melee_limbs:
return False
for limb in _melee_limbs:
if 'CAN_BITE' in lfe.get_limb(life, limb)['flags']:
return limb
return None
def can_scratch(life):
    """Return a SHARP melee limb usable for scratching.

    Returns False when there are no melee limbs at all, None when none
    of them is SHARP (mirrors can_bite's contract).
    """
    _melee_limbs = lfe.get_melee_limbs(life)
    if not _melee_limbs:
        # cleanup: leftover Python 2 debug prints replaced with logging
        logging.debug('%s has no melee limbs.', ' '.join(life['name']))
        return False
    for limb in _melee_limbs:
        if 'SHARP' in lfe.get_limb(life, limb)['flags']:
            return limb
    logging.debug('%s cannot scratch.', ' '.join(life['name']))
    return None
def is_nervous(life, life_id):
if not lfe.execute_raw(life, 'judge', 'nervous', life_id=life_id):
return False
_dist = bad_numbers.distance(life['pos'], LIFE[life_id]['pos'])
if _dist <= sight.get_vision(LIFE[life_id])/2:
return True
return False
def is_aggravated(life, life_id):
if lfe.execute_raw(life, 'judge', 'aggravated', life_id=life_id):
return True
return False
def is_incapacitated(life):
_size = sum([lfe.get_limb(life, l)['size'] for l in life['body']])
_count = 0
for limb in life['body']:
_count += lfe.limb_is_cut(life, limb)
_count += lfe.get_limb_pain(life, limb)
if (_count/float(_size))>=.35:
return True
return False
def is_intimidated_by(life, life_id):
if lfe.execute_raw(life, 'safety', 'intimidated', life_id=life_id):
return True
return False
def is_intimidated(life):
#for target_id in judgement.get_targets(life, ignore_escaped=True):
# if is_intimidated_by(life, target_id):
# return True
for target_id in judgement.get_threats(life, ignore_escaped=True):
if is_intimidated_by(life, target_id):
return True
return False
def is_injured(life):
return len(lfe.get_bleeding_limbs(life)) > 0
def is_confident(life):
if 'player' in life:
return False
_friendly_confidence = judgement.get_ranged_combat_rating_of_target(life, life['id'])
_threat_confidence = 0
for target_id in judgement.get_trusted(life, visible=False):
_knows = brain.knows_alife_by_id(life, target_id)
if _knows['dead'] or _knows['asleep']:
continue
if _knows['last_seen_time']>30:
if brain.get_alife_flag(life, target_id, 'threat_score'):
_recent_mod = 1-(bad_numbers.clip(_knows['last_seen_time'], 0, 300)/300.0)
_score = brain.get_alife_flag(life, target_id, 'threat_score')
_friendly_confidence += _score*_recent_mod
else:
_friendly_confidence += 1
else:
_score = judgement.get_ranged_combat_rating_of_target(life, target_id)
brain.flag_alife(life, target_id, 'threat_score', value=_score)
_friendly_confidence += _score
for target_id in judgement.get_threats(life, ignore_escaped=False):
_knows = brain.knows_alife_by_id(life, target_id)
if _knows['dead'] or _knows['asleep']:
continue
if _knows['last_seen_time']:
if brain.get_alife_flag(life, target_id, 'threat_score'):
if _knows['last_seen_time']>50:
_recent_mod = 1-(bad_numbers.clip(_knows['last_seen_time'], 0, 600)/600.0)
else:
_recent_mod = 1
_score = brain.get_alife_flag(life, target_id, 'threat_score')
_threat_confidence += _score*_recent_mod
else:
_threat_confidence += 1
else:
_score = judgement.get_ranged_combat_rating_of_target(life, target_id, inventory_check=False)
brain.flag_alife(life, target_id, 'threat_score', value=_score)
_threat_confidence += _score
return _friendly_confidence-_threat_confidence>=-2
def is_threat_too_close(life):
_nearest_threat = judgement.get_nearest_threat(life)
if not _nearest_threat:
return False
_knows = brain.knows_alife_by_id(life, _nearest_threat)
if not _nearest_threat:
return False
if _knows['last_seen_time'] >= 100:
return False
_danger_close_range = int(lfe.execute_raw(life, 'safety', 'danger_close_range'))
if bad_numbers.distance(life['pos'], _knows['last_seen_at'])<_danger_close_range:
return True
return False
def has_threat_in_combat_range(life):
_engage_distance = combat.get_engage_distance(life)
for target_id in judgement.get_threats(life):
_target = brain.knows_alife_by_id(life, target_id)
if bad_numbers.distance(life['pos'], _target['last_seen_at']) <= _engage_distance:
return True
return False
def is_same_species(life, life_id):
    """True when both ALife entries share the same species."""
    return life['species'] == LIFE[life_id]['species']
def is_family(life, life_id):
_know = brain.knows_alife_by_id(life, life_id)
if not _know:
return False
for relation in ['son', 'daughter', 'mother', 'father', 'sibling']:
if brain.get_alife_flag(life, life_id, relation):
return True
return False
def is_child_of(life, life_id):
_know = brain.knows_alife_by_id(life, life_id)
if not _know:
return False
if not _know['escaped'] and _know['life']['dead']:
return False
for relation in ['mother', 'father']:
if brain.get_alife_flag(life, life_id, relation):
return True
return False
def is_parent_of(life, life_id):
_know = brain.knows_alife_by_id(life, life_id)
if not _know:
return False
for relation in ['son', 'daughter']:
if brain.get_alife_flag(life, life_id, relation):
return True
return False
def has_parent(life):
for life_id in life['know'].keys():
if is_child_of(life, life_id):
return True
return False
def has_child(life):
for life_id in life['know'].keys():
if is_parent_of(life, life_id):
return True
return False
def is_safe_in_shelter(life, life_id):
    # NOTE(review): both branches return True, so this always reports
    # "safe" regardless of shelter state; the first branch looks like a
    # stub awaiting real logic — confirm intended behavior. `life_id` is
    # also unused here.
    if not lfe.is_in_shelter(life):
        return True
    return True
def is_born_leader(life):
return life['stats']['is_leader']
def is_psychotic(life):
return life['stats']['psychotic']
def _has_attacked(life, life_id, target_list):
for memory in lfe.get_memory(life, matches={'text': 'heard about attack', 'attacker': life_id}):
if memory['target'] in target_list:
return True
return False
def has_attacked_trusted(life, life_id):
return _has_attacked(life, life_id, judgement.get_trusted(life))
def has_attacked_self(life, life_id):
return len(lfe.get_memory(life, matches={'text': 'shot_by', 'target': life_id}))>0
def react_to_attack(life, life_id):
_knows = brain.knows_alife_by_id(life, life_id)
if not _knows['alignment'] == 'hostile':
speech.start_dialog(life, _knows['life']['id'], 'establish_hostile')
if life['group']:
groups.announce(life,
life['group'],
'attacked_by_hostile',
target_id=_knows['life']['id'],
filter_if=lambda life_id: brain.knows_alife_by_id(life, life_id)['last_seen_time']<=30,
ignore_if_said_in_last=150)
def react_to_tension(life, life_id):
if brain.knows_alife_by_id(life, life_id)['alignment'] in ['hostile']:
return False
if life['group'] and not groups.is_leader(life, life['group'], life['id']) and groups.get_leader(life, life['group']):
if sight.can_see_target(life, groups.get_leader(life, life['group'])) and sight.can_see_target(LIFE[life_id], groups.get_leader(life, life['group'])):
return False
_disarm = brain.get_alife_flag(life, life_id, 'disarm')
if _disarm:
#For now...
if not sight.can_see_position(life, LIFE[life_id]['pos']):
groups.announce(life,
life['group'],
'attacked_by_hostile',
filter_if=lambda life_id: brain.knows_alife_by_id(life, life_id)['last_seen_time']<=30,
target_id=life_id)
return False
for item_uid in lfe.get_all_visible_items(LIFE[life_id]):
if ITEMS[item_uid]['type'] == 'gun':
break
else:
brain.unflag_alife(life, life_id, 'disarm')
speech.start_dialog(life, life_id, 'clear_drop_weapon')
return False
_time_elapsed = WORLD_INFO['ticks']-_disarm
if _time_elapsed>135 and not speech.has_sent(life, life_id, 'threaten'):
speech.start_dialog(life, life_id, 'threaten')
speech.send(life, life_id, 'threaten')
elif _time_elapsed>185:
speech.start_dialog(life, life_id, 'establish_hostile')
elif not speech.has_sent(life, life_id, 'confront'):
speech.start_dialog(life, life_id, 'confront')
speech.send(life, life_id, 'confront')
def ask_for_help(life, life_id):
    """Start and send the 'hurt' dialog to `life_id`, at most once."""
    # cleanup: removed unused local `_bleeding_limbs` (computed, never read)
    if not speech.has_sent(life, life_id, 'hurt'):
        speech.start_dialog(life, life_id, 'hurt')
        speech.send(life, life_id, 'hurt')
def wants_alignment_change(life, life_id):
_target = brain.knows_alife_by_id(life, life_id)
for memory in lfe.get_memory(life, matches={'text': 'healed_by'}):
if memory['target'] == life_id:
if _target['alignment'] == 'feign_trust':
return 'trust'
return None
def distance_from_pos_to_pos(life, pos1, pos2):
return bad_numbers.distance(pos1, pos2)
def get_goal_alignment_for_target(life, life_id):
_genuine = 100
_malicious = 100
if is_psychotic(life):
if life['group']:
if not life['group'] == LIFE[life_id]['group']:
return 'hostile'
else:
return 'hostile'
_malicious*=life['stats']['motive_for_crime']/10.0
if life['stats']['lone_wolf']:
_malicious*=.65
_genuine*=.65
if life['stats']['self_absorbed']:
_malicious*=.85
if not _genuine>=50 and not _malicious>=50:
return False
if _malicious>=75 and _genuine>=75:
return 'feign_trust'
if _genuine>_malicious:
return 'trust'
return 'hostile'
def change_alignment(life, life_id, alignment):
_knows = brain.knows_alife_by_id(life, life_id)
if not _knows:
brain.meet_alife(life, LIFE[life_id])
_knows = brain.knows_alife_by_id(life, life_id)
logging.debug('%s changed alignment of %s: %s' % (' '.join(life['name']), ' '.join(LIFE[life_id]['name']), alignment))
_knows['alignment'] = alignment
def establish_trust(life, life_id):
change_alignment(life, life_id, 'trust')
def establish_feign_trust(life, life_id):
change_alignment(life, life_id, 'feign_trust')
def establish_aggressive(life, life_id):
change_alignment(life, life_id, 'aggressive')
def establish_hostile(life, life_id):
change_alignment(life, life_id, 'hostile')
def establish_scared(life, life_id):
change_alignment(life, life_id, 'scared')
def declare_group_target(life, target_id, alignment):
change_alignment(life, target_id, alignment)
groups.announce(life, life['group'], 'add_group_target', target_id=target_id)
def declare_group(life, group_id, alignment):
groups.update_group_memory(life, group_id, 'alignment', alignment)
for member in groups.get_group_memory(life, group_id, 'members'):
change_alignment(life, member, alignment)
logging.debug('%s declared group %s %s.' % (' '.join(life['name']), group_id, alignment))
def declare_group_trusted(life, group_id):
declare_group(life, group_id, 'trust')
def declare_group_hostile(life, group_id):
declare_group(life, group_id, 'hostile')
def declare_group_scared(life, group_id):
declare_group(life, group_id, 'scared') |
984,143 | a3c90dadde7eb638e311d186ce139a916f8dd8d8 | import torch
from pytracking.features.featurebase import FeatureBase, MultiFeatureBase
from pytracking import TensorList
import pdb
import numpy as np
class Motion(MultiFeatureBase):
    """Frame-difference motion feature normalized to [-0.5, 0.5]."""

    def dim(self):
        # Single-channel (thresholded motion mask) feature.
        return 1

    def initialize(self):
        # Normalize a scalar stride of 1 into list form.
        if isinstance(self.pool_stride, int) and self.pool_stride == 1:
            self.pool_stride = [1]

    def stride(self):
        # Effective stride = pooling stride * feature cell size.
        ss = getattr(self.fparams, 'feature_params')[0].cell_size
        return TensorList([s * ss for s in self.pool_stride])

    def extract(self, im: torch.Tensor):
        """Thresholded RGB frame difference (channels 3:6 vs 6:9), cell-averaged.

        Bug fix: the original body referenced undefined names `fparam`
        and `cell_size` (NameError at runtime); parameters are now read
        from self.fparams exactly as extract_comb() does.
        """
        params = getattr(self.fparams, 'feature_params')[0]
        thresh_im = torch.abs(im[:, 3:6, ...] / 255 - im[:, 6:, ...] / 255)
        thresh_im = (thresh_im > params.threshold).float()
        thresh_im = params.thresh_feature_scale * thresh_im
        thresh_im = average_feature_region(thresh_im, params.cell_size)
        return TensorList([thresh_im])

    def extract_comb(self, im: torch.Tensor):
        """Single-channel variant of extract(), run on the GPU."""
        im = im.cuda()
        params = getattr(self.fparams, 'feature_params')[0]
        thresh_im = torch.abs(im[:, 3:4, ...] / 255 - im[:, 6:7, ...] / 255)
        thresh_im = (thresh_im > params.threshold).float()
        thresh_im = params.thresh_feature_scale * thresh_im
        thresh_im = average_feature_region(thresh_im, params.cell_size)
        return TensorList([thresh_im])
def average_feature_region(im, region_size):
    """Box-average `im` over non-overlapping region_size x region_size cells.

    Uses an integral image so each output cell is an O(1) four-corner
    lookup; the sum is normalised by the cell area (maxval fixed at 1.0).
    Input/output layout is (batch, channel, rows, cols).
    """
    norm = (region_size ** 2) * 1.0
    integral = integralVecImage(im)
    # Lower-right corners of each cell, in integral-image coordinates.
    hi_r = np.arange(region_size, integral.size(2), region_size)
    hi_c = np.arange(region_size, integral.size(3), region_size)
    lo_r = hi_r - region_size
    lo_c = hi_c - region_size
    # Standard integral-image rectangle sum per cell.
    cell_sums = (integral[:, :, hi_r, :][..., hi_c]
                 - integral[:, :, hi_r, :][..., lo_c]
                 - integral[:, :, lo_r, :][..., hi_c]
                 + integral[:, :, lo_r, :][..., lo_c])
    return cell_sums / norm


def integralVecImage(I):
    """Return the 2-D integral image of I, zero-padded on the first row/column."""
    acc = I.new_zeros(I.size(0), I.size(1), I.size(2) + 1, I.size(3) + 1)
    acc[:, :, 1:, 1:] = I.cumsum(2).cumsum(3)
    return acc
|
984,144 | 9120d731e916c8137443a1de9007d34595f511c6 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 16 15:50:32 2015
@author: liuweizhi
"""
## version 1
n=input();c=[0]*(n+1)
a=sorted([0]+map(int,raw_input().split()));b=list(set(a));
for i in range(len(b)):c[i]=a.count(b[i])
print [sum(1 for i in c[1:] if i==2),-1][max(c[1:]+[0])>2]
## version 2
n,list=input(),map(int,raw_input().split())
a=[list.count(i) for i in set(list) if i>0]
print [a.count(2),-1][max(a+[0])>2]
|
984,145 | 6d1a95ffeb6c02bb6556d60d119cbaf2f97ff5ee | from confluent_kafka import Consumer, KafkaError
def create_consumer():
    """Build a Kafka consumer for the local broker, reading from the
    earliest available offset in group 'example.group'."""
    config = {
        'bootstrap.servers': 'localhost:9092',
        'group.id': 'example.group',
        'default.topic.config': {
            'auto.offset.reset': 'smallest'
        },
    }
    return Consumer(config)
def consume_topic(topic, callback):
    """Consume `topic` forever, invoking callback(value) per message.

    Fixes over the original:
      * poll() is given a timeout and its result guarded against None
        (poll can return None on timeout; calling .error() on it crashed).
      * consumer.close() now runs even if the callback raises (try/finally).
    Partition-EOF events are skipped; any other error stops the loop.
    """
    consumer = create_consumer()
    consumer.subscribe([topic])
    try:
        while True:
            message = consumer.poll(timeout=1.0)
            if message is None:
                continue
            error = message.error()
            if not error:
                callback(message.value())
            elif error.code() != KafkaError._PARTITION_EOF:
                print(error)
                break
    finally:
        consumer.close()
|
984,146 | e5db5b97c3219bb91dd9e3b5d623f51d41d6fa72 | # Number Guessing Game.
# Modules
import random
import os
import sys
# Variables
i = 0
clear = lambda: os.system("cls")
# Computer will choose Random number 0 < n < 20.
computer = random.randint(0, 20)
print("Im thinking of a number between 0 and 20...")
# Checks Input and comapares with the AI.
while i <= 5:
i += 1
userInput = int(input())
print(f"Chances: {6 - i}")
if userInput < computer:
print(f"Greater than {userInput}")
elif userInput > computer:
print(f"Less than {userInput}")
else:
print(f"Correct Computer is {computer} and you choose {userInput}")
break
# Restart
choice = str(input("Would you like to play again (y/n)\n"))
if choice == "y" or "Y":
clear()
os.system("GuessGame.py")
# TODO Restart game if user wanted. still not fixed ...
|
984,147 | 241078fe066ab39f29b8aaa8c583da5c789e0703 |
##### 가우시안블러 처리해서 차이점 시각화만 시켜놓은거
import cv2
import numpy as np
cap = cv2.VideoCapture("highway.mp4") # highway
# video 가져와서 gray scale 처리 + 가우시안 블러처리 => 노이즈제거
_, first_frame = cap.read()
# /home/chun/PycharmProjects/opencv_project/main.py
# 파일경로설정을 제대로 안하면 여기서 에러가 날 수 있다.
first_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)
first_gray = cv2.GaussianBlur(first_gray, (5, 5), 0)
# frame을 하나하나 가져오는 while loop 를 돈다.
while True:
_, frame = cap.read()
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray_frame = cv2.GaussianBlur(gray_frame, (5, 5), 0)
# core part
# first frame 과 완전히 다른점 계산
difference = cv2.absdiff(first_gray, gray_frame)
_, difference = cv2.threshold(difference, 30, 255, cv2.THRESH_BINARY) # 두번째 인수가 노이즈 처리하는 threshold
##### 되나??
_, contours, hierarchy = cv2.findContours(difference, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if (area > 500):
x, y, w, h = cv2.boundingRect(contour) # 이 함수 써서 좌표추출
img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2) # 얻어낸 좌표를 통해 빨간 박스처리
cv2.imshow("difference", img)
# roi_vehchile = frame[y:y - 10 + h + 5, x:x - 8 + w + 10]
# cv2.imshow("First frame", first_frame)
# cv2.imshow("Frame", frame)
# cv2.imshow("difference", difference)
key = cv2.waitKey(20) # cv2.waitKey()는 ESC를 누르면 27을 RETURN
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
|
984,148 | 970017f1eef654cdeffcedf3c035f0463659c0a0 | import os
import _pickle as cPickle # for python 3.x
# import cPickle # for python 2.x
import numpy as np
from scipy.io.wavfile import read
from sklearn import mixture
import python_speech_features as mfcc
from sklearn import preprocessing
import warnings
warnings.filterwarnings("ignore")
def get_MFCC(sr,audio):
    """Return per-frame, zero-mean/unit-variance 13-dim MFCC features.

    25ms windows with a 10ms step; energy coefficient disabled.
    """
    features = mfcc.mfcc(audio,sr, 0.025, 0.01, 13,appendEnergy = False)
    features = preprocessing.scale(features)
    return features
#path to training data
source = "pygender\\train_data\\youtube\\female\\"
# source = "pygender\\train_data\\youtube\\male\\"
#path to save trained model
dest = "pygender\\"
files = [os.path.join(source,f) for f in os.listdir(source) if
f.endswith('.wav')]
# Stack MFCC frames from every .wav file into one feature matrix.
features = np.asarray(());
for f in files:
    sr,audio = read(f)
    vector = get_MFCC(sr,audio)
    if features.size == 0:
        features = vector
    else:
        features = np.vstack((features, vector))
# Fit one diagonal-covariance GMM over all files of this gender.
gmm = mixture.GaussianMixture(n_components = 8, max_iter = 200, covariance_type='diag',
n_init = 3)
gmm.fit(features)
# NOTE(review): `f` below is the loop variable left over from the loop
# above, so the model name comes from the *last* file's directory; this
# only works because all files share one source directory.
picklefile = f.split("\\")[-2].split(".wav")[0]+".gmm"
# model saved as male.gmm
#cPickle.dump(gmm, open(dest + picklefile, 'w'))
cPickle.dump(gmm,open(dest + picklefile,'wb'))
print('modeling completed for gender:',picklefile)
|
984,149 | babb79cae348f557d9136910799628d5dbb13991 | import torch
from torch import nn
class ONet(nn.Module):
def __init__(self, class_num):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, class_num) # age
self.training = True
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
return a
def raspnet(**kwargs):
    """Model factory: only the 'onet_a' variant is implemented."""
    name = kwargs['name']
    if name != 'onet_a':
        raise NotImplementedError
    return ONet(kwargs['class_num'])


if __name__ == '__main__':
    pass
|
984,150 | 0fee519b4717be9ec93f4989d7a7126505c15fbc | # -*- coding=utf-8
import cos_client
import logging
import random
import sys
import os
# Python 2 idiom; reload(sys) was commonly paired with sys.setdefaultencoding.
reload(sys)
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format="%(asctime)s - %(message)s")
# Credentials come from the environment; a missing variable fails fast with
# KeyError at import time.
access_id = os.environ["ACCESS_ID"]
access_key = os.environ["ACCESS_KEY"]
test_num = 2
# Random suffix so concurrent runs do not collide on object names.
file_id = str(random.randint(0, 1000)) + str(random.randint(0, 1000))
# Module-level fixtures (Python 2 print statements); the test runner calls
# these once before/after the module's tests.
def setUp():
    print "Test interface"
def tearDown():
    print "test over"
def gen_file(path, size):
    """Create a sparse file of ``size`` MiB (plus one byte) at ``path``.

    Seeking past EOF and writing a single byte lets the filesystem allocate
    the file lazily, so even multi-MB fixtures are created instantly.
    ``size`` may be fractional (the call sites use e.g. 5.1*i + 0.1); the
    offset is truncated to an int, which also keeps seek() valid on Python 3.
    The original opened in text mode and never closed the handle on error;
    binary mode + a context manager fix both.
    """
    with open(path, 'wb') as handle:
        handle.seek(int(1024 * 1024 * size))
        handle.write(b'\x00')
def Test():
    """End-to-end exercise of the COS client (Python 2): bucket lifecycle,
    bucket/object ACLs, and upload/download/delete of generated fixtures.
    Progress is printed and flushed so CI logs stream in order."""
    for i in range(test_num):
        # Random bucket name so repeated runs do not collide.
        bucket_id = str(random.randint(0, 1000)) + str(random.randint(0, 1000))
        conf = cos_client.CosConfig(
            appid="1252448703",
            bucket="test" + str(bucket_id),
            region="cn-north",
            access_id=access_id,
            access_key=access_key,
            part_size=1,
            max_thread=5)
        client = cos_client.CosS3Client(conf)
        op_int = client.op_int()
        print "Test create bucket " + conf._bucket
        sys.stdout.flush()
        rt = op_int.create_bucket()
        assert rt
        print "Test get bucket " + conf._bucket
        sys.stdout.flush()
        rt = op_int.get_bucket()
        assert rt
        print "Test put bucket acl " + conf._bucket
        sys.stdout.flush()
        rt = op_int.put_bucket_acl("anyone,43,123", None, "anyone")
        assert rt
        print "Test get bucket acl " + conf._bucket
        sys.stdout.flush()
        rt = op_int.get_bucket_acl()
        assert rt
        print "Test delete bucket " + conf._bucket
        sys.stdout.flush()
        rt = op_int.delete_bucket()
        assert rt
        # Object operations run against a fixed, pre-existing bucket.
        conf = cos_client.CosConfig(
            appid="1252448703",
            bucket="lewzylu06",
            region="cn-north",
            access_id=access_id,
            access_key=access_key,
            part_size=1,
            max_thread=5
        )
        client = cos_client.CosS3Client(conf)
        op_int = client.op_int()
        # Fractional sizes cross the multipart threshold (part_size=1 MB).
        file_size = 5.1 * i + 0.1
        file_name = "tmp" + file_id + "_" + str(file_size) + "MB"
        print "Test upload " + file_name
        sys.stdout.flush()
        gen_file(file_name, file_size)
        rt = op_int.upload_file(file_name, file_name)
        assert rt
        print "Test put object acl " + file_name
        sys.stdout.flush()
        rt = op_int.put_object_acl("anyone,43,123", None, "anyone", file_name)
        assert rt
        print "Test get object acl " + file_name
        sys.stdout.flush()
        rt = op_int.get_object_acl(file_name)
        assert rt
        print "Test download " + file_name
        sys.stdout.flush()
        rt = op_int.download_file(file_name, file_name)
        assert rt
        os.remove(file_name)
        print "Test delete " + file_name
        sys.stdout.flush()
        rt = op_int.delete_file(file_name)
        assert rt
if __name__ == "__main__":
    setUp()
    Test()
|
984,151 | fc9de652724dd8364b5dd7fd47a10a09e3aefdd2 | """
This is a boilerplate pipeline 'motion_regression'
generated using Kedro 0.17.6
"""
from kedro.pipeline import Pipeline, node
from .nodes import (
predict_force,
fit_motions,
create_model_from_motion_regression,
motion_regression_summaries,
motion_regression_plots,
)
def create_pipeline(**kwargs):
    """Assemble the motion-regression pipeline: force prediction, motion
    regression, summaries, plots, and model creation.  Every node carries
    the ``motion_regression`` tag so the group can be run in isolation."""
    shared_tags = ["motion_regression"]
    pipeline_nodes = [
        node(
            func=predict_force,
            inputs=["data_ek_smooth", "added_masses", "ship_data", "vmm"],
            outputs="data_with_force",
            name="predict_force_node",
            tags=shared_tags,
        ),
        node(
            func=fit_motions,
            inputs=[
                "data_with_force",
                "added_masses",
                "ship_data",
                "vmm",
                "params:motion_regression.exclude_parameters",
            ],
            outputs=["regression", "derivatives"],
            name="fit_motions_node",
            tags=shared_tags,
        ),
        node(
            func=motion_regression_summaries,
            inputs=["regression"],
            outputs=["summary_X", "summary_Y", "summary_N"],
            name="motion_regression_summaries_node",
            tags=shared_tags,
        ),
        node(
            func=motion_regression_plots,
            inputs=["regression"],
            outputs=["plot_X", "plot_Y", "plot_N"],
            name="motion_regression_plots_node",
            tags=shared_tags,
        ),
        node(
            func=create_model_from_motion_regression,
            inputs=["regression"],
            outputs="model",
            name="create_model_from_motion_regression_node",
            tags=shared_tags,
        ),
    ]
    return Pipeline(pipeline_nodes)
|
984,152 | 2d7fd4bd79bcfdd8fe6d41de27c72b53b7640e8d | import random
from unittest import TestCase
from nose_parameterized import parameterized
from find_mode_in_binary_search_tree import Solution
from utils_tree import construct_tree
class TestFindModeInBST(TestCase):
    """Behavioural tests for Solution.find_mode over a handful of BSTs."""

    # (tree as level lists, expected modes) — answer order is irrelevant.
    _CASES = [
        [construct_tree([]), []],
        [construct_tree([[1], [1, 1]]), [1]],
        [construct_tree([[3], [3, 3], [2, None, 2]]), [3]],
        [
            construct_tree(
                [[3], [3, 3], [2, 1, 2, 1], [2, 1, None, None, 2, 1]]
            ),
            [1, 2],
        ],
    ]

    @parameterized.expand(_CASES)
    def test_find_mode(self, root, expected_ans):
        # Exercise
        actual = Solution().find_mode(root)
        # Verify: modes may come back in any order, so compare sorted.
        self.assertEqual(sorted(actual), sorted(expected_ans))
|
984,153 | ee516cb00d72b73cf86c590757da91e389f68db6 | # Copyright (c) 2019 Zijun Wei.
# Licensed under the MIT License.
# Author: Zijun Wei
# Usage(TODO):
# Email: hzwzijun@gmail.com
# Created: 16/Feb/2019 12:19
import os, sys
# Make the project package importable when run as a standalone script.
project_root = os.path.join(os.path.expanduser('~'), 'Dev/AttributeNet3')
sys.path.append(project_root)
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
import tqdm
from PyUtils.file_utils import get_dir
from PyUtils.pickle_utils import loadpickle
from gensim import models
import random
# Fixed seed so the shuffle/dropout augmentation below is reproducible.
random.seed(0)
user_root = os.path.expanduser('~')
# Pickled dict with 'data' (tag sentences, keyed by id below) and 'max_len'.
training_data = loadpickle(os.path.join(user_root, 'Dev/AttributeNet3/LanguageData/raw_tag_sentences.pkl'))
save_directory = get_dir( os.path.join(user_root, 'Dev/AttributeNet3/LanguageData/word2vec_models'))
training_sentences = training_data['data']
max_len = training_data['max_len']
# word2vec hyper-parameters: the context window spans the longest sentence.
embedding_dim = 300
window_size = max_len
texts = []
# Each sentence is augmented this many times by shuffling + random dropout.
shuffle_times = 10
def random_drop(s_text, drop_rate=0.1):
    """Return a copy of ``s_text`` with each tag independently dropped.

    Each element survives with probability ``1 - drop_rate``.  Exactly one
    uniform draw is consumed per element, in order, so the module RNG
    sequence matches the original loop implementation.
    """
    return [tag for tag in s_text if random.uniform(0, 1) >= drop_rate]
# Build the augmented corpus: each tag sentence is shuffled and tag-dropped
# `shuffle_times` times.  NOTE(review): random.shuffle mutates the list
# stored inside training_sentences in place.
for s_cid in tqdm.tqdm(training_sentences, total=len(training_sentences), desc="Creating texts for trianing"):
    s_text = training_sentences[s_cid]
    aug_times = 0
    while aug_times < shuffle_times:
        random.shuffle(s_text)
        random_drop_text = random_drop(s_text)
        texts.append(random_drop_text)
        aug_times += 1
print("Training on {} Sentences, shuffle rate {}".format(len(texts), shuffle_times))
# Skip-gram (sg=1) with negative sampling (hs=0, 15 negatives), 10 epochs;
# tags seen fewer than 100 times are ignored.
model = models.Word2Vec(texts, size=embedding_dim, window=window_size, workers=12, sg=1, hs=0, negative=15, iter=10, alpha=0.025, min_count=100)
model.save(os.path.join(save_directory, "word2vec_raw_shuffle.model"))
print("Done")
984,154 | a118a3c4052a6db18188d161a7153629ca7d19aa | # Generated by Django 2.2.5 on 2019-11-06 07:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make teach.File a nullable ImageField stored under 'Taokhoa/'."""

    dependencies = [
        ('VCS', '0004_auto_20191105_2024'),
    ]
    operations = [
        migrations.AlterField(
            model_name='teach',
            name='File',
            field=models.ImageField(null=True, upload_to='Taokhoa/'),
        ),
    ]
|
984,155 | c692a0d961c8d4752d331bb8d65bdb4998bffcf2 | import math
n = int(input())
i = 0;
while i<n:
x1,y1,r1,x2,y2,r2 = map(int, input().split())
d = math.sqrt(math.pow(x2-x1, 2)+math.pow(y2-y1,2))
if x1==x2 and y1 == y2:
if r1 == r2:
print(-1)
else:
print(0)
else:
if r2 > r1:
t = r1
r1 = r2
r2 = t
if d == r1 + r2:
print(1)
elif r1 - r2 < d < r1 + r2:
print(2)
elif d > r1 + r2:
print(0)
elif d == r1 - r2:
print(1)
elif d < r1 - r2:
print(0)
i+=1 |
984,156 | 3b67a36b50ecc19aaa152eafc1994694c47782f3 | #!/usr/bin/env python3
import logging
import socket
from functools import partial
from time import sleep
from argparse import ArgumentParser
# CLI: one or more "host:port" addresses to wait for, plus an overall
# timeout (seconds) and a polling period (seconds).
parser = ArgumentParser()
parser.add_argument('address', nargs='+')
parser.add_argument('-t', '--timeout', default=600, type=int)
parser.add_argument('--period', default=0.5, type=float)
def check_port(host, port):
    """Return True when a TCP connection to (host, port) succeeds.

    A connect timeout is set so a black-holing firewall cannot stall a
    single probe for the OS default connect timeout (tens of seconds),
    which would defeat the --period polling loop.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.settimeout(5)
        # connect_ex returns 0 on success, an errno otherwise — no exception.
        return sock.connect_ex((host, port)) == 0
    finally:
        sock.close()
def main():
    """Poll every endpoint until all are reachable.

    Returns 0 once every address accepts a TCP connection, or 10 after
    the overall timeout has elapsed.
    """
    args = parser.parse_args()

    probes = set()
    for address in args.address:
        host_part, port_part = address.rsplit(":", 1)
        # Strip IPv6 brackets and whitespace from the host part.
        probes.add(partial(check_port, host_part.strip('[]').strip(), int(port_part)))

    for _ in range(int(args.timeout / args.period)):
        # Probe every endpoint each round (no short-circuit, matching the
        # original behaviour of always touching all addresses).
        outcomes = [probe() for probe in probes]
        if all(outcomes):
            return 0
        sleep(args.period)

    logging.fatal("Service awaiting timeout")
    return 10


if __name__ == '__main__':
    exit(main())
984,157 | 2654faa1425695c1f97d20ca68166f44ce473784 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2022 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import copy
import logging
import operator
from functools import reduce
from typing import Any, Dict, List, Optional, Set, Union
from django.conf import settings
from django.core.paginator import Paginator
from django.db.models import Q
from django.db import transaction
from django.utils import timezone
from django.utils.translation import get_language
from django.utils.translation import ugettext_lazy as _
from apps.backend.subscription.errors import SubscriptionTaskNotReadyError
from apps.exceptions import ApiResultError
from apps.node_man import constants, exceptions, models, tools
from apps.node_man.handlers import validator
from apps.node_man.handlers.ap import APHandler
from apps.node_man.handlers.cloud import CloudHandler
from apps.node_man.handlers.cmdb import CmdbHandler
from apps.node_man.handlers.host import HostHandler
from apps.utils import APIModel
from apps.utils.basic import filter_values, to_int_or_default
from apps.utils.local import get_request_username
from apps.utils.time_tools import local_dt_str2utc_dt
from common.api import NodeApi
# Module-wide logger: messages go to the project's "app" logging channel.
logger = logging.getLogger("app")
class JobHandler(APIModel):
    def __init__(self, job_id=None, *args, **kwargs):
        """Handler bound to a single Job row; ``job_id`` selects which one."""
        super().__init__(*args, **kwargs)
        self.job_id = job_id
    def _get_data(self) -> models.Job:
        """Fetch the bound Job row, translating DoesNotExist into a domain error."""
        try:
            return models.Job.objects.get(pk=self.job_id)
        except models.Job.DoesNotExist:
            raise exceptions.JobDostNotExistsError(_("不存在ID为{job_id}的任务").format(job_id=self.job_id))
def ugettext_to_unicode(self, ip_filter_list: list):
"""
针对ip_filter_list里的ugettext_lazy做字符串化的操作
:param ip_filter_list: ip过滤列表
:return: 格式化后的ip过滤列表
"""
# ugettext_lazy需要转为unicode才可进行序列化
for filter_info in ip_filter_list:
filter_info["msg"] = str(filter_info["msg"])
return ip_filter_list
    def check_ap_and_biz_scope(self, node_type: str, host: dict, cloud_info: dict):
        """
        Resolve a host's access point id and its concrete node type.

        Compatible with subscription-style registration:
        - non-default cloud area: the AP falls back to the cloud area's config
          unless the host explicitly carries one;
        - default (direct) cloud area AGENT: the AP is taken from the host;
        - PROXY: the host's AP wins, else the cloud area's config.

        :param node_type: requested node type (AGENT / PROXY)
        :param host: host info dict
        :param cloud_info: cloud-area info keyed by bk_cloud_id
        :return: (host_ap_id, host_node_type)
        """
        if node_type == constants.NodeType.AGENT:
            if host["bk_cloud_id"] == constants.DEFAULT_CLOUD:
                # Default cloud area -> direct AGENT; the AP must come with the host.
                host_ap_id = host["ap_id"]
                host_node_type = constants.NodeType.AGENT
            else:
                # Non-default cloud area -> PAGENT; an explicitly passed ap_id
                # wins (supports config reload), else the cloud area's AP.
                host_ap_id = host.get("ap_id") or cloud_info.get(host["bk_cloud_id"], {}).get("ap_id", "")
                host_node_type = constants.NodeType.PAGENT
        else:
            # PROXY: an explicitly passed ap_id wins (supports config reload).
            host_ap_id = host.get("ap_id") or cloud_info.get(host["bk_cloud_id"], {}).get("ap_id", "")
            host_node_type = constants.NodeType.PROXY
        return host_ap_id, host_node_type
    def get_commands(self, request_bk_host_id: int, is_uninstall: bool):
        """
        Fetch the manual install/uninstall commands for one host of this job.

        :param request_bk_host_id: host id
        :param is_uninstall: whether the uninstall command is wanted
        :return: command solutions from NodeApi.fetch_commands, or a PENDING
                 status dict when the host's install step has not started yet
        """
        job = self.data
        try:
            host: models.Host = models.Host.objects.get(bk_host_id=request_bk_host_id)
        except models.Host.DoesNotExist:
            raise exceptions.HostNotExists()
        host_id__pipeline_id_map: Dict[int, str] = {}
        # host id -> subscription instance id
        host_id__sub_inst_id_map: Dict[int, int] = {}
        # Pull the full task status for this job's subscription.
        task_result = NodeApi.get_subscription_task_status(
            {"subscription_id": job.subscription_id, "task_id_list": job.task_id_list, "return_all": True}
        )
        for result in task_result["list"]:
            bk_host_id = result["instance_info"]["host"].get("bk_host_id")
            if not bk_host_id:
                # Host failed to register; it has no pipeline to fetch.
                continue
            host_id__sub_inst_id_map[bk_host_id] = result["record_id"]
            # Locate the install/uninstall sub-step's pipeline id per host.
            sub_steps = result["steps"][0]["target_hosts"][0]["sub_steps"]
            for step in sub_steps:
                if step["node_name"] in ["安装", "卸载Agent", "Uninstall Agent", "Install"] and (
                    step["status"] in [constants.JobStatusType.RUNNING, constants.JobStatusType.SUCCESS]
                ):
                    pipeline_id = step["pipeline_id"]
                    host_id__pipeline_id_map[bk_host_id] = pipeline_id
        if request_bk_host_id not in host_id__pipeline_id_map.keys():
            # The install step has not run for this host yet.
            return {"status": constants.JobStatusType.PENDING}
        command_solutions: Dict[str, Union[List[str], str]] = NodeApi.fetch_commands(
            {
                "bk_host_id": host.bk_host_id,
                "host_install_pipeline_id": host_id__pipeline_id_map[host.bk_host_id],
                "is_uninstall": is_uninstall,
                "sub_inst_id": host_id__sub_inst_id_map[host.bk_host_id],
            }
        )
        return command_solutions
def list(self, params: dict, username: str):
"""
Job 任务历史列表
:param params: 请求参数的字典
:param username: 用户名
"""
kwargs = {
**tools.JobTools.parse_job_list_filter_kwargs(query_params=params),
"status__in": params.get("status"),
"created_by__in": params.get("created_by"),
"start_time__gte": params.get("start_time"),
"start_time__lte": params.get("end_time"),
"is_auto_trigger": False if params.get("hide_auto_trigger_job") else None,
}
# 获得业务id与名字的映射关系(用户有权限获取的业务)
all_biz_info = CmdbHandler().biz_id_name_without_permission()
biz_info = CmdbHandler().biz_id_name({"action": constants.IamActionType.task_history_view})
biz_permission = list(biz_info.keys())
if not biz_permission:
return {"total": 0, "list": []}
if params.get("job_id"):
job_ids = set()
for job_id_var in params["job_id"]:
# 获得合法整型
job_id = to_int_or_default(job_id_var)
if job_id is None:
continue
job_ids.add(job_id)
kwargs["id__in"] = job_ids
# 业务权限
search_biz_ids = params.get("bk_biz_id")
all_biz_ids = set(all_biz_info.keys())
if search_biz_ids:
# 字典的 in 比列表性能更高
biz_scope = [bk_biz_id for bk_biz_id in search_biz_ids if bk_biz_id in biz_info]
else:
biz_scope = biz_permission
if set(biz_scope) & all_biz_ids == all_biz_ids:
biz_scope_query_q = Q()
else:
biz_scope_query_q = reduce(
operator.or_,
[Q(bk_biz_scope__contains=bk_biz_id) for bk_biz_id in search_biz_ids],
Q()
)
# 仅查询所有业务时,自身创建的 job 可见
if not search_biz_ids:
biz_scope_query_q |= Q(created_by=username)
# ip 搜索
inner_ip_query_q = Q()
if params.get("inner_ip_list"):
instance_id_list = tools.JobTools.get_instance_ids_by_ips(params["inner_ip_list"])
# 处理时间范围
instance_record_query_kwargs = {
"instance_id__in": instance_id_list,
"create_time__gte": params.get("start_time"),
"create_time__lte": params.get("end_time"),
}
# subscription_id 查询更快,但使用 subscription_id 不够准确,subscription 的范围有变化的可能
task_id_list = models.SubscriptionInstanceRecord.objects.filter(
**filter_values(instance_record_query_kwargs)
).values_list("task_id", flat=True)
# 带了 ip 查询条件,如果没有该 ip 的任务,应当返回无数据
if not task_id_list:
return {"total": 0, "list": []}
inner_ip_query_q = reduce(operator.or_, [Q(task_id_list__contains=task_id) for task_id in task_id_list])
# 过滤None值并筛选Job
# 此处不过滤空列表(filter_empty=False),job_id, job_type 存在二次解析,若全部值非法得到的是空列表,期望应是查不到数据
job_result = models.Job.objects.filter(biz_scope_query_q, inner_ip_query_q, **filter_values(kwargs))
# 过滤没有业务的Job
job_result = job_result.filter(~Q(bk_biz_scope__isnull=True) & ~Q(bk_biz_scope={}))
# 排序
if params.get("sort"):
sort_head = params["sort"]["head"]
job_result = job_result.extra(select={sort_head: f"JSON_EXTRACT(statistics, '$.{sort_head}')"})
if params["sort"]["sort_type"] == constants.SortType.DEC:
job_result = job_result.order_by(str("-") + sort_head)
else:
job_result = job_result.order_by(sort_head)
# 可以接受 queryset 作为参数
paginator = Paginator(job_result.values(), params["pagesize"])
# 分页之后再转换为列表
job_page: List[Dict[str, Any]] = list(paginator.page(params["page"]).object_list)
# 填充策略名称
sub_infos = models.Subscription.objects.filter(
id__in=[job["subscription_id"] for job in job_page], show_deleted=True
).values("id", "name")
# 建立订阅ID和订阅详细信息的映射
sub_id__sub_info_map = {sub_info["id"]: sub_info for sub_info in sub_infos}
# 预处理数据:字段填充,计算等
for job in job_page:
job.update(tools.JobTools.unzip_job_type(job["job_type"]))
# 填充订阅相关信息
job["policy_name"] = sub_id__sub_info_map.get(job["subscription_id"], {}).get("name")
if not job["end_time"]:
job["cost_time"] = f'{(timezone.now() - job["start_time"]).seconds}'
else:
job["cost_time"] = f'{(job["end_time"] - job["start_time"]).seconds}'
job["bk_biz_scope_display"] = [all_biz_info.get(biz) for biz in job["bk_biz_scope"]]
job["job_type_display"] = constants.JOB_TYPE_DICT.get(job["job_type"])
# 使用分页器的 count,避免重复计算
return {"total": paginator.count, "list": job_page}
    def install(
        self,
        hosts: List[Dict[str, Any]],
        op_type: str,
        node_type: str,
        job_type: str,
        ticket: str,
        extra_params: Dict[str, Any],
        extra_config: Dict[str, Any],
    ):
        """
        Job processor for install-family operations (install / reinstall /
        replace / reload / uninstall ...).

        :param hosts: host descriptors to operate on
        :param op_type: operation type
        :param node_type: node type
        :param job_type: job type constant
        :param ticket: user ticket, forwarded to third-party APIs during async execution
        :param extra_params: extra subscription step params
        :param extra_config: extra subscription step config
        """
        # Collect cloud ids, AP ids, inner IPs and business scope from hosts.
        ap_ids = set()
        is_manual = set()
        bk_biz_scope = set()
        bk_cloud_ids = set()
        inner_ips_info: Dict[str, Set[str]] = {"inner_ips": set(), "inner_ipv6s": set()}
        for host in hosts:
            bk_cloud_ids.add(host["bk_cloud_id"])
            bk_biz_scope.add(host["bk_biz_id"])
            is_manual.add(host["is_manual"])
            # For each supported IP field, gather the values that are present.
            if host.get("inner_ip"):
                inner_ips_info["inner_ips"].add(host["inner_ip"])
            if host.get("inner_ipv6"):
                inner_ips_info["inner_ipv6s"].add(host["inner_ipv6"])
            # User ticket, used when the backend calls third-party APIs asynchronously.
            host["ticket"] = ticket
            if host.get("ap_id"):
                ap_ids.add(host["ap_id"])
        # Mixing manual and automatic installs in one job is not allowed.
        # (Deliberately not folded into the job validator yet.)
        if len(is_manual) > 1:
            raise exceptions.MixedOperationError
        else:
            is_manual = list(is_manual)[0]
        # All businesses: { bk_biz_id: bk_biz_name, ...}
        biz_info = CmdbHandler().biz_id_name_without_permission()
        # Cloud areas: { cloud_id: {'bk_cloud_name': name, 'ap_id': id}, ...}
        cloud_info = CloudHandler().list_cloud_info(bk_cloud_ids)
        # Access points: { id: name, ...}
        ap_id_name = APHandler().ap_list(ap_ids)
        # Existing hosts keyed by cloud+ip:
        # { bk_cloud_id+ip: {'bk_host_id': ..., 'bk_biz_id': ..., 'node_type': ...}}
        host_infos_gby_ip_key: Dict[str, List[Dict[str, Any]]] = HostHandler.get_host_infos_gby_ip_key(
            ips=inner_ips_info["inner_ips"], ip_version=constants.CmdbIpVersion.V4.value
        )
        host_infos_gby_ip_key.update(
            HostHandler.get_host_infos_gby_ip_key(
                ips=inner_ips_info["inner_ipv6s"], ip_version=constants.CmdbIpVersion.V6.value
            )
        )
        # Validate the request; a reinstall requires the IP to already exist.
        ip_filter_list, accept_list, proxy_not_alive = validator.install_validate(
            hosts, op_type, node_type, job_type, biz_info, cloud_info, ap_id_name, host_infos_gby_ip_key
        )
        if proxy_not_alive:
            raise exceptions.AliveProxyNotExistsError(
                context="不存在可用代理", data={"job_id": "", "ip_filter": self.ugettext_to_unicode(proxy_not_alive)}
            )
        if not accept_list:
            # Every host was filtered out.
            raise exceptions.AllIpFiltered(data={"job_id": "", "ip_filter": self.ugettext_to_unicode(ip_filter_list)})
        if any(
            [
                op_type in [constants.OpType.INSTALL, constants.OpType.REPLACE, constants.OpType.RELOAD],
                # With DHCP adaptation enabled, reinstall also goes through basic info.
                settings.BKAPP_ENABLE_DHCP and op_type in [constants.OpType.REINSTALL],
            ]
        ):
            # Install / replace-proxy style operations.
            subscription_nodes = self.subscription_install(accept_list, node_type, cloud_info, biz_info)
            subscription = self.create_subscription(
                job_type, subscription_nodes, extra_params=extra_params, extra_config=extra_config
            )
        else:
            # Reinstall / uninstall etc.: passwords and keys must be validated.
            subscription_nodes, ip_filter_list = self.update_host(accept_list, ip_filter_list, is_manual)
            if not subscription_nodes:
                raise exceptions.AllIpFiltered(
                    data={"job_id": "", "ip_filter": self.ugettext_to_unicode(ip_filter_list)}
                )
            subscription = self.create_subscription(
                job_type, subscription_nodes, extra_params=extra_params, extra_config=extra_config
            )
        # ugettext_lazy proxies must be stringified before serialization.
        ip_filter_list = self.ugettext_to_unicode(ip_filter_list)
        create_job_result: Dict[str, Any] = tools.JobTools.create_job(
            job_type=job_type,
            subscription_id=subscription["subscription_id"],
            task_id=subscription["task_id"],
            bk_biz_scope=bk_biz_scope,
            statistics={
                "success_count": 0,
                "failed_count": len(ip_filter_list),
                "pending_count": len(subscription_nodes),
                "running_count": 0,
                "total_count": len(ip_filter_list) + len(subscription_nodes),
            },
            error_hosts=ip_filter_list,
        )
        return {**create_job_result, "ip_filter": ip_filter_list}
    def subscription_install(self, accept_list: list, node_type: str, cloud_info: dict, biz_info: dict):
        """
        Build subscription nodes for a fresh-install job.

        :param accept_list: hosts that passed validation and need installing
        :param node_type: node type
        :param cloud_info: cloud area ids and their names/APs
        :param biz_info: business ids and their names
        :return: list of subscription node dicts
        """
        # Node list used by the subscription task to register and install hosts.
        subscription_nodes = []
        cipher = tools.HostTools.get_asymmetric_cipher()
        for host in accept_list:
            host_ap_id, host_node_type = self.check_ap_and_biz_scope(node_type, host, cloud_info)
            instance_info = copy.deepcopy(host)
            instance_info.update(
                {
                    "is_manual": host["is_manual"],
                    "ap_id": host_ap_id,
                    "install_channel_id": host.get("install_channel_id"),
                    "bk_os_type": constants.BK_OS_TYPE[host["os_type"]],
                    "bk_host_innerip": host.get("inner_ip", ""),
                    "bk_host_innerip_v6": host.get("inner_ipv6", ""),
                    "bk_host_outerip": host.get("outer_ip", ""),
                    "bk_host_outerip_v6": host.get("outer_ipv6", ""),
                    "login_ip": host.get("login_ip", ""),
                    "username": get_request_username(),
                    "bk_biz_id": host["bk_biz_id"],
                    "bk_biz_name": biz_info.get(host["bk_biz_id"]),
                    "bk_cloud_id": host["bk_cloud_id"],
                    "bk_cloud_name": str(cloud_info.get(host["bk_cloud_id"], {}).get("bk_cloud_name")),
                    "bk_addressing": host["bk_addressing"],
                    "bk_supplier_account": settings.DEFAULT_SUPPLIER_ACCOUNT,
                    "host_node_type": host_node_type,
                    "os_type": host["os_type"],
                    "auth_type": host.get("auth_type", "MANUAL"),
                    "account": host.get("account", "MANUAL"),
                    "port": host.get("port"),
                    # Credentials travel asymmetrically encrypted, tagged with a prefix.
                    "password": tools.HostTools.USE_ASYMMETRIC_PREFIX + cipher.encrypt(host.get("password", "")),
                    "key": tools.HostTools.USE_ASYMMETRIC_PREFIX + cipher.encrypt(host.get("key", "")),
                    "retention": host.get("retention", 1),
                    "peer_exchange_switch_for_agent": host.get("peer_exchange_switch_for_agent"),
                    "bt_speed_limit": host.get("bt_speed_limit"),
                    "enable_compression": host.get("enable_compression"),
                    "agent_setup_extra_info": {"force_update_agent_id": host.get("force_update_agent_id", False)},
                }
            )
            if host_node_type == constants.NodeType.PROXY and host.get("data_path"):
                # Proxies additionally carry a data-file path.
                instance_info.update({"data_path": host["data_path"]})
            if host.get("bk_host_id"):
                instance_info.update({"bk_host_id": host.get("bk_host_id")})
            # Attach the OA ticket for TJJ-password auth.
            if host.get("auth_type") == constants.AuthType.TJJ_PASSWORD:
                instance_info["extra_data"] = {"oa_ticket": host["ticket"]}
            # Append this host's subscription node.
            subscription_nodes.append(
                {
                    "bk_supplier_account": settings.DEFAULT_SUPPLIER_ACCOUNT,
                    "bk_cloud_id": host["bk_cloud_id"],
                    "ip": host.get("inner_ip", "") or host.get("inner_ipv6", ""),
                    "instance_info": instance_info,
                }
            )
        return subscription_nodes
    @staticmethod
    def update_host(accept_list: list, ip_filter_list: list, is_manual: bool = False):
        """
        Update identity (auth) and host rows for reinstall-style operations.

        Rows to modify are deleted and re-created in bulk inside one
        transaction (delete-then-bulk_create instead of per-row updates).

        :param accept_list: entries to update
        :param ip_filter_list: entries filtered out so far (may grow here)
        :param is_manual: whether this is a manual install
        :return: (subscription host ids, updated ip_filter_list)
        """
        identity_to_create = []
        host_to_create = []
        identity_id_to_delete = []
        host_id_to_delete = []
        # Current credentials of the hosts being updated.
        if not is_manual:
            # Non-manual mode requires credentials.
            identity_info = {
                identity["bk_host_id"]: {
                    "auth_type": identity["auth_type"],
                    "retention": identity["retention"],
                    "account": identity["account"],
                    "password": identity["password"],
                    "key": identity["key"],
                    "port": identity["port"],
                    "extra_data": identity["extra_data"],
                }
                for identity in models.IdentityData.objects.filter(
                    bk_host_id__in=[host["bk_host_id"] for host in accept_list]
                ).values("bk_host_id", "auth_type", "retention", "account", "password", "key", "port", "extra_data")
            }
        else:
            # Manual mode needs no credentials.
            identity_info = {}
        host_info = {
            host["bk_host_id"]: {
                "bk_host_id": host["bk_host_id"],
                "bk_biz_id": host["bk_biz_id"],
                "bk_cloud_id": host["bk_cloud_id"],
                "inner_ip": host["inner_ip"],
                "inner_ipv6": host["inner_ipv6"],
                "outer_ip": host["outer_ip"],
                "outer_ipv6": host["outer_ipv6"],
                "login_ip": host["login_ip"],
                "data_ip": host["data_ip"],
                "os_type": host["os_type"],
                "node_type": host["node_type"],
                "ap_id": host["ap_id"],
                "install_channel_id": host["install_channel_id"],
                "upstream_nodes": host["upstream_nodes"],
                "created_at": host["created_at"],
                "updated_at": host["updated_at"],
                "is_manual": host["is_manual"],
                "extra_data": host["extra_data"],
            }
            for host in models.Host.objects.filter(bk_host_id__in=[host["bk_host_id"] for host in accept_list]).values()
        }
        # Bulk-validate credentials and host info.
        update_data_info, ip_filter_list = validator.bulk_update_validate(
            host_info, accept_list, identity_info, ip_filter_list, is_manual
        )
        # Stage modified identity rows for bulk_create.
        for host in update_data_info["modified_identity"]:
            update_time = timezone.now()
            the_identity = identity_info[host["bk_host_id"]]
            # Refresh the OA ticket when TJJ-password auth is used.
            if host.get("auth_type") == constants.AuthType.TJJ_PASSWORD:
                extra_data = {"oa_ticket": host.get("ticket")}
            else:
                extra_data = the_identity["extra_data"]
            identity_to_create.append(
                models.IdentityData(
                    **{
                        "bk_host_id": host["bk_host_id"],
                        "auth_type": host.get("auth_type", the_identity["auth_type"]),
                        "account": host.get("account", the_identity["account"]),
                        "password": host.get("password", the_identity["password"]),
                        "port": host.get("port", the_identity["port"]),
                        "key": host.get("key", the_identity["key"]),
                        "retention": host.get("retention", the_identity["retention"]),
                        "extra_data": extra_data,
                        "updated_at": update_time,
                    }
                )
            )
            identity_id_to_delete.append(host["bk_host_id"])
        # Stage modified Host rows for bulk_create.
        for host in update_data_info["modified_host"]:
            # The OS type or access point changed.
            update_time = timezone.now()
            origin_host = host_info[host["bk_host_id"]]
            host_extra_data = {
                "peer_exchange_switch_for_agent": host.get(
                    "peer_exchange_switch_for_agent",
                    origin_host["extra_data"].get("peer_exchange_switch_for_agent"),
                ),
                "bt_speed_limit": host.get("bt_speed_limit", origin_host["extra_data"].get("bt_speed_limit")),
                "enable_compression": host.get(
                    "enable_compression", origin_host["extra_data"].get("enable_compression")
                ),
            }
            # Use the newly passed data_path, falling back to the stored one.
            if host.get("data_path") or origin_host["extra_data"].get("data_path"):
                host_extra_data.update(
                    {"data_path": host.get("data_path") or origin_host["extra_data"].get("data_path")}
                )
            host_to_create.append(
                models.Host(
                    **{
                        "bk_host_id": origin_host["bk_host_id"],
                        "bk_biz_id": origin_host["bk_biz_id"],
                        "bk_cloud_id": origin_host["bk_cloud_id"],
                        "inner_ip": origin_host["inner_ip"],
                        "outer_ip": origin_host["outer_ip"],
                        "inner_ipv6": origin_host["inner_ipv6"],
                        "outer_ipv6": origin_host["outer_ipv6"],
                        "login_ip": host.get("login_ip", origin_host["login_ip"]),
                        "data_ip": origin_host["data_ip"],
                        "os_type": host.get("os_type", origin_host["os_type"]),
                        "node_type": origin_host["node_type"],
                        "ap_id": host.get("ap_id", origin_host["ap_id"]),
                        "install_channel_id": host.get("install_channel_id", origin_host["install_channel_id"]),
                        "upstream_nodes": origin_host["upstream_nodes"],
                        "created_at": origin_host["created_at"],
                        "updated_at": update_time,
                        "is_manual": is_manual,
                        "extra_data": host_extra_data,
                    }
                )
            )
            host_id_to_delete.append(host["bk_host_id"])
        with transaction.atomic():
            # Flip is_manual on hosts that are otherwise unchanged.
            host_id_no_modified = [host["bk_host_id"] for host in update_data_info["not_modified_host"]]
            models.Host.objects.filter(bk_host_id__in=host_id_no_modified).update(is_manual=is_manual)
            # Delete the rows being replaced...
            models.IdentityData.objects.filter(bk_host_id__in=identity_id_to_delete).delete()
            models.Host.objects.filter(bk_host_id__in=host_id_to_delete).delete()
            # ...then recreate them in bulk with the new values.
            models.IdentityData.objects.bulk_create(identity_to_create)
            models.Host.objects.bulk_create(host_to_create)
        return update_data_info["subscription_host_ids"], ip_filter_list
def operate(self, job_type, bk_host_ids, bk_biz_scope, extra_params, extra_config):
"""
用于只有bk_host_id参数的下线、重启等操作
"""
# 校验器进行校验
subscription = self.create_subscription(
job_type, bk_host_ids, extra_params=extra_params, extra_config=extra_config
)
return tools.JobTools.create_job(
job_type=job_type,
subscription_id=subscription["subscription_id"],
task_id=subscription["task_id"],
bk_biz_scope=bk_biz_scope,
statistics={
"success_count": 0,
"failed_count": 0,
"pending_count": len(bk_host_ids),
"running_count": 0,
"total_count": len(bk_host_ids),
},
)
    def create_subscription(
        self,
        job_type,
        nodes: list,
        extra_params: Optional[Dict[str, Any]] = None,
        extra_config: Optional[Dict[str, Any]] = None,
    ):
        """
        Create a subscription task and run it immediately.

        :param job_type: e.g. INSTALL_AGENT
        :param nodes: task scope
        :param extra_params: extra step params
        :param extra_config: extra step config
        Node formats:
        1. reinstall / uninstall style operations:
            [{"bk_host_id": 1}, {"bk_host_id": 2}]
        2. fresh install / replace:
            [
                {
                    "bk_supplier_account": "0",
                    "bk_cloud_id": 0,
                    "ip": "127.0.0.1",
                    "instance_info": {
                        "ap_id": 1,
                        "bk_os_type": "1",
                        "bk_host_innerip": "127.0.0.1",
                        "bk_host_outerip": "127.0.0.1",
                        "bk_biz_id": 2,
                        "bk_biz_name": "蓝鲸",
                        "bk_cloud_id": 0,
                        "bk_cloud_name": "default area",
                        "bk_supplier_account": "0",
                        "auth_type": "PASSWORD",
                        "account": "root",
                        "port": 22,
                        "auth_type": "PASSWORD",
                        "password": "xxx",
                        "key": "",
                        "retention": 1
                    }
                }
            ]
        :return: NodeApi.create_subscription response
        """
        extra_params = extra_params or {}
        extra_config = extra_config or {}
        params = {
            "run_immediately": True,
            "bk_app_code": "nodeman",
            "bk_username": "admin",
            "scope": {"node_type": "INSTANCE", "object_type": "HOST", "nodes": nodes},
            "steps": [
                {
                    "id": "agent",
                    "type": "AGENT",
                    "config": {"job_type": job_type, **extra_config},
                    "params": {"context": {}, "blueking_language": get_language(), **extra_params},
                }
            ],
        }
        return NodeApi.create_subscription(params)
    def retry(self, instance_id_list: List[str] = None):
        """
        Retry some (or all failed) instances of this job.

        :param instance_id_list: instance ids to retry; None retries all failed
        :return: task_id_list (with the new retry task appended)
        """
        params = {
            "subscription_id": self.data.subscription_id,
            "task_id_list": self.data.task_id_list,
            "instance_id_list": instance_id_list,
        }
        task_id = NodeApi.retry_subscription_task(params)["task_id"]
        self.data.task_id_list.append(task_id)
        if instance_id_list:
            # Partial retry: move the retried instances from failed to running.
            running_count = self.data.statistics["running_count"] + len(instance_id_list)
            failed_count = self.data.statistics["failed_count"] - len(instance_id_list)
        else:
            # Full retry: every failed instance becomes running again.
            running_count = self.data.statistics["failed_count"]
            failed_count = 0
        self.data.statistics.update({"running_count": running_count, "failed_count": failed_count})
        self.data.status = constants.JobStatusType.RUNNING
        self.data.save()
        return self.data.task_id_list
def revoke(self, instance_id_list: list):
params = {
"subscription_id": self.data.subscription_id,
}
if instance_id_list:
params["instance_id_list"] = instance_id_list
NodeApi.revoke_subscription_task(params)
self.data.status = constants.JobStatusType.TERMINATED
self.data.end_time = timezone.now()
self.data.save()
return self.data.task_id_list
def retrieve(self, params: Dict[str, Any]):
    """
    Job detail page endpoint: aggregate per-host execution status for this job.

    :param params: request parameters (pagination / filter conditions)
    :return: job detail dict (counts, per-host list, statistics, timing)
    """
    if self.data.task_id_list:
        try:
            task_result = NodeApi.get_subscription_task_status(
                tools.JobTools.parse2task_result_query_params(job=self.data, query_params=params)
            )
        except ApiResultError as err:
            logger.exception(err)
            if err.code != SubscriptionTaskNotReadyError().code:
                raise err
            # Task not ready yet: present an empty result.
            task_result = {"list": [], "total": 0, "status_counter": {"total": 0}}
        else:
            # Task is ready but nothing executes: no host needs changing.
            # Insert an "ignored" pseudo-host so the frontend can show a hint.
            # task_result["total"] is the count AFTER filtering; the full count
            # comes from the status counter - task_result["status_counter"].
            if task_result["status_counter"].get("total", 0) == 0 and not self.data.error_hosts:
                # A lazy translation object saved to the DB raises without str():
                # TypeError at /en/ Object of type '__proxy__' is not JSON serializable
                # See: https://stackoverflow.com/questions/48454398/
                self.data.error_hosts = [
                    {"ip": "", "msg": str(_("没有需要变更的实例")), "status": constants.JobStatusType.IGNORED}
                ]
                self.data.save(update_fields=["error_hosts"])
    else:
        # Task runs asynchronously; its default state is PENDING.
        task_result = {"list": [], "total": 0, "status_counter": {"total": 0}}
    bk_host_ids = []
    host_execute_status_list = []
    # Flatten each instance status into a frontend-friendly per-host record.
    for instance_status in task_result["list"]:
        host_info = instance_status["instance_info"]["host"]
        job_type_info = tools.JobTools.unzip_job_type(
            tools.JobTools.get_job_type_in_inst_status(instance_status, self.data.job_type)
        )
        inner_ip = host_info.get("bk_host_innerip")
        inner_ipv6 = host_info.get("bk_host_innerip_v6")
        host_execute_status = {
            "instance_id": instance_status["instance_id"],
            "ip": inner_ip or inner_ipv6,
            "inner_ip": inner_ip,
            "inner_ipv6": inner_ipv6,
            "bk_host_id": host_info.get("bk_host_id"),
            "bk_cloud_id": host_info["bk_cloud_id"],
            "bk_cloud_name": host_info.get("bk_cloud_name"),
            "bk_biz_id": host_info["bk_biz_id"],
            "bk_biz_name": host_info["bk_biz_name"],
            "status": instance_status["status"],
            "start_time": local_dt_str2utc_dt(dt_str=instance_status["start_time"]),
            "end_time": local_dt_str2utc_dt(dt_str=instance_status["finish_time"]),
            **{"op_type": job_type_info["op_type"], "op_type_display": job_type_info["op_type_display"]},
            **tools.JobTools.get_current_step_display(instance_status),
        }
        if host_execute_status["start_time"]:
            end_time = host_execute_status["end_time"] or timezone.now()
            # Use .total_seconds(): .seconds is only the remainder after whole
            # days (days, seconds = divmod(seconds, 24*3600)) and is wrong past
            # 24h. See: https://stackoverflow.com/questions/4362491/
            host_execute_status["cost_time"] = (end_time - host_execute_status["start_time"]).total_seconds()
        host_execute_status_list.append(host_execute_status)
        bk_host_ids.append(host_info.get("bk_host_id"))
    # Extra host attributes kept in the local Host model, fetched in one query.
    id__host_extra_info_map = {
        host_extra_info["bk_host_id"]: host_extra_info
        for host_extra_info in models.Host.objects.filter(bk_host_id__in=bk_host_ids).values(
            "bk_host_id", "ap_id", "is_manual", "os_type", "cpu_arch"
        )
    }
    for host in host_execute_status_list:
        host["is_manual"] = id__host_extra_info_map.get(host.get("bk_host_id"), {}).get("is_manual", False)
        host["ap_id"] = id__host_extra_info_map.get(host.get("bk_host_id"), {}).get("ap_id")
    statuses_in_conditions = tools.JobTools.fetch_values_from_conditions(
        conditions=params.get("conditions", []), key="status"
    )
    # Append the hosts that were excluded before execution (error_hosts),
    # respecting the status filter from the request.
    filter_hosts = []
    for host in self.data.error_hosts:
        status = host.get("status", constants.JobStatusType.FAILED)
        # No "status" in conditions (or an empty list) means "all statuses";
        # otherwise drop excluded hosts whose status is filtered out.
        if statuses_in_conditions and status not in statuses_in_conditions:
            continue
        filter_host_info: Dict[str, Union[bool, str, int]] = {
            "filter_host": True,
            "bk_host_id": host.get("bk_host_id"),
            "ip": host["ip"],
            "inner_ip": host.get("inner_ip"),
            "inner_ipv6": host.get("inner_ipv6"),
            "bk_cloud_id": host.get("bk_cloud_id"),
            "bk_cloud_name": host.get("bk_cloud_name"),
            "bk_biz_id": host.get("bk_biz_id"),
            "bk_biz_name": host.get("bk_biz_name"),
            "job_id": host.get("job_id"),
            "status": host.get("status") or constants.JobStatusType.FAILED,
            "status_display": host.get("msg"),
            "step": "",
        }
        host_suppressed_by_id = host.get("suppressed_by_id", None)
        if host_suppressed_by_id is not None:
            filter_host_info.update({"suppressed_by_id": host_suppressed_by_id})
        filter_hosts.append(filter_host_info)
    host_execute_status_list.extend(filter_hosts)
    # Fill in business and cloud-area display names.
    cloud_id_name_map = models.Cloud.cloud_id_name_map()
    biz_name_map = CmdbHandler.biz_id_name_without_permission()
    for host_execute_status in host_execute_status_list:
        host_execute_status.update(
            bk_biz_name=biz_name_map.get(host_execute_status.get("bk_biz_id")),
            bk_cloud_name=cloud_id_name_map.get(host_execute_status["bk_cloud_id"]),
        )
    tools.JobTools.update_job_statistics(self.data, task_result["status_counter"])
    job_detail = {
        "job_id": self.data.id,
        "created_by": self.data.created_by,
        "job_type": self.data.job_type,
        "job_type_display": constants.JOB_TYPE_DICT.get(self.data.job_type, ""),
        "ip_filter_list": [host["ip"] for host in self.data.error_hosts],
        "total": task_result["total"],
        "list": host_execute_status_list,
        "statistics": self.data.statistics,
        "status": self.data.status,
        "end_time": self.data.end_time,
        "start_time": self.data.start_time,
    }
    tools.JobTools.fill_cost_time(job_detail, job_detail)
    tools.JobTools.fill_sub_info_to_job_detail(job=self.data, job_detail=job_detail)
    # Non-policy jobs are done here.
    if job_detail["meta"].get("category") != models.Subscription.CategoryType.POLICY:
        return job_detail
    # Policy-bound jobs additionally carry current/target plugin versions.
    policy_info = tools.PolicyTools.get_policy(self.data.subscription_id, show_deleted=True, need_steps=True)
    os_cpu__config_map = tools.PolicyTools.get_os_cpu__config_map(policy_info)
    bk_host_id__plugin_version_map = tools.HostV2Tools.get_bk_host_id_plugin_version_map(
        project=policy_info["plugin_name"], bk_host_ids=bk_host_ids
    )
    for host_execute_status in job_detail["list"]:
        host_extra_info = id__host_extra_info_map.get(host_execute_status["bk_host_id"])
        if not host_extra_info:
            host_execute_status.update({"current_version": None, "target_version": None})
            continue
        # Target version depends on the host's OS/CPU flavour.
        os_cpu_key = f"{host_extra_info['os_type'].lower()}_{host_extra_info['cpu_arch']}"
        host_execute_status["current_version"] = bk_host_id__plugin_version_map.get(
            host_execute_status["bk_host_id"]
        )
        host_execute_status["target_version"] = os_cpu__config_map.get(os_cpu_key, {}).get("version")
    return job_detail
@staticmethod
def get_log_base(subscription_id: int, task_id_list: List[int], instance_id: str) -> list:
    """
    Fetch the per-sub-step execution logs of one instance of a subscription task.

    :param subscription_id: subscription task ID
    :param task_id_list: task ID list
    :param instance_id: instance ID
    :return: list of {step, status, log, start_time, finish_time} dicts
    """
    detail = NodeApi.get_subscription_task_detail(
        {"subscription_id": subscription_id, "instance_id": instance_id, "task_id_list": task_id_list}
    )
    steps = detail.get("steps")
    if not steps:
        return []
    target_hosts = steps[0].get("target_hosts")
    if not target_hosts:
        return []
    return [
        {
            "step": sub_step["node_name"],
            "status": sub_step["status"],
            "log": sub_step["log"],
            "start_time": sub_step.get("start_time"),
            "finish_time": sub_step.get("finish_time"),
        }
        for sub_step in target_hosts[0].get("sub_steps")
    ]
def get_log(self, instance_id: str) -> list:
    """
    Return the execution logs of one instance for this job.

    :param instance_id: instance ID
    :return: list of log entries
    """
    job = self.data
    return JobHandler.get_log_base(job.subscription_id, job.task_id_list, instance_id)
def collect_log(self, instance_id: int) -> list:
    # Ask the node-management backend to (re)collect detailed logs for one instance.
    # NOTE(review): this reads `self.job_id` while sibling methods use `self.data.id`
    # — confirm that `job_id` is actually set on this handler.
    return NodeApi.collect_subscription_task_detail({"job_id": self.job_id, "instance_id": instance_id})
def retry_node(self, instance_id: str):
    """
    Atom-level retry of a single pipeline node during installation.

    :param instance_id: instance id, e.g. host|instance|host|127.0.0.1-0-0
    :return: retried pipeline node id and display name, e.g.
        {
            "retry_node_id": "6f48169ed1193574961757a57d03a778",
            "retry_node_name": "安装"
        }
    """
    params = {
        "subscription_id": self.data.subscription_id,
        "instance_id": instance_id,
    }
    retry_node_info = NodeApi.retry_node(params)
    # Update job statistics: one host moves from "failed" back to "running".
    running_count = self.data.statistics["running_count"] + 1
    failed_count = self.data.statistics["failed_count"] - 1
    self.data.statistics.update({"running_count": running_count, "failed_count": failed_count})
    self.data.status = constants.JobStatusType.RUNNING
    self.data.save(update_fields=["statistics", "status"])
    return retry_node_info
|
def solve(pairs):
    """Bucket-sort (number, name) pairs by number, replacing the names of the
    first half of the input with '-'.

    Bug fix: the original indexed the bucket list with the raw number
    (``lst2[lst[i][0]]``) while the list only had ``max - min + 1`` slots, so
    any input whose minimum number is > 0 raised IndexError. Buckets are now
    indexed with an offset of the minimum value.

    :param pairs: iterable of (number, name) tuples, in input order
    :return: list of names ordered by ascending number (stable within a number)
    """
    marked = [[num, name] for num, name in pairs]
    # The first half of the entries (input order) lose their name.
    for i in range(len(marked) // 2):
        marked[i][1] = '-'
    lo = min(marked)[0]
    hi = max(marked)[0]
    buckets = [[] for _ in range(lo, hi + 1)]
    for num, name in marked:
        buckets[num - lo].append(name)  # offset by lo: bucket 0 == number lo
    return [name for bucket in buckets for name in bucket]


def main():
    """Read n pairs from stdin and print the rearranged names."""
    n = int(input())
    pairs = []
    for _ in range(n):
        x, y = input().split()
        pairs.append((int(x), y))
    print(*solve(pairs))


if __name__ == '__main__':
    main()
|
984,159 | 793f3b91796de16c4bbabce5a297fde76d574778 | import re
def judge(oscn_html):
    """Extract the judge's name from an OSCN case page.

    Bug fix: the original tested ``judge_search.group`` — the bound method,
    which is always truthy — and therefore raised AttributeError on pages with
    no match (``search`` returns None). The match object itself is now tested.

    :param oscn_html: raw HTML/text of the case page
    :return: the captured judge name string, or None when absent
    """
    judge_re = r'Judge:\s*([\w\s\,]*)'
    find_judge = re.compile(judge_re, re.M)
    judge_search = find_judge.search(oscn_html)
    if judge_search:
        return judge_search.group(1)
    return None

# Marker consumed elsewhere: which request type this extractor applies to.
setattr(judge, 'target', ['OSCNrequest'])
|
984,160 | d2db98654e259a0e583483ee4951f5ed616436db | import os
import gc
import sha
import gtk
import ImageAreaSelector
class Avatar( object ):
    '''A user avatar: a full-size image plus a thumbnail, both cached on disk
    under a content-hash (SHA1) based file name. Python 2 / PyGTK code.'''
    def __init__( self, path, avatarsPath, dontCache=False,
                  height=96, width=96, thumbHeight=48, thumbWidth=48,
                  resizeDialog=False ):
        '''Empty path -> "no avatar"; otherwise reuse a cached image/thumb pair
        when present, else import the file at `path` into the cache.'''
        if path == '':
            self.noAvatar()
        elif not self.loadExisting(path):
            self.loadNew(path, avatarsPath, height, width, thumbHeight,
                         thumbWidth, resizeDialog)
    def loadNew( self, path, avatarsPath, height, width, thumbHeight,
                 thumbWidth, resizeDialog ):
        '''Import a new image: optionally let the user crop it, scale it to the
        avatar and thumbnail sizes, and store both under a SHA1-derived name.'''
        self.height = height
        self.width = width
        self.thumbHeight = thumbHeight
        self.thumbWidth = thumbWidth
        pixbuf = gtk.gdk.pixbuf_new_from_file( path )
        if resizeDialog:
            # Interactive crop of the source image.
            areaSelector = ImageAreaSelector.ImageAreaSelectorDialog(pixbuf)
            response, pixbuf = areaSelector.run()
            if response == gtk.RESPONSE_CANCEL:
                self.noAvatar()
                return
        self.image = self.scale( pixbuf, width, height )
        self.thumb = self.scale( pixbuf, thumbWidth, thumbHeight )
        # Save to temporary names first: the final name needs the content hash.
        imagePath = avatarsPath + os.sep + 'temp.png'
        thumbPath = avatarsPath + os.sep + 'temp_thumb.png'
        self.image.save( imagePath, 'png' )
        self.thumb.save( thumbPath, 'png' )
        f = file(imagePath, 'rb')
        hash = sha.new(f.read())
        f.close()
        self.imagePath = avatarsPath + os.sep + hash.hexdigest() + '.png'
        self.thumbPath = avatarsPath + os.sep + hash.hexdigest() + '_thumb.png'
        # Rename the temp files into place unless an identical avatar is cached.
        if not os.path.exists( self.thumbPath ):
            os.rename( thumbPath, self.thumbPath )
        else:
            os.remove( thumbPath )
        if not os.path.exists( self.imagePath ):
            os.rename( imagePath, self.imagePath )
        else:
            os.remove( imagePath )
        # If the source file already lived inside the cache directory, remove
        # the old (non-hash-named) pair.
        if os.path.dirname( os.path.abspath( path ) ) == os.path.abspath( avatarsPath ):
            os.remove( path )
            thumbName = path.split( '.png' )[ 0 ] + '_thumb.png'
            try:
                os.remove( thumbName )
            except:
                print 'could not remove ' + thumbName
    def noAvatar(self):
        '''Reset to the "no avatar" state (empty paths, no pixbufs).'''
        self.imagePath = ''
        self.thumbPath = ''
        self.image = None
        self.thumb = None
        self.height = 0
        self.width = 0
        self.thumbHeight = 0
        self.thumbWidth = 0
    def loadExisting( self, path ):
        '''Try to load a cached image + "<name>_thumb.png" pair.

        Returns False (leaving paths set but pixbufs unset) when either file
        cannot be loaded.'''
        self.imagePath = path
        self.thumbPath = path[:-4] + '_thumb.png'
        try:
            self.image = gtk.gdk.pixbuf_new_from_file( self.imagePath )
            self.thumb = gtk.gdk.pixbuf_new_from_file( self.thumbPath )
        except:
            return False
        self.height = self.image.get_height()
        self.width = self.image.get_width()
        self.thumbHeight = self.thumb.get_height()
        self.thumbWidth = self.thumb.get_width()
        return True
    def getImagePath( self ):
        '''Path of the cached full-size image ('' when no avatar).'''
        return self.imagePath
    def getThumbPath( self ):
        '''Path of the cached thumbnail ('' when no avatar).'''
        return self.thumbPath
    def getImage( self ):
        '''Full-size gtk.gdk.Pixbuf, or None.'''
        return self.image
    def getThumb( self ):
        '''Thumbnail gtk.gdk.Pixbuf, or None.'''
        return self.thumb
    def scale( self, image, width, height ):
        '''Return `image` scaled to fit inside width x height, preserving the
        aspect ratio (bilinear interpolation).'''
        h,w = image.get_height(), image.get_width()
        width_max, height_max = width, height
        width=float( image.get_width() )
        height=float( image.get_height() )
        # Shrink along the dimension that overflows the most.
        if ( width/width_max ) > ( height/height_max ):
            height=int( ( height/width )*width_max )
            width=width_max
        else:
            width=int( ( width/height )*height_max )
            height=height_max
        image = image.scale_simple( width, height, gtk.gdk.INTERP_BILINEAR )
        gc.collect() # Tell Python to clean up the memory
        return image
|
984,161 | fe0c0a090c58794cdf27dc38dcff618427cbda63 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zhihao Xie \(#^o^)/
# Date: 2017.05.31
# Version: v1.0.0
# CopyRight: Copyright ©Zhihao Xie, All rights reserved.
import os, sys, re
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
# --- CLI: translate nucleotide FASTA records into protein FASTA -------------
if len(sys.argv) < 3:
    sys.stderr.write("Usage: python3 %s <gene_seq> <translate_outSeq> [code_table]\n" % sys.argv[0])
    sys.exit()

seqFile = os.path.abspath(sys.argv[1])
outSeq = os.path.abspath(sys.argv[2])
if len(sys.argv) == 4:
    code_table = sys.argv[3]
else:
    # NCBI translation table 11 (bacterial/archaeal) by default.
    code_table = 11

# Bug fix: use a context manager so the output handle is closed even when
# parsing or translation raises; the original leaked the handle on error.
with open(outSeq, "w") as handle:
    for seq_record in SeqIO.parse(seqFile, "fasta"):
        seq_id = seq_record.id
        seq_desc = seq_record.description.replace(seq_record.id, "")
        protein_seq = seq_record.seq.translate(table=code_table)
        # Length without the trailing stop symbol '*'.
        pro_length = len(protein_seq) - 1
        if re.search(r"\d+_?nt", seq_desc):
            # Rewrite "<N>nt" / "<N>_nt" descriptors as "<N>_aa".
            seq_desc = re.sub(r"\d+_?nt", str(pro_length) + "_aa", seq_desc)
        protein_record = SeqRecord(Seq(str(protein_seq).rstrip("*"), IUPAC.protein), id=seq_id, description=seq_desc)
        SeqIO.write(protein_record, handle, "fasta")
|
984,162 | e5da9cd561b35af74b64b163ef271843b99cb2ad | import os
import pandas as pd
import hdf5_getters
def _extractSongData(file_path, filename):
    """Read one MSD .h5 file and return its metadata as the tuple
    (track_id, song_id, dig7_id, title, release, artist_name, year)."""
    h5 = hdf5_getters.open_h5_file_read(file_path)
    track_id = filename[:-3]  # file name minus the ".h5" suffix
    fields = (
        hdf5_getters.get_song_id(h5).decode('UTF-8'),
        hdf5_getters.get_track_7digitalid(h5),
        hdf5_getters.get_title(h5).decode('UTF-8'),
        hdf5_getters.get_release(h5).decode('UTF-8'),
        hdf5_getters.get_artist_name(h5).decode('UTF-8'),
        hdf5_getters.get_year(h5),
    )
    h5.close()
    return (track_id,) + fields
def _fileProgress(ith_file, total_files, filename):
print('[{0}/{1}] {2}'.format(ith_file, total_files, filename))
def extractBulk(dirPath):
    """Walk `dirPath` recursively and collect song metadata from every .h5
    file into a single DataFrame (one row per track)."""
    columns = ['track_id', 'song_id', 'dig7_id', 'title', 'release', 'artist_name', 'year']
    song_data_df = pd.DataFrame(columns=columns)
    print('Directory path: ' + dirPath)
    for root, _, files in os.walk(dirPath):
        # Progress counter covers every file in the directory, not only .h5.
        for position, filename in enumerate(files, start=1):
            if not filename.endswith('.h5'):
                continue
            _fileProgress(position, len(files), filename)
            row = _extractSongData(os.path.join(root, filename), filename)
            song_data_df.loc[len(song_data_df)] = row
    return song_data_df
if __name__ == '__main__':
    # print(list(filter(lambda x: x[:3] == 'get', hdf5_getters.__dict__.keys())))
    # Extract metadata for the whole Million Song subset and dump it.
    song_data_df = extractBulk('../dataset/raw/MillionSongSubset')
    print(song_data_df)
    # Tab-separated despite the .csv extension; no index column.
    song_data_df.to_csv('../dataset/MSD_songs.csv', sep='\t', encoding='utf-8', index=False)
984,163 | 5041cdb54d6f52fc258bdfbaed8ab331aa18eaa7 | from setuptools import setup
# Minimal packaging metadata for the ffmpeg exercise.
setup(
    name='exercise-2-ffmpeg-rashmi',
    version='',
    packages=[''],
    url='',
    license='',
    author='Rashmi ',
    author_email='rashmi23@bu.edu',
    description='',
    # NOTE(review): flake8/pytest are development tools; they would normally go
    # in extras_require / dev requirements rather than install_requires.
    install_requires=['flake8','pytest']
)
|
984,164 | 1d79298e1c8942b8167bb54feee2415108561621 | import tensorflow as tf
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import roc_curve,auc
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
class network:
    """TF1 autoencoder (11 -> a2 -> a3 -> a4 -> 11) with a step-wise bottleneck
    activation and a loss-driven adaptive learning rate."""
    def __init__(self, batch_size=32, epoch=50, lr=1e-4,
                 max_lr=0.02, enlarge_lr=1.005, reduce_lr=0.98):
        # lr grows by `enlarge_lr` while the loss improves (capped at max_lr)
        # and shrinks by `reduce_lr` when the loss worsens by >1%.
        self.batch_size = batch_size
        self.epoch = epoch
        self.lr = lr
        self.max_lr = max_lr
        self.enlarge_lr = enlarge_lr
        self.reduce_lr = reduce_lr
        self.e = 0  # previous epoch's loss, used by the lr schedule
    def build(self, a2, a3, a4):
        """Build the graph; a2/a3/a4 are the hidden layer widths."""
        self.x = tf.placeholder(dtype=tf.float32, shape=[None, 11], name='x_layer1')
        self.y = tf.placeholder(dtype=tf.float32, shape=[None, 11], name='y_layer5')
        self.learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')
        with tf.variable_scope('inputs_1'):
            layer2 = tf.layers.dense(self.x, a2, activation=tf.nn.tanh, name='layer2')
        with tf.variable_scope('encoder'):
            layer3 = tf.layers.dense(layer2, a3, name='layer3')  # can't change
            encoder = step_wise(layer3, n=a3)  # the most important
        with tf.variable_scope('outputs_5'):
            layer4 = tf.layers.dense(encoder, a4, activation=tf.nn.tanh, name='layer4')
            self.outputs = tf.layers.dense(layer4, 11, activation=tf.nn.sigmoid, name='layer5')
        with tf.name_scope('optimize'):
            self.loss = tf.reduce_mean(tf.losses.mean_squared_error(labels=self.y,
                                                                    predictions=self.outputs))
            self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
            tf.summary.scalar('loss', self.loss)
        self.merged = tf.summary.merge_all()
    def reset(self):
        """Shuffle the training set and rewind the batch cursor."""
        index = np.arange(0, self.train_X.shape[0], 1)
        np.random.shuffle(index)
        self.train_X = self.train_X[index, :]
        # self.train_y = self.train_y[index]
        self.current_index = 0
    def next_batch(self):
        """Return the (x, y) batch at the current cursor; targets == inputs
        (autoencoder).

        NOTE(review): current_index is never advanced here or in train(), so
        every call returns the same first slice of the (shuffled) data —
        confirm whether the cursor increment is missing.
        """
        assert self.current_index < self.train_X.shape[0]
        batch_x = self.train_X[self.current_index:(self.current_index + self.batch_size), :]
        batch_y = batch_x
        return batch_x, batch_y
    def train(self, X, val_X, sess=None):
        """Run the training loop, writing train/val summaries and adapting lr."""
        self.train_X = X
        num_batch = self.train_X.shape[0] // self.batch_size
        sess.run(tf.global_variables_initializer())
        self.writer_train = tf.summary.FileWriter('G:/python_file/wine/train/', sess.graph)
        self.writer_val = tf.summary.FileWriter('G:/python_file/wine/val/', sess.graph)
        num = 0  # global step for the summary writers
        for _ in tqdm(range(self.epoch), desc='epoch'):
            # for _ in range(self.epoch):
            self.reset()
            for _ in range(num_batch):
                num += 1
                (batch_x, batch_y) = self.next_batch()
                feed_dict = {self.x: batch_x, self.y: batch_y,
                             self.learning_rate: self.lr}
                _, e, train_result = sess.run([self.train_op, self.loss, self.merged],
                                              feed_dict=feed_dict)
                val_result = sess.run(self.merged, feed_dict={self.x:val_X, self.y:val_X})
                self.writer_train.add_summary(train_result, num)
                self.writer_val.add_summary(val_result, num)
                # Adaptive lr: shrink when the loss worsens >1%, grow (up to
                # max_lr) when it improves, otherwise keep it.
                if e > 1.01 * self.e:
                    self.lr *= self.reduce_lr
                elif e < self.e and self.lr < self.max_lr:
                    self.lr *= self.enlarge_lr
                else:
                    self.lr = self.lr
                self.e = e
def step_wise(theta, a=100, n=4):
    """Smooth n-level staircase activation built from shifted tanh terms.

    Maps `theta` through 1/2 + 1/4 * sum_{i=1..n-1} tanh(a * (theta - i/n));
    large `a` makes the steps sharper.
    """
    total = tf.zeros_like(theta)
    for step_index in range(1, n):
        shifted = tf.nn.tanh(a * (theta - tf.constant(step_index / n)))
        total = tf.add(shifted, total)
    return 1/2 + 1/4 * total
def split_data(data, train_ratio, good_class=[4,5,6,7], bad_class=[1,2,3,8,9,10], shuffle=False):
    """Split wine rows (11 features + quality label in the last column) into
    train / validation / test sets.

    The train set contains only "good"-class rows (first `train_num` of them);
    the remaining good rows and all "bad" rows are split roughly in half
    between validation and test.

    :param data: (N, 12) array, last column is the quality class
    :param train_ratio: fraction of N taken as the train size
    :param shuffle: shuffle good/bad rows (in input order) before splitting
    :return: train_X, train_y, val_X, val_y, test_X, test_y
    """
    def _select(classes):
        # Keep input order; same np.concatenate behaviour as the original.
        rows = [data[idx, :] for idx, label in enumerate(data[:, -1]) if label in classes]
        return np.concatenate(rows).reshape([-1, 12])

    good = _select(good_class)
    bad = _select(bad_class)

    if shuffle:
        # Consume the RNG in the same order as before: good rows, then bad.
        for block in (good, bad):
            perm = list(range(block.shape[0]))
            np.random.shuffle(perm)
            block[:] = block[perm]

    total = data.shape[0]
    train_num = int(np.floor(total * train_ratio))
    assert good.shape[0] > train_num
    # After the train slice is taken from the good rows, the leftovers are
    # split in half; bad rows are split in half as well.
    val_good_num = int((good.shape[0] - train_num) // 2)
    val_bad_num = int(bad.shape[0] // 2)

    train_X = good[:train_num, :-1]
    train_y = good[:train_num, -1]

    val_good = good[train_num:train_num + val_good_num]
    test_good = good[train_num + val_good_num:]
    val_bad = bad[:val_bad_num]
    test_bad = bad[val_bad_num:]

    val_X = np.concatenate([val_good[:, :-1], val_bad[:, :-1]]).reshape([-1, 11])
    val_y = np.concatenate([val_good[:, -1], val_bad[:, -1]])
    test_X = np.concatenate([test_good[:, :-1], test_bad[:, :-1]]).reshape([-1, 11])
    test_y = np.concatenate([test_good[:, -1], test_bad[:, -1]])
    return train_X, train_y, val_X, val_y, test_X, test_y
def prepare(file='C:/Users/tianping/Desktop/winequality-red.csv'):
    """Load the wine-quality CSV (single ';'-joined column), split it into the
    real columns, and min-max scale the 11 feature columns (label untouched).

    Bug fix: ``DataFrame.ix`` was removed from pandas; ``.iloc`` is the
    positional equivalent used here.

    :param file: path to the semicolon-separated winequality CSV
    :return: DataFrame of float32 values with proper column names
    """
    origin_data = pd.read_csv(file)
    # print(data.columns)
    # Each row arrives as one ';'-joined string; split it into fields.
    data = [origin_data.iloc[i, 0].split(';') for i in range(origin_data.shape[0])]
    data = np.array(data, dtype='float32')
    data[:, :-1] = preprocessing.minmax_scale(data[:, :-1], axis=0)
    return pd.DataFrame(data, columns=origin_data.columns[0].split(';'))
if __name__ == '__main__':
    # Train the autoencoder on "good" wines only; "bad" quality classes should
    # reconstruct poorly, so reconstruction distance serves as anomaly score.
    data = prepare()
    data_array = np.array(data)
    train_X, train_y, val_X, val_y, test_X, test_y = split_data(data= data_array,
                                                                train_ratio=0.6)
    val_num = val_X.shape[0]
    # Label 1 = anomalous quality class, 0 = normal.
    val_labels = np.zeros(shape=[val_num, 1])
    for i, class_ in enumerate(val_y):
        if class_ in [1, 2, 3, 8, 9, 10]:
            val_labels[i] = 1
    tf.reset_default_graph()
    sess = tf.Session()
    net = network(epoch=200, lr=1e-4)
    net.build(a2=9, a3=4, a4=9)
    net.train(X=train_X,val_X=val_X,sess=sess)
    val_output = sess.run(net.outputs,
                          feed_dict={net.x: val_X})
    # Per-sample reconstruction error on the validation set.
    val_dist = np.zeros(shape=[val_num, 1])
    for i, x in enumerate(val_X):
        val_dist[i] = np.linalg.norm(x-val_output[i]) # euclidean distance
    fpr, tpr, thresholds = roc_curve(val_labels, val_dist)
    roc_auc = auc(fpr, tpr)
    print('val auc:%s'%(roc_auc))
    # Repeat the scoring on the held-out test set.
    test_num = test_X.shape[0]
    test_dist = np.zeros(shape=[test_num, 1])
    test_output = sess.run(net.outputs, feed_dict={net.x:test_X})
    for i, x in enumerate(test_X):
        test_dist[i] = np.linalg.norm(x-test_output[i]) # euclidean distance
    # test_dists.append(test_dist)
    # dists = np.mean(np.concatenate(test_dists, axis=1), axis=1)
    test_labels = np.zeros(shape=[test_num, 1])
    for i, class_ in enumerate(test_y):
        if class_ in [1, 2, 3, 8, 9, 10]:
            test_labels[i] = 1
    fpr, tpr, thresholds = roc_curve(test_labels, test_dist)
    test_roc_auc = auc(fpr, tpr)
    print('test auc: %s'%test_roc_auc)
984,165 | 7c26241a41e9de69b5689104c478e219cc6dc589 | import os
# Replication status labels reported by the service.
MESSAGE_REPLICATION_STATUS_FAILED = "failed"
MESSAGE_REPLICATION_STATUS_OK = "OK"
# Artificial delay in seconds, configurable via the DELAY env var (default 0).
DELAY = int(os.environ.get('DELAY', 0))
|
984,166 | be7a9efaa5a43abd7732aed9b200fbd54196ede7 | # -*-coding:utf-8-*-
import argparse
import os
import platform
import json
import re
import threading
import time
from serial import Serial, SerialException
from arrosage_database_manager import RecuperateurDonnees
import datetime
import collections
import numpy as np
import pickle
import generateur_graphique_meteo
from constantes import *
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "arrosage_automatique.settings")
#import django
#django.setup()
from gestion_temps import *
#from gestion_arrosage_automatique.models import ConditionsMeteorologiques, ConditionArrosage
__author__ = 'besnier'
# port_serie = Serial(port = PORT, baudrate = 9600)
def trouver_ports_libres():
    """Probe serial ports 0-255 and report those that can be opened.

    Improvements over the original: the bare ``except:`` (which also swallowed
    KeyboardInterrupt) is narrowed to the errors Serial actually raises, and
    the list is returned as well as printed (the original returned None).

    :return: list of (port_number, port_name) tuples for openable ports
    """
    available = []
    for port_number in range(256):
        try:
            s = Serial(port_number)
        except (OSError, SerialException):
            continue
        available.append((port_number, s.portstr))
        s.close()
    print(available)
    return available
class Decideur(threading.Thread):
    """Main decision loop: polls the Arduino for sensor values, stores them in
    the database, and starts/stops watering according to the schedule."""

    def __init__(self, lePort):
        threading.Thread.__init__(self)
        self.commu = Communication_Arduino(lePort)
        self.recuperateur = RecuperateurDonnees()
        self.dm = Mesure(codes_arduino)
        self.dm.initialiser_mesures()
        self.arro = Arrosage()

    def run(self):
        """
        Main method: everything happens here. Loops forever until the serial
        port becomes unavailable.
        """
        print("on mesure aussi !")
        while True:
            print("début boucle")
            try:
                date_maintenant = datetime.datetime.now()
                print(self.dm.l_grandeurs_a_mesurer)
                # Request each pending measurement code and record the answer.
                for code in self.dm.l_grandeurs_a_mesurer:
                    print(code)
                    self.commu.parler(code)
                    self.dm.mettre_a_jour_demandes(code)
                    time.sleep(3)
                    recu = self.commu.ecouter()
                    print(recu)
                    recu = recu.split("_")
                    if len(recu) == 2 and recu[0] in codes_capteurs:
                        code_capteur = recu[0]
                        valeur = recu[1].split("\r")[0]
                        self.dm.mettre_a_jour_receptions(code)
                        self.recuperateur.enregistrer_mesure(valeur, d_code_table_capteurs[code_capteur])
                    elif len(recu) == 0:
                        print("rien reçu")
                    else:
                        # Unexpected payload: keep a trace in the JSON log.
                        with open(os.path.join("static", "json_files", "log.json"), "a") as f:
                            json.dump({repr(datetime.datetime.now()): "truc bizarre reçu "+"_".join(recu)}, f)
                self.dm.pour_faire_nouvelles_mesures(30)
                # Check whether the boards still answer despite missing values.
                # Bug fix: the original referenced the undefined name `dm` in
                # the elif condition; it must be `self.dm`.
                if self.dm.non_reception[codes_capteurs.index("HS")]:
                    self.commu.demander_si_bonne_reception("beth")
                elif self.dm.non_reception[codes_capteurs.index("TE")] and self.dm.non_reception[codes_capteurs.index("HA")] \
                        and self.dm.non_reception[codes_capteurs.index("LU")]:
                    self.commu.demander_si_bonne_reception("gimel")
                if self.arro.verifier_si_on_arrose(5):
                    self.commu.arroser()
                if self.arro.verifier_si_on_arrete(5):
                    self.commu.eteindre_arrosage()
                time.sleep(3)
                # Every quarter hour, push the measurements DB to the web host.
                if date_maintenant.minute % 15 == 0:
                    os.system("scp /home/pi/arrosage_automatique/arrosage_automatique/mesures_et_arrosages.db pi@192.168.1.27:/var/www/html/BLOGS/blog_flask/mesures_et_arrosages.db")
                    time.sleep(1)
            except SerialException:
                print("impossible d'accéder au port")
                break
class Arrosage:
    """Watering schedule stored in a JSON file; decides when watering should
    start and stop."""

    def __init__(self, chemin=os.path.join("static", "json_files"), nom_fichier="parametres_simples_arrosage.json"):
        """
        :param chemin: directory holding the schedule file
        :param nom_fichier: JSON file with the watering time slots
        """
        self.chemin = chemin
        self.nom_fichier = nom_fichier
        # Bug fix: the original tested `os.listdir(self.chemin)` (directory
        # NON-empty), which overwrote any existing schedule on every start and
        # crashed when the directory was empty (the file to load was missing).
        # Write defaults only when the schedule file does not exist yet.
        if not os.path.isfile(os.path.join(self.chemin, self.nom_fichier)):
            self.creer_parametres_par_defaut()
        self.en_train_d_arroser = False
        self.charger_horaires()

    def creer_parametres_par_defaut(self):
        """Write the default schedule: 06:00-06:15 and 20:30-20:45."""
        with open(os.path.join(self.chemin, self.nom_fichier), "w") as f:
            d = {"1": [{"heure": 6, "minute": 0}, {"heure": 6, "minute": 15}],
                 "2": [{"heure": 20, "minute": 30}, {"heure": 20, "minute": 45}]}
            json.dump(d, f)

    def charger_horaires(self):
        """Load the watering slots from the JSON file into memory."""
        with open(os.path.join(self.chemin, self.nom_fichier), "r") as f:
            self.horaires_d_arrosage = json.load(f)

    def verifier_si_on_arrose(self, minutes, type_arrosage="defaut"):
        """
        Return True when watering should start now (within `minutes` of a
        scheduled start), and remember that watering has begun.

        Bug fix: the original discarded the result of
        decision_temporelle_pour_demarrer, hard-coded 5 instead of `minutes`,
        always set en_train_d_arroser and returned None — so the caller's
        `if` never fired. It now uses the decision and returns booleans.
        """
        if type_arrosage != "defaut":
            return False
        if self.decision_temporelle_pour_demarrer(minutes):
            self.en_train_d_arroser = True
            return True
        return False

    def verifier_si_on_arrete(self, minutes):
        """Return True when watering is on and a scheduled stop time is within
        `minutes` of now."""
        maintenant = datetime.datetime.now()
        if self.en_train_d_arroser:
            for cle in self.horaires_d_arrosage.keys():
                n = self.horaires_d_arrosage[cle]
                heure_d_arrosage = maintenant.replace(hour=n[1]["heure"], minute=n[1]["minute"])
                if moins_minute(maintenant, heure_d_arrosage, minutes):
                    return True
        return False

    def decision_temporelle_pour_demarrer(self, minutes):
        """
        :param minutes: tolerance (plus or minus) around a scheduled start time
        :return: True when now falls within `minutes` of some start slot
        """
        maintenant = datetime.datetime.now()
        for cle in self.horaires_d_arrosage.keys():
            n = self.horaires_d_arrosage[cle]
            heure_d_arrosage = maintenant.replace(hour=n[0]["heure"], minute=n[0]["minute"])
            if moins_minute(maintenant, heure_d_arrosage, minutes):
                return True
        return False
class Mesure:
    """
    Bookkeeping for sensor polling (instantiated only once).

    Tracks, per measurement code, when a value was last requested and last
    received, which sensors look dead, and which codes still need polling.
    """
    def __init__(self, l_grandeurs_codee):
        # l_grandeurs_codee: list of measurement codes understood by the Arduino.
        self.l_grandeurs_codee = l_grandeurs_codee
        maintenant = datetime.datetime.now()
        # NOTE(review): "avant" ("before") is set one minute AFTER now in the
        # common branch (minute+1) — confirm whether this offset is intended.
        if maintenant.minute < 59:
            avant = maintenant.replace(minute=maintenant.minute+1)
        else:
            avant = maintenant.replace(minute=maintenant.minute-1)
        self.dates_dernieres_demandes = [avant]*len(l_grandeurs_codee)
        self.dates_dernieres_receptions = [maintenant]*len(l_grandeurs_codee)
        self.non_reception = [False]* len(l_grandeurs_codee)
        self.l_grandeurs_a_mesurer = []
    def initialiser_mesures(self):
        # Start by queuing every known measurement code.
        self.l_grandeurs_a_mesurer = [i for i in self.l_grandeurs_codee]
    def pour_faire_nouvelles_mesures(self, intervalle_entre_mesures):
        # Re-queue codes whose last reception is older than the polling
        # interval (3x the interval for sensors already flagged unresponsive).
        maintenant = datetime.datetime.now()
        for i in range(len(self.l_grandeurs_codee)):
            intervalle_mesuree = (self.dates_dernieres_receptions[i] - self.dates_dernieres_demandes[i])
            intervalle_attente = (maintenant - self.dates_dernieres_receptions[i])
            print(self.l_grandeurs_codee[i])
            print(intervalle_mesuree)
            print(intervalle_attente)
            if intervalle_attente.seconds > intervalle_entre_mesures and not self.non_reception[i] and self.l_grandeurs_codee[i] not in self.l_grandeurs_a_mesurer:
                self.l_grandeurs_a_mesurer.append(self.l_grandeurs_codee[i])
            elif intervalle_attente.seconds > intervalle_entre_mesures*3 and self.non_reception[i] and self.l_grandeurs_codee[i] not in self.l_grandeurs_a_mesurer:
                self.l_grandeurs_a_mesurer.append(self.l_grandeurs_codee[i])
            # A request unanswered for 10 intervals marks the sensor as down.
            if - intervalle_mesuree.seconds > intervalle_entre_mesures*10:
                self.non_reception[i] = True
    def log_etat_capteurs(self):
        # Dump the up/down state of every sensor to a JSON status file;
        # on I/O failure, record the problem in the generic log instead.
        try:
            with open(os.path.join("static", "json_files", "log_etats_capteurs.json"), "w") as f:
                json.dump({self.l_grandeurs_codee[i]: self.non_reception[i] for i in range(len(self.l_grandeurs_codee))}, f)
        except IOError:
            with open(os.path.join("static", "json_files", "log.json"), "a") as f:
                json.dump({repr(datetime.datetime.now()): "problème avec le log de l'état des capteurs"}, f)
    def mettre_a_jour_demandes(self, code):
        # Stamp the time this code was requested from the board.
        self.dates_dernieres_demandes[codes_arduino.index(code)] = datetime.datetime.now()
    def mettre_a_jour_receptions(self, code):
        # Stamp the reception time and drop the code from the polling queue.
        self.dates_dernieres_receptions[codes_arduino.index(code)] = datetime.datetime.now()
        self.l_grandeurs_a_mesurer.remove(code)
class Communication_Arduino:
    """Thin wrapper around the serial link to the Arduino boards.

    NOTE(review): write() is given str values; under Python 3 with pyserial 3
    write() expects bytes — confirm the serial/python combination in use.
    """
    def __init__(self, lePort):
        # lePort: serial device name, e.g. "COM3" or "/dev/ttyACM0".
        self.port = lePort
        try:
            self.port_serie = Serial(port=self.port, baudrate=9600, timeout=0)
            print(self.port_serie.isOpen())
        except SerialException:
            print("port série introuvable")
    def combien_temperature(self):
        # Ask for the outside temperature.
        self.port_serie.write("t")
    def combien_humidite(self):
        # Ask for the humidity.
        self.port_serie.write("h")
    def combien_pression(self):
        # Ask for the atmospheric pressure.
        self.port_serie.write("p")
    def combien_temperature_interieure(self):
        # Ask for the indoor temperature.
        self.port_serie.write("i")
    def arroser(self):
        # Start watering.
        self.port_serie.write("a")
    def eteindre_arrosage(self):
        # Stop watering.
        self.port_serie.write("e")
    def demander_si_bonne_reception(self, nom_carte):
        # Ping one of the Arduino boards ("beth" or "gimel") and record
        # whether it still answers; unknown names go to the JSON log.
        if nom_carte in noms_cartes_arduino:
            if nom_carte == "beth":
                self.port_serie.write("v")
            elif nom_carte == "gimel":
                self.port_serie.write("o")
            retour = self.port_serie.readline()
            time.sleep(3)
            if retour == "connexion_bet_ok":
                self.contact_beth = True
            elif retour == "connexion_gimel_ok":
                self.contact_gimel = True
            else:
                with open(os.path.join("static", "json_files", "log.json"), "a") as f:
                    json.dump({repr(datetime.datetime.now()): "plus de contact avec la carte "+nom_carte}, f)
        else:
            with open(os.path.join("static", "json_files", "log.json"), "a") as f:
                json.dump({repr(datetime.datetime.now()): "mauvais nom de crate arduino"}, f)
    # def en_train_d_arroser(self):
    #     self.port_serie.write("en_train_d_arroser")
    def ecouter(self):
        # Read one line from the board.
        print("on lit")
        return self.port_serie.readline()
    def parler(self, a_envoyer):
        # Send a raw command/code to the board and echo it back.
        #raw_input("écrire ici")
        self.port_serie.write(a_envoyer)
        return a_envoyer
    def quitter(self):
        # Close the serial link.
        self.port_serie.close()
if __name__ == "__main__":
    # Pick the serial port name depending on the host OS.
    if platform.system() == "Windows":
        PORT = "COM3"
    else:
        PORT = "/dev/ttyACM0"
    #try:
    # Start the measurement/watering decision thread.
    dec = Decideur(PORT)
    #json_file = os.path.join("gestion_courriel", "client_secret.json")
    #print json_file
    #PROVENANCE_SURE = ["clemsciences@gmail.com","arrosage.b@gmail.com", "cendrine.besnier37@gmail.com", "patrick.besnier37@gmail.com"]
    #DESTINATAIRES = ["clemsciences@gmail.com", "patrick.besnier37@gmail.com", "cendrine.besnier37@gmail.com"]
    #gest = GestionnaireGmail(json_file, PROVENANCE_SURE, DESTINATAIRES)
    dec.start()
    #gest.start()
    #except SerialException:
    #    print "port manquant"
    #TODO envoyer un mail?
984,167 | 1e17c5651c5169caa9f243840f819c84e4dd6d26 | ag = '''
# rectangle.py
# C:\Users\lenovo\Desktop\2020\2020DeveloPython# abstract data type POO
'''
class Shape:
    """A point-like shape located at (x, y)."""

    def __init__(self, xcoord, ycoord):
        """Place the shape at the given coordinates."""
        self.x = xcoord
        self.y = ycoord

    def __str__(self):
        # Keep the exact historical format: "x :<x> y :<y>".
        return 'x :%s y :%s' % (self.x, self.y)

    def move(self, x1, y1):
        """Translate the shape by (x1, y1)."""
        self.x += x1
        self.y += y1
class Rectangle(Shape):  # inherits position and move() from Shape
    """A Shape that also carries a width and a height."""

    def __init__(self, xcoord, ycoord, width, heigth):
        """Create a rectangle at (xcoord, ycoord) with the given dimensions."""
        Shape.__init__(self, xcoord, ycoord)
        self.width = width
        self.heigth = heigth

    def __str__(self):
        # Base position string plus the rectangle's own dimensions.
        base = Shape.__str__(self)
        return base + ', width :' + str(self.width) + ', heigth :' + str(self.heigth)
# Quick demo: build a rectangle, print it, move it, print again.
rec = Rectangle(5,10,8,9)
print(rec)
rec.move(13,14)
print(rec)
|
984,168 | 5ab0b093b6fbb3567ec3ab904d0dabee714be5ac | # We have a string S of lowercase letters, and an integer array shifts.
# Call the shift of a letter, the next letter in the alphabet, (wrapping around so that 'z' becomes 'a').
# For example, shift('a') = 'b', shift('t') = 'u', and shift('z') = 'a'.
# Now for each shifts[i] = x, we want to shift the first i+1 letters of S, x times.
# Return the final string after all such shifts to S are applied.
class Solution:
    def shiftingLetters(self, S: str, shifts: 'List[int]') -> str:
        """Return S after applying cumulative left-prefix letter shifts.

        shifts[i] shifts the first i+1 letters, so letter i ultimately
        receives the suffix sum shifts[i] + shifts[i+1] + ... (mod 26,
        wrapping 'z' back to 'a').

        Bug fix: the annotation is quoted because this file never imports
        typing.List, so the bare name raised NameError when the class body
        was evaluated.
        """
        chars = list(S)
        suffix = 0  # running sum of shifts, accumulated right-to-left
        for i in range(len(S) - 1, -1, -1):
            suffix += shifts[i]
            chars[i] = chr((ord(chars[i]) - ord('a') + suffix) % 26 + ord('a'))
        return "".join(chars)
|
984,169 | 1504e132d800b68c1f87757de2a7b14a54e111b9 | # ask user for input then prints to terminal output
# Prompt the user for their name and print a greeting.
answer = input("What is your name? ")
print("Hello, " + answer)
|
984,170 | 88dcb2cb26dd46033a80e10e8fb6253003adb5c0 | """
The rainymotion library provides different goodness of fit metrics for nowcasting models' performance evaluation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
### Regression metrics ###
def R(obs, sim):
    """Pearson correlation coefficient between flattened obs and sim.

    Args:
        obs (numpy.ndarray): observations
        sim (numpy.ndarray): simulations

    Returns:
        float: correlation coefficient of the two flattened fields
    """
    return np.corrcoef(obs.flatten(), sim.flatten())[0, 1]
def R2(obs, sim):
    """Coefficient of determination, 1 - SSE/SST.

    Args:
        obs (numpy.ndarray): observations
        sim (numpy.ndarray): simulations

    Returns:
        float: R^2 of sim against obs (1.0 means a perfect match)
    """
    o = obs.flatten()
    s = sim.flatten()
    sse = ((o - s) ** 2).sum()
    sst = ((o - np.mean(o)) ** 2).sum()
    return 1 - sse / sst
def RMSE(obs, sim):
    """Root mean squared error between flattened obs and sim.

    Args:
        obs (numpy.ndarray): observations
        sim (numpy.ndarray): simulations

    Returns:
        float: RMSE of the two flattened fields
    """
    residuals = obs.flatten() - sim.flatten()
    return np.sqrt(np.mean(residuals ** 2))
def MAE(obs, sim):
    """Mean absolute error between flattened obs and sim.

    Args:
        obs (numpy.ndarray): observations
        sim (numpy.ndarray): simulations

    Returns:
        float: mean of |sim - obs| over the flattened fields
    """
    return np.mean(np.abs(sim.flatten() - obs.flatten()))
### Radar-specific classification metrics ###
def prep_clf(obs, sim, threshold=0.1):
    """Binarize obs/sim at *threshold* (rain/no-rain) and return the
    confusion-matrix counts.

    Returns:
        tuple: (hits, misses, falsealarms, correctnegatives), i.e.
        (TP, FN, FP, TN) of sim against obs.
    """
    o = obs >= threshold
    s = sim >= threshold
    hits = np.sum(o & s)                 # TP
    misses = np.sum(o & ~s)              # FN
    falsealarms = np.sum(~o & s)         # FP
    correctnegatives = np.sum(~o & ~s)   # TN
    return hits, misses, falsealarms, correctnegatives
def CSI(obs, sim, threshold=0.1):
    """Critical success index: hits / (hits + misses + false alarms).
    See Woo & Wong (2017), Atmosphere 8(3):48.
    """
    hits, misses, falsealarms, _ = prep_clf(obs, sim, threshold)
    return hits / (hits + misses + falsealarms)
def FAR(obs, sim, threshold=0.1):
    """False alarm ratio: false alarms / (hits + false alarms).
    See Woo & Wong (2017), Atmosphere 8(3):48.
    """
    hits, _, falsealarms, _ = prep_clf(obs, sim, threshold)
    return falsealarms / (hits + falsealarms)
def POD(obs, sim, threshold=0.1):
    """Probability of detection: hits / (hits + misses).
    See Woo & Wong (2017), Atmosphere 8(3):48.
    """
    hits, misses, _, _ = prep_clf(obs, sim, threshold)
    return hits / (hits + misses)
def HSS(obs, sim, threshold=0.1):
    """Heidke skill score of the binarized rain/no-rain fields.
    See Woo & Wong (2017), Atmosphere 8(3):48.
    """
    hits, misses, falsealarms, correctnegatives = prep_clf(obs, sim, threshold)
    num = 2 * (hits * correctnegatives - misses * falsealarms)
    den = (misses ** 2 + falsealarms ** 2 + 2 * hits * correctnegatives
           + (misses + falsealarms) * (hits + correctnegatives))
    return num / den
def ETS(obs, sim, threshold=0.1):
    """Equitable threat score: CSI corrected for hits expected by chance.
    See Winterrath & Rosenow (2007), Adv. Geosci. 10, 77-83.
    """
    hits, misses, falsealarms, correctnegatives = prep_clf(obs, sim, threshold)
    total = hits + misses + falsealarms + correctnegatives
    # Expected number of random hits given the marginal totals.
    hits_random = (hits + falsealarms) * (hits + misses) / total
    return (hits - hits_random) / (hits + misses + falsealarms - hits_random)
def BSS(obs, sim, threshold=0.1):
    '''
    BSS - Brier skill score

    details:
    https://en.wikipedia.org/wiki/Brier_score

    Args:
        obs (numpy.ndarray): observations
        sim (numpy.ndarray): simulations
        threshold (float)  : threshold for rainfall values binaryzation (rain/no rain)

    Returns:
        float: BSS value

    NOTE(review): despite the name, this returns the square ROOT of the
    mean squared difference of the binarized fields (an RMSE), not the
    Brier score / Brier skill score proper -- confirm intended semantics
    with downstream users before changing.
    '''
    obs = np.where(obs >= threshold, 1, 0)
    sim = np.where(sim >= threshold, 1, 0)
    obs = obs.flatten()
    sim = sim.flatten()
    return np.sqrt(np.mean((obs - sim) ** 2))
### ML-specific classification metrics ###
def ACC(obs, sim, threshold=0.1):
    """Accuracy of the binarized fields: (TP + TN) / all pixels."""
    tp, fn, fp, tn = prep_clf(obs=obs, sim=sim, threshold=threshold)
    return (tp + tn) / (tp + tn + fp + fn)
def precision(obs, sim, threshold=0.1):
    """Precision of the binarized fields: TP / (TP + FP)."""
    tp, fn, fp, tn = prep_clf(obs=obs, sim=sim, threshold=threshold)
    return tp / (tp + fp)
def recall(obs, sim, threshold=0.1):
    """Recall of the binarized fields: TP / (TP + FN)."""
    tp, fn, fp, tn = prep_clf(obs=obs, sim=sim, threshold=threshold)
    return tp / (tp + fn)
def FSC(obs, sim, threshold=0.1):
    """F1 score: harmonic mean of precision and recall at *threshold*."""
    p = precision(obs, sim, threshold=threshold)
    r = recall(obs, sim, threshold=threshold)
    return 2 * ((p * r) / (p + r))
def MCC(obs, sim, threshold=0.1):
    """Matthews correlation coefficient of the binarized fields."""
    tp, fn, fp, tn = prep_clf(obs=obs, sim=sim, threshold=threshold)
    numerator = tp * tn - fp * fn
    denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / denominator
### Curves for plotting ###
def ROC_curve(obs, sim, thresholds):
    """ROC-curve coordinates over the given binarization thresholds.

    Returns:
        (tpr, fpr): arrays of true/false positive rates, one entry per
        threshold (tpr is the y axis, fpr the x axis of a ROC plot).
    """
    counts = [prep_clf(obs=obs, sim=sim, threshold=t) for t in thresholds]
    tpr = np.array([tp / (tp + fn) for tp, fn, fp, tn in counts])
    fpr = np.array([fp / (fp + tn) for tp, fn, fp, tn in counts])
    return tpr, fpr
def PR_curve(obs, sim, thresholds):
    """Precision-recall curve coordinates over the given thresholds.

    Returns:
        (pre, rec): arrays of precision and recall, one entry per
        threshold (pre is the y axis, rec the x axis of a PR plot).
    """
    pre = np.array([precision(obs=obs, sim=sim, threshold=t) for t in thresholds])
    rec = np.array([recall(obs=obs, sim=sim, threshold=t) for t in thresholds])
    return pre, rec
def AUC(x, y):
    """Area under the curve y(x), computed with the trapezoidal rule.

    Args:
        x (numpy.ndarray): 1-D abscissa values
        y (numpy.ndarray): 1-D ordinate values

    Returns:
        float: trapezoidal area under the curve
    """
    return np.trapz(y, x)
|
984,171 | ac37c63afe15674c0df7160bf1eba892613490ba | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 09:56:04 2018
@author: Asus
"""
# The Purpose:
# This Programming created for relizing the project for the paper "Support Tensor
# Machine for Text Categorization"
# Author: Spzhuang Date:2018-6-10
# Main Tool: sklearn 0.19.1
# ------------------load
import sklearn.svm as sv
from sklearn.cross_validation import train_test_split
import numpy as np
def generate_date(n1, n2, low_n1=10, high_n1=20, low_n2=15, high_n2=30, size_=(2, 3)):
    """Generate n1 positive (label +1) and n2 negative (label -1) random
    matrices of shape *size_*, drawn uniformly from the class bounds.

    Returns:
        (data, labels): list of numpy arrays (positives first, then
        negatives) and the matching list of +1/-1 labels.
    """
    data = [np.random.uniform(low=low_n1, high=high_n1, size=size_) for _ in range(n1)]
    labels = [1] * n1
    data += [np.random.uniform(low=low_n2, high=high_n2, size=size_) for _ in range(n2)]
    labels += [-1] * n2
    return (data, labels)
# Build a balanced 50/50 dataset and hold out 25% (stratified) for testing.
Dat,label = generate_date(50,50)
MData = train_test_split(Dat,label,test_size=0.25,stratify=label)
data_train,data_test,label_train,label_test = tuple(MData)
# ------------------data are generated
def sign(x):
    """Return the sign of x as 1, 0, or -1."""
    if x == 0:
        return 0
    return 1 if x > 0 else -1
class stm():
    """Support Tensor Machine: learns a rank-1 coefficient tensor
    outer(u, v) by alternately fixing one factor and fitting a linear
    SVM on the projected data (per "Support Tensor Machine for Text
    Categorization")."""
    def __init__(self,data,label,ns1,ns2): # ns1, ns2 are the two dimensions of the data matrices
        self.data = data
        self.data_L = len(label)
        self.label = label
        # Initialize both factor vectors as unit vectors of all-equal entries.
        self.u = np.ones(ns1)
        self.u = self.u/np.linalg.norm(self.u)
        self.v = np.ones(ns2)
        self.v = self.v/np.linalg.norm(self.v)
        self.error = 10        # change in ||outer(u, v)|| between iterations
        self.tol = 0.005       # convergence tolerance on that change
        self.max_iter = 200    # iteration cap
    def train(self):
        """Alternating optimization: i == 0 fixes u and updates v,
        i == 1 fixes v and updates u; stops when the norm of the
        coefficient tensor stabilizes."""
        inde = 1
        while self.error>self.tol and inde<self.max_iter :
            old_norm = np.linalg.norm(np.outer(self.u,self.v))
            i = int  # no-op; immediately overwritten by the loop below
            for i in range(2):
                svm = sv.SVC(kernel='linear',C=5)
                if i == 0:
                    # Project each sample matrix onto u: u @ X_j gives a vector.
                    tem_data = list()
                    for j in range(self.data_L):
                        tem_data.append(self.u @ self.data[j])
                    del j
                    tem_data = np.array(tem_data)
                    norm_u = np.linalg.norm(self.u)
                    svm.fit(tem_data,self.label)
                    b = svm.intercept_
                    # Solve the least-squares system for the new weight vector.
                    bb = list()
                    for j in range(self.data_L):
                        bb.append(b+self.label[j])
                    del j
                    bb = np.array(bb)
                    w= np.linalg.pinv(tem_data).dot(bb)
                    w = w.flatten()
                    self.v = w/norm_u
                if i == 1:
                    # Symmetric step: project onto v and update u.
                    tem_data = list()
                    for j in range(self.data_L):
                        tem_data.append(self.data[j] @ self.v)
                    del j
                    tem_data = np.array(tem_data)
                    norm_v = np.linalg.norm(self.v)
                    svm.fit(tem_data,self.label)
                    b = svm.intercept_
                    bb = list()
                    for j in range(self.data_L):
                        bb.append(b+self.label[j])
                    del j
                    bb = np.array(bb)
                    w= np.linalg.pinv(tem_data).dot(bb)
                    w = w.flatten()
                    self.u = w/norm_v
                # Record the SVM's support vectors as "support tensors".
                # NOTE(review): this maps SupTensorIndex[j] -> self.data[j];
                # self.data[SupTensorIndex[j]] looks like the intended value --
                # confirm before relying on SupTensor contents.
                SupTensorIndex= list(svm.support_)
                SupTensor = dict()
                for j in range(len(SupTensorIndex)):
                    SupTensor[SupTensorIndex[j]] = self.data[j]
                del j
            del i
            self.coe_tensor = np.outer(self.u,self.v)
            self.error = np.abs(old_norm-np.linalg.norm(self.coe_tensor))
            self.SupTensorIndex = SupTensorIndex
            self.SupTensor = SupTensor
            if inde >= 1:
                print('迭代次数:%d '% inde)
                print('迭代误差:%.2f' % self.error)
            inde += 1
        # Average the per-support-tensor bias estimates.
        self.bias = []
        for k in self.SupTensor.keys():
            b = self.label[k] - sign((self.u @ self.SupTensor[k] @ self.v))
            self.bias.append(b)
        self.bias = float(np.average(np.array(self.bias)))
    def predict(self,z):
        """Classify one sample matrix z as +1/0/-1."""
        pred = sign(np.sum(z*self.coe_tensor)+self.bias)
        return pred
    def score(self,test,test_label):
        """Return classification accuracy on (test, test_label)."""
        L = len(test_label)
        flag = 0
        for i in range(L):
            if self.predict(test[i]) == test_label[i]:
                flag += 1
        del i
        scor = flag/L
        return scor
# Train a 2x3 support tensor machine on the split and score the held-out set.
stm1 = stm(data=data_train,label=label_train,ns1=2,ns2=3)
stm1.train()
print('done!!')
score = stm1.score(data_test,label_test)
|
984,172 | 1730a25664ea700ce39f6f93bcebb5fb5c2daf2e | from gtts import gTTS
# Prompt for a name, synthesize an English greeting via Google TTS
# (requires network access), and save the audio as sample.mp3.
s = input("Enter Your Name: ")
tts = gTTS(text="Hello ! Welcome "+s, lang="en")
tts.save("sample.mp3")
984,173 | 1e51336142b7b7b681ca65b7481efe623c4c4a1e | '''
Most of the functions present here came in the original repo, we only took what's needed and not
the enitre script
'''
import numpy as np
from PIL import Image
import torch
from scipy import ndimage
def DepthNorm(x, maxDepth):
    """Convert between depth and its reciprocal encoding: maxDepth / x."""
    return maxDepth / x
def predict(model, image, minDepth=10, maxDepth=1000):
    """Run the depth *model* on one image and return a normalized depth map.

    NOTE(review): assumes *image* is an HxWxC float numpy array and the
    model returns a (1, 1, H', W') CPU tensor -- confirm with callers.
    """
    with torch.no_grad():
        # HWC numpy image -> 1xCxHxW torch tensor (batch of one).
        pytorch_input = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0)
        # Compute predictions
        predictions = model(pytorch_input)
        # Put in expected range
        return (np.clip(DepthNorm(predictions.numpy(), maxDepth=maxDepth), minDepth, maxDepth) / maxDepth)[0][0]
# This our version of the point cloud computation function, the rest of the functions came in
# the original repo
def to_pcd(depth, cx_d, fx_d, cy_d, fy_d):
    """Back-project a depth map into an (N, 3) point cloud with a pinhole
    model: principal point (cx_d, cy_d), focal lengths (fx_d, fy_d).

    NOTE(review): the first image axis maps to "x" and the second to a
    negated "y", matching the original implementation -- confirm axis
    conventions against consumers of the cloud.
    """
    rows, cols = np.indices(depth.shape)
    x = (rows - cx_d) * depth / fx_d
    y = -((cols - cy_d) * depth / fy_d)
    return np.column_stack((x.ravel(), y.ravel(), depth.ravel()))
def edges(d):
    """L1 gradient magnitude of *d*: |sobel along axis 0| + |sobel along axis 1|."""
    grad_h = ndimage.sobel(d, 0)  # horizontal derivative
    grad_v = ndimage.sobel(d, 1)  # vertical derivative
    return np.abs(grad_h) + np.abs(grad_v)
def worldCoords(width, height):
    """Flattened, camera-normalized pixel coordinates for a width x height
    image, assuming a fixed 57 x 43 degree field of view (Kinect-like).

    Returns:
        (xx, yy): arrays of length width*height with (pixel - center) / focal.
    """
    import math
    h_fov = math.radians(57)
    v_fov = math.radians(43)
    cx, cy = width / 2, height / 2
    fx = width / (2 * math.tan(h_fov / 2))
    fy = height / (2 * math.tan(v_fov / 2))
    xx = (np.tile(np.arange(width), height) - cx) / fx
    yy = (np.repeat(np.arange(height), width) - cy) / fy
    return xx, yy
def posFromDepth(depth, xx, yy):
    """Scale the normalized coordinates (xx, yy) by depth and return an
    (N, 3) array of 3-D positions (x*z, y*z, z)."""
    # depth[edges(depth) > 0.3] = 1e6  # Hide depth edges
    z = depth.ravel()
    return np.dstack((xx * z, yy * z, z)).reshape((z.size, 3))
def scale_up(scale, img):
    """Upscale *img* by an integer *scale* factor in both spatial
    dimensions using bilinear interpolation (skimage resize, order=1)."""
    from skimage.transform import resize
    output_shape = (scale * img.shape[0], scale * img.shape[1])
    return resize(img, output_shape, order=1, preserve_range=True, mode='reflect', anti_aliasing=True)
def load_images(image_files):
    """Load the given image paths as float arrays clipped to [0, 1] and
    stack them into one (N, H, W, C) batch (images must share a shape)."""
    loaded_images = []
    for file in image_files:
        x = np.clip(np.asarray(Image.open(file), dtype=float) / 255, 0, 1)
        loaded_images.append(x)
    return np.stack(loaded_images, axis=0)
def to_multichannel(i):
    """Return *i* as a 3-channel image: 3-channel inputs pass through
    unchanged; otherwise channel 0 is replicated across three channels."""
    if i.shape[2] == 3:
        return i
    first = i[:, :, 0]
    return np.stack((first, first, first), axis=2)
def display_images(outputs, inputs=None, gt=None, is_colormap=True, is_rescale=True):
    """Build a montage combining each output depth map with its optional
    input RGB and ground-truth panels.

    NOTE(review): `skimage.util.montage(..., multichannel=True)` was
    removed in newer scikit-image (replaced by channel_axis) -- pin the
    version or update before relying on this.
    """
    import matplotlib.pyplot as plt
    import skimage
    from skimage.transform import resize
    plasma = plt.get_cmap('plasma')
    # All panels are resized to the first output's spatial shape.
    shape = (outputs[0].shape[0], outputs[0].shape[1], 3)
    all_images = []
    for i in range(outputs.shape[0]):
        imgs = []
        if isinstance(inputs, (list, tuple, np.ndarray)):
            x = to_multichannel(inputs[i])
            x = resize(x, shape, preserve_range=True, mode='reflect', anti_aliasing=True)
            imgs.append(x)
        if isinstance(gt, (list, tuple, np.ndarray)):
            x = to_multichannel(gt[i])
            x = resize(x, shape, preserve_range=True, mode='reflect', anti_aliasing=True)
            imgs.append(x)
        if is_colormap:
            rescaled = outputs[i][:, :, 0]
            if is_rescale:
                # Normalize to [0, 1] before applying the colormap.
                rescaled = rescaled - np.min(rescaled)
                rescaled = rescaled / np.max(rescaled)
            imgs.append(plasma(rescaled)[:, :, :3])
        else:
            imgs.append(to_multichannel(outputs[i]))
        img_set = np.hstack(imgs)
        all_images.append(img_set)
    all_images = np.stack(all_images)
    return skimage.util.montage(all_images, multichannel=True, fill=(0, 0, 0))
def save_images(filename, outputs, inputs=None, gt=None, is_colormap=True, is_rescale=False):
    """Render a montage of *outputs* (optionally with *inputs* and *gt*
    panels) and write it to *filename* as an 8-bit image.

    Bug fix: the original call passed ``is_colormap`` and ``is_rescale``
    positionally into display_images' ``gt`` and ``is_colormap`` slots,
    silently dropping ``gt`` and misrouting both flags.
    """
    montage = display_images(outputs, inputs, gt, is_colormap=is_colormap, is_rescale=is_rescale)
    im = Image.fromarray(np.uint8(montage * 255))
    im.save(filename)
def compute_errors(gt, pred):
    """Standard monocular-depth error metrics between ground truth and
    prediction arrays (elementwise; gt and pred must be positive).

    Returns:
        tuple: (a1, a2, a3, abs_rel, rmse, log_10) where a_k is the
        fraction of pixels with max(gt/pred, pred/gt) < 1.25**k.
    """
    ratio = np.maximum(gt / pred, pred / gt)
    a1 = (ratio < 1.25).mean()
    a2 = (ratio < 1.25 ** 2).mean()
    a3 = (ratio < 1.25 ** 3).mean()
    abs_rel = np.mean(np.abs(gt - pred) / gt)
    rmse = np.sqrt(np.mean((gt - pred) ** 2))
    log_10 = np.mean(np.abs(np.log10(gt) - np.log10(pred)))
    return a1, a2, a3, abs_rel, rmse, log_10
def evaluate(model, rgb, depth, crop, batch_size=6, verbose=False):
    """Evaluate *model* on an RGB/depth test set (with flip test-time
    augmentation and the Eigen crop) and return the standard depth
    metrics tuple (a1, a2, a3, abs_rel, rmse, log_10).

    NOTE(review): this calls predict(..., batch_size=bs), but the
    predict() defined in this module takes no batch_size parameter --
    this path looks unadapted from the original Keras code; confirm
    before use.
    """
    N = len(rgb)
    bs = batch_size
    predictions = []
    testSetDepths = []
    for i in range(N // bs):
        x = rgb[(i) * bs:(i + 1) * bs, :, :, :]
        # Compute results
        true_y = depth[(i) * bs:(i + 1) * bs, :, :]
        pred_y = scale_up(2, predict(model, x / 255, minDepth=10, maxDepth=1000, batch_size=bs)[:, :, :, 0]) * 10.0
        # Test time augmentation: mirror image estimate
        pred_y_flip = scale_up(2,
                               predict(model, x[..., ::-1, :] / 255, minDepth=10, maxDepth=1000, batch_size=bs)[:, :, :,
                               0]) * 10.0
        # Crop based on Eigen et al. crop
        true_y = true_y[:, crop[0]:crop[1] + 1, crop[2]:crop[3] + 1]
        pred_y = pred_y[:, crop[0]:crop[1] + 1, crop[2]:crop[3] + 1]
        pred_y_flip = pred_y_flip[:, crop[0]:crop[1] + 1, crop[2]:crop[3] + 1]
        # Compute errors per image in batch
        for j in range(len(true_y)):
            # Average the direct estimate with the un-mirrored flipped one.
            predictions.append((0.5 * pred_y[j]) + (0.5 * np.fliplr(pred_y_flip[j])))
            testSetDepths.append(true_y[j])
    predictions = np.stack(predictions, axis=0)
    testSetDepths = np.stack(testSetDepths, axis=0)
    e = compute_errors(predictions, testSetDepths)
    if verbose:
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('a1', 'a2', 'a3', 'rel', 'rms', 'log_10'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(e[0], e[1], e[2], e[3], e[4], e[5]))
    return e
|
984,174 | 4a4ce451104af1714f687ca08da7c3c3f08887ab | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates/drops the addressbook_contact table."""
    def forwards(self, orm):
        """Apply: create the Contact table with all address/contact columns."""
        # Adding model 'Contact'
        db.create_table('addressbook_contact', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=13, blank=True)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('mi', self.gf('django.db.models.fields.CharField')(max_length=2, blank=True)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('address1', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('address2', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('address3', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('city', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('state', self.gf('django.db.models.fields.CharField')(max_length=2, blank=True)),
            ('zip', self.gf('django.db.models.fields.CharField')(max_length=5, blank=True)),
            ('country', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('phone', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('fax', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('grad_class', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
        ))
        db.send_create_signal('addressbook', ['Contact'])
    def backwards(self, orm):
        """Revert: drop the Contact table."""
        # Deleting model 'Contact'
        db.delete_table('addressbook_contact')
    # Frozen ORM snapshot used by South to reconstruct models at migration time.
    models = {
        'addressbook.contact': {
            'Meta': {'object_name': 'Contact'},
            'address1': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'address3': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'grad_class': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'mi': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
            'zip': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'})
        }
    }
    complete_apps = ['addressbook']
984,175 | efeeb1040bcc762b5e86d5dcda83e6a588b28c25 | import cv2
import numpy as np
import skimage.feature
import cv2 #for resizing image
import glob as gb
import skimage.measure
from skimage.filters import gaussian
def auto_canny(image, sigma=0.33):
    """Canny edge detection with automatic thresholds derived from the
    image's median intensity: lower/upper = (1 -/+ sigma) * median.

    Bug fix: the original used np.min despite its own comment (and the
    standard auto-Canny recipe) calling for the median; a minimum of 0 --
    common in these images -- collapsed both thresholds toward zero.
    """
    # compute the median of the single channel pixel intensities
    v = np.median(image)
    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(image, lower, upper, apertureSize=3, L2gradient=True)
    # return the edged image
    return edged
# For each PNG in the loop folder: blur, edge-detect, dilate, then flag
# frames containing a large contour (area >= 1200 px and min-area-rect
# width >= 80 px) as a "forceps biopsy" and count them.
img_path = gb.glob("data/Cold_biopsy/loop/*.png")
num = 0
for path in img_path:
    img = cv2.imread(path)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray',img)
    blurred = cv2.GaussianBlur(gray, (7, 7), 0)
    #edges = cv2.Canny(blurred,20,150,apertureSize = 3,L2gradient= True)
    #edges = cv2.Canny(blurred,20,100)
    edges = auto_canny(blurred)
    cv2.imshow('blur',blurred)
    # Thicken edges so adjacent fragments merge into one contour.
    kernel = np.ones((3,3),np.uint8)
    #mask = cv2.erode(edges,kernel)
    mask = cv2.dilate(edges,kernel)
    cv2.imshow('edge',mask)
    # NOTE(review): the 3-tuple return is the OpenCV 3.x findContours API;
    # OpenCV 4.x returns (contours, hierarchy) -- confirm the pinned version.
    _,contours,hierarchy= cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    mask = np.zeros(gray.shape,np.uint8)
    # NOTE(review): `all` shadows the built-in of the same name below.
    all = []
    for cnt in contours:
        hull = cv2.convexHull(cnt)
        area = cv2.contourArea(cnt)
        rect = cv2.minAreaRect(hull)#( center (x,y), (width, height), angle of rotation )
        box = np.int0(cv2.boxPoints(rect))
        rect_w,rect_h = rect[1]
        if area >= 1200 and rect_w >= 80:
            # Keep the hull and draw the accepted contour in green.
            all.append(hull)
            cv2.drawContours(img,[cnt], -1, (0,255,0), 3)
            #cv2.drawContours(mask,[hull],0,255,-1)
            # Contour centroid from image moments (unused beyond cx/cy).
            M = cv2.moments(cnt)
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            pixelpoints = np.transpose(np.nonzero(mask))
            # Morphological close (dilate then erode) on the mask.
            kernel = np.ones((3,3),np.uint8)
            mask = cv2.dilate(mask,kernel,iterations=1)
            mask = cv2.erode(mask,kernel,iterations=1)
            '''rect = cv2.minAreaRect(cnt)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(img,[box],0,(0,0,255),2)
            print(rect[1])'''
    # Any accepted contour means this frame is labeled a biopsy frame.
    if all!= []:
        text = 'forceps biopsy'
        cv2.putText(img,text,(100,100),cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), lineType=cv2.LINE_AA)
        num += 1
        cv2.drawContours(mask,all,0,255,-1)
    print(num)
    cv2.imshow('mask',mask)
    #draw_lines(img,lines)
    cv2.imshow('img',img)
    cv2.waitKey(0)
cv2.destroyAllWindows()
984,176 | 3cedc2a1fa2edaecec3602f98c79d2ff241c905d | #Programmer: Zhaoyang Dai
#Section: 22C:016:A06
#ID:00719596
def mostFrequent(dictionary):
    """Return up to the 50 dictionary keys with the largest values,
    ordered by descending value (ties keep dictionary insertion order).

    Bug fix: the original called .sort() on dict.values(), which is a
    view object (not a list) in Python 3 and raises AttributeError; it
    also rescanned the whole dictionary for every value, making it
    O(n^2). A single stable sort reproduces the same ordering.
    """
    # Negated value as the key keeps the sort stable for ties
    # (matching the original's dictionary-order tie-breaking).
    ranked = sorted(dictionary, key=lambda k: -dictionary[k])
    return ranked[:50]
984,177 | f8b610da95bef640d1643a1c1be7914595d483af | from app.models import db, Representative
import datetime
def seed_executive():
    """Insert hard-coded President and Vice-President rows (2017
    administration snapshot) into the representatives table and commit."""
    president = Representative(
        bioguide_id='not available',
        short_title='Pres.',
        first_name='Donald',
        last_name='Trump',
        date_of_birth=datetime.datetime(1946, 6, 14),
        party='R',
        twitter_handle='potus',
        facebook_handle='DonaldTrump',
        youtube_handle='whitehouse',
        image_url='https://www.whitehouse.gov/sites/whitehouse.gov/files/images/45/PE%20Color.jpg',
        website_url='https://www.whitehouse.gov/',
        contact_url='https://www.whitehouse.gov/contact/',
        in_office=True,
        next_election=None,
        ocd_id='not available',
        phone='202-456-1111',
        state_id='DC',
        # Vote statistics do not apply to the executive branch.
        missed_votes_pct=None,
        votes_with_party_pct=None,
        votes_against_party_pct=None
    )
    vice_president = Representative(
        bioguide_id='P000587',
        short_title='VP',
        first_name='Mike',
        last_name='Pence',
        date_of_birth=datetime.datetime(1959, 6, 7),
        party='R',
        twitter_handle='VP',
        facebook_handle='mikepence',
        youtube_handle=None,
        image_url='https://www.whitehouse.gov/sites/whitehouse.gov/files/images/45/VPE%20Color.jpg',
        website_url='https://www.whitehouse.gov/',
        contact_url='https://www.whitehouse.gov/contact/',
        in_office=True,
        next_election=None,
        ocd_id='not available',
        phone='202-456-1111',
        state_id='DC',
        missed_votes_pct=None,
        votes_with_party_pct=None,
        votes_against_party_pct=None
    )
    db.session.add(president)
    db.session.add(vice_president)
    db.session.commit()
def undo_executive():
    """Delete the seeded President and Vice-President rows, matched by
    short_title (.one() raises if a row is missing or duplicated)."""
    president = Representative.query.filter(Representative.short_title == 'Pres.').one()
    db.session.delete(president)
    vice_president = Representative.query.filter(Representative.short_title == 'VP').one()
    db.session.delete(vice_president)
    db.session.commit()
984,178 | c1c3915c55bccaf1f88d53bd16e90401b1c17152 | def defangIPaddr(address: str):
result = []
for i in address:
if i == ".":
result.append("[.]")
else:
result.append(i)
return "".join(map(str,result))
# Smoke test when run as a script: prints the defanged form of 1.1.1.1.
if __name__ == "__main__":
    print(defangIPaddr("1.1.1.1"))
|
984,179 | f09192ba967e076cc277f6475ce3348e64db9011 | import uuid
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
def global_common(request):
    """Django context-processor stub: contributes no extra template context."""
    return {}
984,180 | 6069ac6cbf5d5887d6d1a9218a65fc5ce0a46ae9 | def main():
#escribe tu código abajo de esta línea
pass
grade1=float(input("Give me grade 1"))
grade2=float(input("Give me grade 2"))
grade3=float(input("Give me grade 3"))
grade4=float(input("Give me grade 4"))
average=((grade1+grade2+grade3+grade4)/4)
print("Your average is", average)
# Script entry point.
if __name__ == '__main__':
    main()
|
984,181 | babd3ea73a37722a5275695c9fe0a334c6b6b4b6 | from django.urls import path
from .import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
# URL routes for the library-management app: public pages, registration
# and profile, the admin console, and book/request workflows.
urlpatterns = [
    # Public site pages.
    path('', views.index, name='index'),
    path('contact/', views.contact, name='contact'),
    path('gallery/', views.gallery, name='gallery'),
    path('about/', views.about, name='about'),
    path('services/', views.services, name='services'),
    # Student registration and profile.
    path('register/', views.register, name='register'),
    path('wel/', views.wel, name='wel'),
    path('profile/', views.profile, name='profile'),
    path('base/', views.base, name='base'),
    # Admin console: auth and book management.
    path('adminlogin/', views.adminlogin, name='adminlogin'),
    path('adminpage/', views.adminpage, name='adminpage'),
    path('adminlogout/', views.adminlogout, name='adminlogout'),
    path('newadmin/', views.newadmin, name='newadmin'),
    path('addbook/', views.addbook, name='addbook'),
    path('removebook/', views.removebook, name='removebook'),
    path('editbook/', views.editbook, name='editbook'),
    path('notice/', views.notice, name='notice'),
    # Book issue / return / request workflows.
    path('studentaddbook/', views.studentaddbook, name='studentaddbook'),
    path('bookissue/', views.bookissue, name='bookissue'),
    path('issuedbook/', views.issuedbook, name='issuedbook'),
    path('returnbook/', views.returnbook, name='returnbook'),
    path('requestbook/', views.requestbook, name='requestbook'),
    path('booklist/', views.booklist, name='booklist'),
    path('removebynumber/', views.removebynumber, name='removebynumber'),
    path('deletebynumber/', views.deletebynumber, name='deletebynumber'),
    path('deletepost/', views.deletepost, name='deletepost'),
    path('deletepostbypopup/', views.deletepostbypopup, name='deletepostbypopup'),
    path('verifyrequest/', views.verifyrequest, name='verifyrequest'),
    path('studentrequest/', views.studentrequest, name='studentrequest'),
    path('statusrequest/', views.statusrequest, name='statusrequest'),
    path('deleterequest/', views.deleterequest, name='deleterequest'),
    path('payfine/', views.payfine, name='payfine'),
]
984,182 | 6e57b20e4978068406da16f3d4c419cf813802ac | import matplotlib.pyplot as plt
import os
from pylab import *
import logging
mpl.rcParams['font.sans-serif'] = ['SimHei']
def get_file():
    """Locate data/data/1000725.csv under the current directory, parse it,
    and plot the resulting price/volume series."""
    path = os.path.join(os.getcwd(), 'data/data')
    filename = os.path.join(path, '1000725.csv')
    data = get_data(filename)
    draw(filename, data)
def get_data(filename):
    """Parse a stock-history CSV into parallel per-column lists.

    Rows are reversed to chronological order and only slice [-300:-1] of
    the reversed list is kept (same window as the original code, which
    drops the file's first line). Rows whose 'change' field is the string
    'None' get zeros for change/rate/vol/total; other rows are echoed to
    stdout with their trailing newline stripped.
    """
    columns = ['date', 'id', 'name', 'end', 'high', 'low', 'begin',
               'yesterday', 'change', 'rate', 'vol', 'total']
    data = {col: [] for col in columns}
    with open(filename, 'r') as f:
        lines = f.readlines()
    lines.reverse()
    for line in lines[-300:-1]:
        fields = line.split(',')
        # First three columns stay strings; the numeric ones are floats.
        row = fields[:3] + [float(v) for v in fields[3:8]]
        if fields[8] == 'None':
            row += [0, 0, 0, 0]
        else:
            row += [float(v) for v in fields[8:12]]
            print(line[:-1])
        for col, value in zip(columns, row):
            data[col].append(value)
    return data
def draw(filename, data):
    """Show a closing-price line chart with volume bars on a twin y-axis.

    Blocks in ``plt.show()`` until the window is closed.
    """
    logging.debug('图片生成中...')
    figure = plt.figure(figsize=(30, 18), dpi=80)
    stock_name = data['name'][0]
    price_ax = figure.add_subplot(1, 1, 1)
    price_ax.plot(data['date'], data['end'], label='price', color='red')
    price_ax.set_xlabel('date')
    price_ax.set_ylabel('price')
    price_ax.set_title('{} - {}'.format(filename, stock_name))
    price_ax.legend()
    volume_ax = price_ax.twinx()
    volume_ax.bar(data['date'], data['vol'], label='vol', color='blue')
    volume_ax.set_ylabel('vol')
    volume_ax.legend()
    logging.debug('请稍等...')
    plt.show()
    logging.debug('图片生成完成')
# Script entry point: load the bundled CSV and display the chart.
if __name__ == '__main__':
    get_file()
984,183 | b72484abb530d4bf2bf979eb000d234e7ea8228d | class Player:
def __init__(self, name):
self.name = name;
self.deck = [];
def getName(self):
return self.name
def addCard(self, card):
self.deck.append(card)
def removeCard(self, card):
self.deck.remove(card)
def getDeck(self):
return self.deck
def printDeck(self):
deckDashes = ""
deckData = ""
cardNumberData = ""
cardNumber = 0
for card in self.getDeck():
cardData = "{0} {1}".format(card.color, card.id)
dashLength = len(cardData)
spacedDashLength = dashLength - 1
deckData += "| {} |".format(cardData)
deckDashes += "| {} |".format("-" * dashLength)
cardNumberData += "| {0}{1} |".format(cardNumber, " " * spacedDashLength)
cardNumber += 1
print(deckDashes)
print(deckData)
print(deckDashes)
print(deckDashes)
print(cardNumberData)
print(deckDashes)
|
984,184 | 944a950b92123e5ea439630f31764117b7eaf735 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from . import resharing
import crypten.communicator as comm
from crypten.common.util import torch_stack
def __linear_circuit_block(x_block, y_block, encoder):
    """Run a byte-wise binary adder circuit over secret-shared byte planes.

    `x_block` / `y_block` are stacks of 8 byte-plane tensors (see
    `extract_msb`).  The loop below evaluates an 8-bit ripple-carry-style
    circuit with secret-shared AND gates; the result is wrapped back into
    a BinarySharedTensor carrying `encoder`.
    NOTE(review): the circuit structure (two stacked carry lanes selected
    by `select_bits`) is assumed from the code shape — confirm against the
    CrypTen circuit design docs.
    """
    from .binary import BinarySharedTensor
    from crypten.cuda import CUDALongTensor
    # Two candidate carry lanes: carry-in 0 (lane 0) and carry-in 1 (lane 1).
    ci = torch_stack([torch.zeros_like(x_block), torch.ones_like(y_block)])
    for i in range(8):
        # Extract bit i of every byte in both operands.
        xi = (x_block >> i) & 1
        yi = (y_block >> i) & 1
        xi, yi = torch_stack([xi, xi]), torch_stack([yi,yi])
        # Sum and carry of a full adder, computed with shared AND gates.
        si = xi ^ yi ^ ci
        ci = ci ^ resharing.AND_gate(xi ^ ci, yi ^ ci).share
    # Select which carry lane applies, byte plane by byte plane.
    select_bits = torch.zeros_like(ci[0,0])
    for i in range(8):
        select_bits = resharing.AND_gate(select_bits ^ 1, ci[0,i]).share ^ resharing.AND_gate(select_bits, ci[1,i]).share
    # `si` holds the last (most significant) plane's sum bits after the loop;
    # bit 7 of that plane is the overall sign bit.
    sign_bits = resharing.AND_gate(select_bits ^ 1, si[0,7]).share ^ resharing.AND_gate(select_bits, si[1,7]).share
    sign_bits = sign_bits.long()
    if sign_bits.is_cuda:
        sign_bits = CUDALongTensor(sign_bits)
    sign_bits = BinarySharedTensor.from_shares(sign_bits, src=comm.get().get_rank())
    sign_bits.encoder = encoder
    return sign_bits
def extract_msb(x, y):
    """Decompose each operand's 64-bit share into eight byte planes and feed
    them to the byte-wise circuit, returning its secret-shared sign output.
    """
    def byte_planes(share):
        # Plane i holds bits [8*i, 8*i + 8) of every element, as bytes.
        return torch_stack([((share >> (i * 8)) & 255).byte() for i in range(8)])

    return __linear_circuit_block(byte_planes(x.share), byte_planes(y.share), x.encoder)
|
984,185 | 2fca04590d88a4521c0fa70048836d37a023147c | #117CS0263 - Ashwin Sekhari
def write_output(x, y):
    """Write the cipher report to rail_output.txt and echo it to stdout.

    ``x`` is labelled as the encrypted string and ``y`` as the plain /
    decrypted string in the report.
    """
    report = [
        '\n#====== Rail Fence Cipher ======#',
        'Text: ' + y,
        'Encrypted: ' + x,
        'Decrypted: ' + y,
        '#----------------------------#\n',
    ]
    with open("rail_output.txt", "w") as handle:
        for line in report:
            print(line, file=handle)
    for line in report:
        print(line)
def encrypt(text, key):
    """Rail-fence encrypt ``text`` using ``key`` rails (key must be >= 2).

    Characters are laid out in a zigzag over the rails, then read off rail
    by rail.  (As in the original grid implementation, literal newline
    characters in ``text`` are dropped from the output.)
    """
    rails = [[] for _ in range(key)]
    going_down = False
    row = 0
    for ch in text:
        # Reverse direction at the top and bottom rails.
        if row == 0 or row == key - 1:
            going_down = not going_down
        rails[row].append(ch)
        row += 1 if going_down else -1
    return "".join(ch for rail in rails for ch in rail if ch != '\n')
def decrypt(cipher, key):
    """Invert rail-fence encryption: rebuild the zigzag and read it back.

    Works for ``key`` >= 2 and returns the original plaintext.
    """
    length = len(cipher)
    # Rail index visited at each plaintext position.
    rail_at = []
    row, going_down = 0, None
    for _ in range(length):
        if row == 0:
            going_down = True
        if row == key - 1:
            going_down = False
        rail_at.append(row)
        row += 1 if going_down else -1
    # encrypt emits positions sorted by (rail, position); hand the cipher
    # characters back out in that same order.
    emit_order = sorted(range(length), key=lambda pos: (rail_at[pos], pos))
    plain = [''] * length
    for ch, pos in zip(cipher, emit_order):
        plain[pos] = ch
    return "".join(plain)
def read_input():
    """Read rail_input.txt and return its lines, stripped, as a list."""
    with open("rail_input.txt", "r") as handle:
        return [line.strip() for line in handle.readlines()]
# Encrypt the input file's contents with a 2-rail fence, then round-trip
# decrypt to verify.
text = read_input()
cipher = encrypt(text, 2)
res = decrypt(cipher, 2)
# write_output(x, y) labels x as "Encrypted:" and y as "Text:"/"Decrypted:".
# The original call passed (res, cipher), which printed the ciphertext under
# the Text/Decrypted labels and the plaintext under Encrypted — swapped.
write_output(cipher, res)
|
984,186 | d72e2b9deb76dc94f105de30fc6377c9018551dc | # -*- coding: utf-8 -*-
#
# Copyright (c) 2019 PAL Robotics SL.
# All rights reserved.
#
# Software License Agreement (BSD License 2.0)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of PAL Robotics SL. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import importlib
import typing
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from rclpy.parameter import PARAMETER_SEPARATOR_STRING
from rosidl_runtime_py import set_message_fields
import sensor_msgs.msg
class JoyTeleopException(Exception):
    """Raised for configuration or runtime errors in the joy_teleop node."""
    pass
def get_interface_type(type_name: str, interface_type: str) -> typing.Any:
    """Resolve a 'package/<kind>/Name' string to the class it names.

    ``interface_type`` ('msg', 'srv' or 'action') must match the middle
    component of ``type_name``; the class is imported from
    ``<package>.<interface_type>``.

    Raises JoyTeleopException on a malformed name or mismatched kind.
    """
    parts = type_name.split('/')
    if len(parts) != 3:
        raise JoyTeleopException("Invalid type_name '{}'".format(type_name))
    package, interface, message = parts
    if interface != interface_type:
        raise JoyTeleopException("Cannot use interface of type '{}' for an '{}'"
                                 .format(interface, interface_type))
    module = importlib.import_module(package + '.' + interface_type)
    return getattr(module, message)
def set_member(msg: typing.Any, member: str, value: typing.Any) -> None:
    """Assign ``value`` to a dash-separated attribute path on ``msg``.

    'a-b-c' sets ``msg.a.b.c``; a single name sets the attribute directly.
    """
    path = member.split('-')
    if not path:
        return
    *parents, leaf = path
    target = msg
    for name in parents:
        target = getattr(target, name)
    setattr(target, leaf, value)
class JoyTeleopCommand:
    """Base class for a joystick-triggered command.

    Stores which buttons/axes trigger the command and maintains the
    ``active`` flag subclasses use for debouncing.
    """

    def __init__(self, name: str, config: typing.Dict[str, typing.Any],
                 button_name: str, axes_name: str) -> None:
        # NOTE(review): annotated List[str], but the entries are used as
        # numeric indices below (int(min(...)), joy_state.buttons[button]);
        # configs presumably supply ints — confirm against example configs.
        self.buttons: typing.List[str] = []
        if button_name in config:
            self.buttons = config[button_name]
        self.axes: typing.List[str] = []
        if axes_name in config:
            self.axes = config[axes_name]
        if len(self.buttons) == 0 and len(self.axes) == 0:
            raise JoyTeleopException("No buttons or axes configured for command '{}'".format(name))
        # Used to short-circuit the run command if there aren't enough buttons in the message.
        self.min_button = 0
        if len(self.buttons) > 0:
            self.min_button = int(min(self.buttons))
        self.min_axis = 0
        if len(self.axes) > 0:
            self.min_axis = int(min(self.axes))
        # This can be used to "debounce" the message; if there are multiple presses of the buttons
        # or axes, the command may only activate on the first one until it toggles again.  But this
        # is a command-specific behavior, the base class only provides the mechanism.
        self.active = False

    def update_active_from_buttons_and_axes(self, joy_state: sensor_msgs.msg.Joy) -> None:
        """Recompute ``self.active`` from the latest Joy message."""
        self.active = False
        # NOTE(review): min_button/min_axis are set to 0 in __init__ and never
        # to None, so the `is not None` guards are always true here.
        if (self.min_button is not None and len(joy_state.buttons) <= self.min_button) and \
           (self.min_axis is not None and len(joy_state.axes) <= self.min_axis):
            # Not enough buttons or axes, so it can't possibly be a message for this command.
            return
        for button in self.buttons:
            try:
                self.active |= joy_state.buttons[button] == 1
            except IndexError:
                # An index error can occur if this command is configured for multiple buttons
                # like (0, 10), but the length of the joystick buttons is only 1. Ignore these.
                pass
        for axis in self.axes:
            try:
                self.active |= joy_state.axes[axis] == 1.0
            except IndexError:
                # An index error can occur if this command is configured for multiple buttons
                # like (0, 10), but the length of the joystick buttons is only 1. Ignore these.
                pass
class JoyTeleopTopicCommand(JoyTeleopCommand):
    """Publishes a message on a topic while its deadman buttons/axes are held.

    Configured either with a fixed 'message_value' or with 'axis_mappings'
    that forward scaled joystick data into message fields.
    """

    def __init__(self, name: str, config: typing.Dict[str, typing.Any], node: Node) -> None:
        super().__init__(name, config, 'deadman_buttons', 'deadman_axes')
        self.name = name
        self.topic_type = get_interface_type(config['interface_type'], 'msg')
        # A 'message_value' is a fixed message that is sent in response to an activation.  It is
        # mutually exclusive with an 'axis_mapping'.
        self.msg_value = None
        if 'message_value' in config:
            msg_config = config['message_value']
            # Construct the fixed message and try to fill it in.  This message will be reused
            # during runtime, and has the side benefit of giving the user early feedback if the
            # config can't work.
            self.msg_value = self.topic_type()
            for target, param in msg_config.items():
                set_member(self.msg_value, target, param['value'])
        # An 'axis_mapping' takes data from one part of the message and scales and offsets it to
        # publish if an activation happens.  This is typically used to take joystick analog data
        # and republish it as a cmd_vel.  It is mutually exclusive with a 'message_value'.
        self.axis_mappings = {}
        if 'axis_mappings' in config:
            self.axis_mappings = config['axis_mappings']
            # Now check that the mappings have all of the required configuration.
            for mapping, values in self.axis_mappings.items():
                if 'axis' not in values and 'button' not in values and 'value' not in values:
                    raise JoyTeleopException("Axis mapping for '{}' must have an axis, button, "
                                             'or value'.format(name))
                if 'axis' in values:
                    if 'offset' not in values:
                        raise JoyTeleopException("Axis mapping for '{}' must have an offset"
                                                 .format(name))
                    if 'scale' not in values:
                        raise JoyTeleopException("Axis mapping for '{}' must have a scale"
                                                 .format(name))
        # Exactly one of the two configuration styles must be present.
        if self.msg_value is None and not self.axis_mappings:
            raise JoyTeleopException("No 'message_value' or 'axis_mappings' "
                                     "configured for command '{}'".format(name))
        if self.msg_value is not None and self.axis_mappings:
            raise JoyTeleopException("Only one of 'message_value' or 'axis_mappings' "
                                     "can be configured for command '{}'".format(name))
        qos = rclpy.qos.QoSProfile(history=rclpy.qos.QoSHistoryPolicy.KEEP_LAST,
                                   depth=1,
                                   reliability=rclpy.qos.QoSReliabilityPolicy.RELIABLE,
                                   durability=rclpy.qos.QoSDurabilityPolicy.VOLATILE)
        self.pub = node.create_publisher(self.topic_type, config['topic_name'], qos)

    def run(self, node: Node, joy_state: sensor_msgs.msg.Joy) -> None:
        """Publish on activation, debouncing fixed messages (see steps below)."""
        # The logic for responding to this joystick press is:
        # 1.  Save off the current state of active.
        # 2.  Update the current state of active based on buttons and axes.
        # 3.  If this command is currently not active, return without publishing.
        # 4.  If this is a msg_value, and the value of the previous active is the same as now,
        #     debounce and return without publishing.
        # 5.  In all other cases, publish.  This means that this is a msg_value and the button
        #     transitioned from 0 -> 1, or it means that this is an axis mapping and data should
        #     continue to be published without debouncing.
        last_active = self.active
        self.update_active_from_buttons_and_axes(joy_state)
        if not self.active:
            return
        if self.msg_value is not None and last_active == self.active:
            return
        if self.msg_value is not None:
            # This is the case for a static message.
            msg = self.msg_value
        else:
            # This is the case to forward along mappings.
            msg = self.topic_type()
            for mapping, values in self.axis_mappings.items():
                if 'axis' in values:
                    if len(joy_state.axes) > values['axis']:
                        val = joy_state.axes[values['axis']] * values.get('scale', 1.0) + \
                            values.get('offset', 0.0)
                    else:
                        node.get_logger().error('Joystick has only {} axes (indexed from 0),'
                                                'but #{} was referenced in config.'.format(
                                                    len(joy_state.axes), values['axis']))
                        val = 0.0
                elif 'button' in values:
                    if len(joy_state.buttons) > values['button']:
                        val = joy_state.buttons[values['button']] * values.get('scale', 1.0) + \
                            values.get('offset', 0.0)
                    else:
                        node.get_logger().error('Joystick has only {} buttons (indexed from 0),'
                                                'but #{} was referenced in config.'.format(
                                                    len(joy_state.buttons), values['button']))
                        val = 0.0
                elif 'value' in values:
                    # Pass on the value as its Python-implicit type
                    val = values.get('value')
                else:
                    node.get_logger().error(
                        'No Supported axis_mappings type found in: {}'.format(mapping))
                    val = 0.0
                set_member(msg, mapping, val)
        # If there is a stamp field, fill it with now().
        if hasattr(msg, 'header'):
            msg.header.stamp = node.get_clock().now().to_msg()
        self.pub.publish(msg)
class JoyTeleopServiceCommand(JoyTeleopCommand):
    """Calls a ROS service (with a pre-built request) on button activation."""

    def __init__(self, name: str, config: typing.Dict[str, typing.Any], node: Node) -> None:
        super().__init__(name, config, 'buttons', 'axes')
        self.name = name
        service_name = config['service_name']
        service_type = get_interface_type(config['interface_type'], 'srv')
        self.request = service_type.Request()
        if 'service_request' in config:
            # Set the message fields in the request in the constructor.  This request will be used
            # during runtime, and has the side benefit of giving the user early feedback if the
            # config can't work.
            set_message_fields(self.request, config['service_request'])
        self.service_client = node.create_client(service_type, service_name)
        # Tracked across calls so a service becoming ready can re-trigger a call.
        self.client_ready = False

    def run(self, node: Node, joy_state: sensor_msgs.msg.Joy) -> None:
        """Call the service once per activation edge (see steps below)."""
        # The logic for responding to this joystick press is:
        # 1.  Save off the current state of active.
        # 2.  Update the current state of active.
        # 3.  If this command is currently not active, return without calling the service.
        # 4.  Save off the current state of whether the service was ready.
        # 5.  Update whether the service is ready.
        # 6.  If the service is not currently ready, return without calling the service.
        # 7.  If the service was already ready, and the state of the button is the same as before,
        #     debounce and return without calling the service.
        # 8.  In all other cases, call the service.  This means that either this is a button
        #     transition 0 -> 1, or that the service became ready since the last call.
        last_active = self.active
        self.update_active_from_buttons_and_axes(joy_state)
        if not self.active:
            return
        last_ready = self.client_ready
        self.client_ready = self.service_client.service_is_ready()
        if not self.client_ready:
            return
        if last_ready == self.client_ready and last_active == self.active:
            return
        self.service_client.call_async(self.request)
class JoyTeleopActionCommand(JoyTeleopCommand):
    """Sends a ROS action goal (pre-built from config) on button activation."""

    def __init__(self, name: str, config: typing.Dict[str, typing.Any], node: Node) -> None:
        super().__init__(name, config, 'buttons', 'axes')
        self.name = name
        action_type = get_interface_type(config['interface_type'], 'action')
        self.goal = action_type.Goal()
        if 'action_goal' in config:
            # Set the message fields for the goal in the constructor.  This goal will be used
            # during runtime, and has the side benefit of giving the user early feedback if the
            # config can't work.
            set_message_fields(self.goal, config['action_goal'])
        action_name = config['action_name']
        self.action_client = ActionClient(node, action_type, action_name)
        # Tracked across calls so a server becoming ready can re-trigger a send.
        self.client_ready = False

    def run(self, node: Node, joy_state: sensor_msgs.msg.Joy) -> None:
        """Send the goal once per activation edge (see steps below)."""
        # The logic for responding to this joystick press is:
        # 1.  Save off the current state of active.
        # 2.  Update the current state of active.
        # 3.  If this command is currently not active, return without calling the action.
        # 4.  Save off the current state of whether the action was ready.
        # 5.  Update whether the action is ready.
        # 6.  If the action is not currently ready, return without calling the action.
        # 7.  If the action was already ready, and the state of the button is the same as before,
        #     debounce and return without calling the action.
        # 8.  In all other cases, call the action.  This means that either this is a button
        #     transition 0 -> 1, or that the action became ready since the last call.
        last_active = self.active
        self.update_active_from_buttons_and_axes(joy_state)
        if not self.active:
            return
        last_ready = self.client_ready
        self.client_ready = self.action_client.server_is_ready()
        if not self.client_ready:
            return
        if last_ready == self.client_ready and last_active == self.active:
            return
        self.action_client.send_goal_async(self.goal)
class JoyTeleop(Node):
    """
    Generic joystick teleoperation node.

    Will not start without configuration, has to be stored in 'teleop' parameter.
    See config/joy_teleop.yaml for an example.
    """

    def __init__(self):
        super().__init__('joy_teleop', allow_undeclared_parameters=True,
                         automatically_declare_parameters_from_overrides=True)
        self.commands = []
        names = []
        for name, config in self.retrieve_config().items():
            if name in names:
                raise JoyTeleopException("command '{}' was duplicated".format(name))
            try:
                interface_group = config['type']
                if interface_group == 'topic':
                    self.commands.append(JoyTeleopTopicCommand(name, config, self))
                elif interface_group == 'service':
                    self.commands.append(JoyTeleopServiceCommand(name, config, self))
                elif interface_group == 'action':
                    self.commands.append(JoyTeleopActionCommand(name, config, self))
                else:
                    raise JoyTeleopException("unknown type '{interface_group}' "
                                             "for command '{name}'".format_map(locals()))
            except TypeError:
                # This can happen on parameters we don't control, like 'use_sim_time'.
                self.get_logger().debug('parameter {} is not a dict'.format(name))
            names.append(name)
        # Don't subscribe until everything has been initialized.
        qos = rclpy.qos.QoSProfile(history=rclpy.qos.QoSHistoryPolicy.KEEP_LAST,
                                   depth=1,
                                   reliability=rclpy.qos.QoSReliabilityPolicy.RELIABLE,
                                   durability=rclpy.qos.QoSDurabilityPolicy.VOLATILE)
        self._subscription = self.create_subscription(
            sensor_msgs.msg.Joy, 'joy', self.joy_callback, qos)

    def retrieve_config(self):
        """Rebuild the nested config dict from the node's flat parameters."""
        config = {}
        for param_name in sorted(self._parameters.keys()):
            pval = self.get_parameter(param_name).value
            self.insert_dict(config, param_name, pval)
        return config

    def insert_dict(self, dictionary: typing.Dict[str, typing.Any], key: str, value: str) -> None:
        """Insert `value` at the nested location named by a dotted parameter key."""
        split = key.partition(PARAMETER_SEPARATOR_STRING)
        if split[0] == key and split[1] == '' and split[2] == '':
            # No separator left: this is a leaf.
            dictionary[key] = value
        else:
            if not split[0] in dictionary:
                dictionary[split[0]] = {}
            self.insert_dict(dictionary[split[0]], split[2], value)

    def joy_callback(self, msg: sensor_msgs.msg.Joy) -> None:
        """Fan each incoming Joy message out to every configured command."""
        for command in self.commands:
            command.run(self, msg)
def main(args=None):
    """Entry point: spin the JoyTeleop node until error or Ctrl-C."""
    rclpy.init(args=args)
    node = JoyTeleop()
    try:
        rclpy.spin(node)
    except JoyTeleopException as e:
        # Python 3 exceptions have no `.message` attribute (the original
        # `e.message` raised AttributeError here); str(e) is the portable
        # way to get the exception text.
        node.get_logger().error(str(e))
    except KeyboardInterrupt:
        pass
    node.destroy_node()
    rclpy.shutdown()
|
984,187 | 94238e116413d964e33a334f4b0e9e7fbb4f1dc8 | import io
import logging
import os
os.environ["TZ"] = "UTC"
from copy import deepcopy
from deepdiff import DeepDiff
from datetime import datetime
import os.path
from unittest.mock import MagicMock
from typing import Tuple
import pytest
from werkzeug.exceptions import NotFound, Unauthorized, UnprocessableEntity, BadRequest
from cidc_schemas import prism
from cidc_schemas.prism import PROTOCOL_ID_FIELD_NAME, LocalFileUploadEntry
from cidc_schemas.template_reader import ValidationError
from cidc_api.resources.upload_jobs import (
INTAKE_ROLES,
extract_schema_and_xlsx,
requires_upload_token_auth,
upload_data_files,
_remove_optional_uuid_recursive,
)
from cidc_api.models import (
TrialMetadata,
Users,
UploadJobs,
UploadJobStatus,
Permissions,
DownloadableFiles,
CIDCRole,
ROLES,
ValidationMultiError,
)
from ..utils import make_role, mock_current_user, make_admin, mock_gcloud_client
# Identifiers shared by every test in this module.
trial_id = "test_trial"
user_email = "test@email.com"

# Metadata patch adding two participants with three samples each; used to
# exercise manifest-derived participant/sample updates.
PBMC_PATCH: dict = {
    "participants": [
        {
            "samples": [
                {"cimac_id": "CTTTP01A1.00"},
                {"cimac_id": "CTTTP01A2.00"},
                {"cimac_id": "CTTTP01A3.00"},
            ],
            "cimac_participant_id": "CTTTP01",
        },
        {
            "samples": [
                {"cimac_id": "CTTTP02A1.00"},
                {"cimac_id": "CTTTP02A2.00"},
                {"cimac_id": "CTTTP02A3.00"},
            ],
            "cimac_participant_id": "CTTTP02",
        },
    ]
}
def setup_trial_and_user(cidc_api, monkeypatch) -> int:
    """
    Insert a trial and a cimac-user into the database, and set the user
    as the current user.

    Returns the inserted user's database id.
    """
    # this is necessary for adding/removing permissions from this user
    # without trying to contact GCP
    mock_gcloud_client(monkeypatch)
    user = Users(
        email=user_email, role=CIDCRole.CIMAC_USER.value, approval_date=datetime.now()
    )
    mock_current_user(user, monkeypatch)
    with cidc_api.app_context():
        TrialMetadata(
            trial_id="test_trial",
            metadata_json={
                prism.PROTOCOL_ID_FIELD_NAME: trial_id,
                "participants": [],
                "allowed_cohort_names": ["Arm_Z"],
                "allowed_collection_event_names": [],
            },
        ).insert()
        user.insert()
        return user.id
def setup_upload_jobs(cidc_api) -> Tuple[int, int, int]:
    """
    Insert two uploads into the database created by different users
    and return their IDs.
    Insert a third upload by a different user and change its status to merge-completed.

    Returns (current-user job id, other-user job id, merge-completed job id).
    """
    with cidc_api.app_context():
        other_user = Users(email="other@email.org")
        other_user.insert()
        test2user = Users(email="test2@email.org")
        test2user.insert()
        job1 = UploadJobs(
            uploader_email=user_email,
            trial_id=trial_id,
            status=UploadJobStatus.STARTED.value,
            metadata_patch={"test": {"upload_placeholder": "baz"}, "test2": "foo"},
            upload_type="",
            gcs_xlsx_uri="",
            gcs_file_map={"bip": "baz"},
            multifile=False,
        )
        job2 = UploadJobs(
            uploader_email=other_user.email,
            trial_id=trial_id,
            status=UploadJobStatus.STARTED.value,
            metadata_patch={"array": [{"upload_placeholder": "baz"}, {"test2": "foo"}]},
            upload_type="",
            gcs_xlsx_uri="",
            gcs_file_map={"bip": "baz"},
            multifile=False,
        )
        job_merge_completed = UploadJobs(
            uploader_email=test2user.email,
            trial_id=trial_id,
            status=UploadJobStatus.STARTED.value,
            metadata_patch={"test": {"upload_placeholder": "baz"}, "test2": "foo"},
            upload_type="olink",
            gcs_xlsx_uri="dummy_file.txt",
            gcs_file_map=None,
            multifile=False,
        )
        # Walk the job through the legal status transitions to merge-completed.
        job_merge_completed.status = UploadJobStatus.UPLOAD_COMPLETED.value
        job_merge_completed.status = UploadJobStatus.MERGE_COMPLETED.value
        job1.insert()
        job2.insert()
        job_merge_completed.insert()
        return job1.id, job2.id, job_merge_completed.id
def make_nci_biobank_user(user_id, cidc_api):
    """Switch the given user's role to NCI biobank user."""
    with cidc_api.app_context():
        record = Users.find_by_id(user_id)
        record.role = CIDCRole.NCI_BIOBANK_USER.value
        record.update()
def make_cimac_biofx_user(user_id, cidc_api):
    """Switch the given user's role to CIMAC biofx user."""
    with cidc_api.app_context():
        record = Users.find_by_id(user_id)
        record.role = CIDCRole.CIMAC_BIOFX_USER.value
        record.update()
### UploadJobs REST endpoints ###
def test_list_upload_jobs(cidc_api, clean_db, monkeypatch):
    """Check that listing upload jobs works as expected."""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    user_job, other_job, merge_completed_job = setup_upload_jobs(cidc_api)
    client = cidc_api.test_client()
    # Regular CIMAC users aren't allowed to list upload jobs
    res = client.get("upload_jobs")
    assert res.status_code == 401
    # Biofx users can only view their own upload jobs by default
    make_cimac_biofx_user(user_id, cidc_api)
    res = client.get("upload_jobs")
    assert res.status_code == 200
    assert res.json["_meta"]["total"] == 1
    assert res.json["_items"][0]["id"] == user_job
    # Admin users can view all upload jobs
    make_admin(user_id, cidc_api)
    res = client.get("upload_jobs")
    assert res.status_code == 200
    assert res.json["_meta"]["total"] == 3
    # Order isn't guaranteed, so compare as a set of ids.
    assert set(i["id"] for i in res.json["_items"]) == set(
        [user_job, other_job, merge_completed_job]
    )
def test_get_upload_job(cidc_api, clean_db, monkeypatch):
    """Check that getting a single upload job by ID works as expected."""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    user_job, other_job, _ = setup_upload_jobs(cidc_api)
    client = cidc_api.test_client()
    # Regular CIMAC users aren't allowed to get upload jobs
    res = client.get(f"upload_jobs/{user_job}")
    assert res.status_code == 401
    make_cimac_biofx_user(user_id, cidc_api)
    # 404 for non-existent upload
    res = client.get(f"upload_jobs/123123")
    assert res.status_code == 404
    # 404 for another user's upload if non-admin
    res = client.get(f"upload_jobs/{other_job}")
    assert res.status_code == 404
    # 200 for user's upload
    res = client.get(f"upload_jobs/{user_job}")
    assert res.status_code == 200
    assert res.json["id"] == user_job
    # 200 for another user's upload if admin
    make_admin(user_id, cidc_api)
    res = client.get(f"upload_jobs/{other_job}")
    assert res.status_code == 200
    assert res.json["id"] == other_job
def test_requires_upload_token_auth(cidc_api, clean_db, monkeypatch):
    """Check that the requires_upload_token_auth decorator works as expected"""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    job_id = setup_upload_jobs(cidc_api)[0]
    with cidc_api.app_context():
        job = UploadJobs.find_by_id(job_id)

    test_route = "/foobarfoo"

    # Minimal endpoint wrapped by the decorator under test.
    @requires_upload_token_auth
    def endpoint(*args, **kwargs):
        assert "upload_job" in kwargs
        return "ok", 200

    query_route = f"{test_route}/{job_id}"
    nonexistent_job_id = "9999999"

    # User must provide `token` query param
    with cidc_api.test_request_context(query_route):
        with pytest.raises(UnprocessableEntity) as e:
            endpoint(upload_job=job_id)
        assert e._excinfo[1].data["messages"]["query"]["token"] == [
            "Missing data for required field."
        ]
    # User must provide correct `token` query param
    with cidc_api.test_request_context(f"{query_route}?token={'bad token'}"):
        with pytest.raises(
            Unauthorized, match="upload_job token authentication failed"
        ):
            endpoint(upload_job=job_id)
    with cidc_api.test_request_context(f"{query_route}?token={job.token}"):
        assert endpoint(upload_job=job_id) == ("ok", 200)
    # User whose id token authentication succeeds gets a 404 if the relevant job doesn't exist
    with cidc_api.test_request_context(
        f"{test_route}/{nonexistent_job_id}?token={job.token}"
    ):
        with pytest.raises(NotFound):
            endpoint(upload_job=nonexistent_job_id)
    # Simulate id-token auth failing for the remaining cases.
    monkeypatch.setattr(
        "cidc_api.resources.upload_jobs.authenticate_and_get_user",
        lambda *args, **kwargs: None,
    )
    # User whose id token authentication fails can still successfully authenticate
    # using an upload token.
    with cidc_api.test_request_context(f"{query_route}?token={job.token}"):
        assert endpoint(upload_job=job_id) == ("ok", 200)
    # User whose id token authentication fails gets a 401 if the relevant job doesn't exist
    with cidc_api.test_request_context(
        f"{test_route}/{nonexistent_job_id}?token={job.token}"
    ):
        with pytest.raises(
            Unauthorized, match="upload_job token authentication failed"
        ):
            endpoint(upload_job=nonexistent_job_id)
@pytest.mark.skip("Data Freeze")
def test_update_upload_job(cidc_api, clean_db, monkeypatch):
    """Check that getting a updating an upload job by ID works as expected."""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    user_job, other_job, _ = setup_upload_jobs(cidc_api)
    with cidc_api.app_context():
        user_job_record = UploadJobs.find_by_id(user_job)
        other_job_record = UploadJobs.find_by_id(other_job)

    # Stub out GCS side effects so we can assert on pub/sub + ACL calls.
    publish_success = MagicMock()
    monkeypatch.setattr(
        "cidc_api.shared.gcloud_client.publish_upload_success", publish_success
    )
    revoke_upload_access = MagicMock()
    monkeypatch.setattr(
        "cidc_api.shared.gcloud_client.revoke_upload_access", revoke_upload_access
    )

    client = cidc_api.test_client()
    # Possible patches
    upload_success = {"status": UploadJobStatus.UPLOAD_COMPLETED.value}
    upload_failure = {"status": UploadJobStatus.UPLOAD_FAILED.value}
    invalid_update = {"status": UploadJobStatus.MERGE_COMPLETED.value}
    # A user gets error if they fail to provide an upload token
    res = client.patch(f"/upload_jobs/{other_job}", json=upload_success)
    assert res.status_code == 422
    publish_success.assert_not_called()
    revoke_upload_access.assert_not_called()
    # A user gets an authentication error if they provide an incorrect upload token
    res = client.patch(
        f"/upload_jobs/{other_job}?token=nope",
        headers={"if-match": other_job_record._etag},
        json=upload_success,
    )
    assert res.status_code == 401
    assert res.json["_error"]["message"] == "upload_job token authentication failed"
    publish_success.assert_not_called()
    revoke_upload_access.assert_not_called()
    # A user gets an error if they try to update something besides the job's status
    res = client.patch(
        f"/upload_jobs/{other_job}?token={other_job_record.token}",
        headers={"if-match": other_job_record._etag},
        json={"uploader_email": "foo@bar.com", "status": ""},
    )
    assert res.status_code == 422
    assert res.json["_error"]["message"]["uploader_email"][0] == "Unknown field."
    # A user providing a correct token can update their job's status to be a failure
    res = client.patch(
        f"/upload_jobs/{other_job}?token={other_job_record.token}",
        headers={"if-match": other_job_record._etag},
        json={"gcs_file_map": {"foo": "bar"}, **upload_failure},
    )
    assert res.status_code == 200
    publish_success.assert_not_called()
    revoke_upload_access.assert_called_once()
    revoke_upload_access.reset_mock()
    with cidc_api.app_context():
        # On failure, placeholder entries are pruned from the metadata patch.
        modified_job = UploadJobs.find_by_id(other_job)
        assert modified_job.metadata_patch == {"array": [{"test2": "foo"}]}
        user_job_record._set_status_no_validation(UploadJobStatus.STARTED.value)
        user_job_record.update()
    # A user can update a job to be a success
    # Also allows for updating the gcs_file_map and thereby the metadata_patch
    res = client.patch(
        f"/upload_jobs/{user_job}?token={user_job_record.token}",
        headers={"if-match": user_job_record._etag},
        json={"gcs_file_map": {"foo": "bar"}, **upload_success},
    )
    assert res.status_code == 200
    publish_success.assert_called_once_with(user_job)
    revoke_upload_access.assert_called_once()
    with cidc_api.app_context():
        modified_job = UploadJobs.find_by_id(user_job)
        assert modified_job.gcs_file_map == {"foo": "bar"}
        assert modified_job.metadata_patch == {"test2": "foo"}
    publish_success.reset_mock()
    revoke_upload_access.reset_mock()
    with cidc_api.app_context():
        user_job_record._set_status_no_validation(UploadJobStatus.STARTED.value)
        user_job_record.update()
    # Users can't make an illegal state transition
    res = client.patch(
        f"/upload_jobs/{user_job}?token={user_job_record.token}",
        headers={"if-match": user_job_record._etag},
        json=invalid_update,
    )
    assert res.status_code == 400
### Ingestion tests ###
@pytest.fixture
def some_file():
    """A throwaway in-memory file object for upload tests."""
    return io.BytesIO(b"foobar")
def grant_upload_permission(user_id, upload_type, cidc_api):
    """Give `user_id` permission to upload `upload_type` data for the test trial."""
    with cidc_api.app_context():
        permission = Permissions(
            granted_by_user=user_id,
            granted_to_user=user_id,
            trial_id=trial_id,
            upload_type=upload_type,
        )
        permission.insert()
def form_data(filename=None, fp=None, schema=None):
    """
    Build multipart form data for ingestion-endpoint tests.

    With no filename, return plain text form data only.  With a filename,
    attach a file part, using `fp` if provided or a stub byte stream
    otherwise.  `schema` adds a "schema" field when given.
    """
    data = {"foo": "bar"}
    if schema:
        data["schema"] = schema
    if filename:
        stream = fp or io.BytesIO(b"blah blah")
        data["template"] = (stream, filename)
    return data
VALIDATE = "/ingestion/validate"
ASSAY_UPLOAD = "/ingestion/upload_assay"
ANALYSIS_UPLOAD = "/ingestion/upload_analysis"
MANIFEST_UPLOAD = "/ingestion/upload_manifest"
class UploadMocks:
    """
    Bundle of MagicMocks monkeypatched over the gcloud/schema entry points the
    ingestion endpoints call, so tests can assert on upload side effects
    without touching GCS or parsing real templates.
    """

    def __init__(
        self,
        monkeypatch,
        prismify_trial_id="test_trial",
        prismify_file_entries=None,
        prismify_extra=None,
        prismify_errors=None,
    ):
        # BUG FIX: the original used mutable default arguments ([] / {}),
        # which are shared across calls; normalize None to fresh containers.
        prismify_file_entries = prismify_file_entries or []
        prismify_extra = prismify_extra or {}
        prismify_errors = prismify_errors or []

        self.grant_write = MagicMock()
        monkeypatch.setattr(
            "cidc_api.shared.gcloud_client.grant_upload_access", self.grant_write
        )

        # Fake GCS blob returned by the xlsx upload helper.
        self.upload_xlsx = MagicMock(name="upload_xlsx")
        self.upload_xlsx.return_value = MagicMock(name="upload_xlsx.return_value")
        self.upload_xlsx.return_value.name = "trial_id/xlsx/assays/wes/12345"
        self.upload_xlsx.return_value.size = 100
        self.upload_xlsx.return_value.md5_hash = "md5_hash"
        self.upload_xlsx.return_value.crc32c = "crc32c_hash"
        self.upload_xlsx.return_value.time_created = datetime.now()
        monkeypatch.setattr(
            "cidc_api.shared.gcloud_client.upload_xlsx_to_gcs", self.upload_xlsx
        )

        self.revoke_write = MagicMock(name="revoke_write")
        monkeypatch.setattr(
            "cidc_api.shared.gcloud_client.revoke_upload_access", self.revoke_write
        )

        self.publish_success = MagicMock(name="publish_success")
        monkeypatch.setattr(
            "cidc_api.shared.gcloud_client.publish_upload_success", self.publish_success
        )

        self.publish_patient_sample_update = MagicMock()
        monkeypatch.setattr(
            "cidc_api.shared.gcloud_client.publish_patient_sample_update",
            self.publish_patient_sample_update,
        )

        self.open_xlsx = MagicMock(name="open_xlsx")
        self.open_xlsx.return_value = MagicMock(name="open_xlsx.return_value")
        monkeypatch.setattr("openpyxl.load_workbook", self.open_xlsx)

        # Empty generator => template validates cleanly by default.
        self.iter_errors = MagicMock(name="iter_errors")
        self.iter_errors.return_value = (_ for _ in range(0))
        monkeypatch.setattr(
            "cidc_schemas.template_reader.XlTemplateReader.iter_errors",
            self.iter_errors,
        )

        self.prismify = MagicMock(name="prismify")
        monkeypatch.setattr("cidc_schemas.prism.prismify", self.prismify)
        self.prismify.return_value = (
            dict(**{PROTOCOL_ID_FIELD_NAME: prismify_trial_id}, **prismify_extra),
            prismify_file_entries,
            prismify_errors,
        )

    def make_all_assertions(self):
        """Assert the full parse-validate-prismify pipeline ran exactly once."""
        self.prismify.assert_called_once()
        self.iter_errors.assert_called_once()
        self.open_xlsx.assert_called_once()

    def clear_all(self):
        """Reset call records on every mock attribute."""
        for attr in self.__dict__.values():
            if isinstance(attr, MagicMock):
                attr.reset_mock()
def test_validate_valid_template(cidc_api, some_file, clean_db, monkeypatch):
    """Ensure that the validation endpoint returns no errors for a known-valid .xlsx file"""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    make_admin(user_id, cidc_api)
    client = cidc_api.test_client()
    data = form_data("pbmc.xlsx", some_file, "pbmc")
    mocks = UploadMocks(monkeypatch)
    grant_upload_permission(user_id, "pbmc", cidc_api)
    res = client.post(VALIDATE, data=data)
    assert res.status_code == 200
    assert res.json["errors"] == []
    # The mocked template reader must have been consulted exactly once.
    mocks.iter_errors.assert_called_once()
def test_validate_invalid_template(cidc_api, clean_db, monkeypatch):
    """Ensure that the validation endpoint returns errors for a known-invalid .xlsx file"""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    make_admin(user_id, cidc_api)
    grant_upload_permission(user_id, "pbmc", cidc_api)
    mocks = UploadMocks(monkeypatch)
    client = cidc_api.test_client()
    # handles ValidationError thrown by `XlTemplateReader.from_xlsx`
    mocks.open_xlsx.side_effect = ValidationError("uh oh")
    res = client.post(VALIDATE, data=form_data("pbmc.xlsx", io.BytesIO(b"123"), "pbmc"))
    assert res.status_code == 400
    assert res.json["_error"]["message"]["errors"] == ["uh oh"]
    # handles errors returned by `XlTemplateReader.iter_errors`
    # (side_effect cleared first so the workbook "opens" successfully)
    mocks.open_xlsx.side_effect = None
    mocks.iter_errors.return_value = ["test error"]
    res = client.post(VALIDATE, data=form_data("pbmc.xlsx", io.BytesIO(b"123"), "pbmc"))
    assert res.status_code == 400
    assert len(res.json["_error"]["message"]) > 0
# Each parametrized case is a malformed request and the error it should raise.
@pytest.mark.parametrize(
    "url,data,error,message",
    [
        # Missing form content
        [VALIDATE, None, BadRequest, "form content"],
        # Form missing template file
        [VALIDATE, form_data(), BadRequest, "template file"],
        # Template file is non-.xlsx
        [VALIDATE, form_data("text.txt"), BadRequest, ".xlsx file"],
        # URL is missing "schema" query param
        [VALIDATE, form_data("text.xlsx"), BadRequest, "form entry for 'schema'"],
        # "schema" query param references non-existent schema
        [
            VALIDATE,
            form_data("test.xlsx", schema="foo/bar"),
            BadRequest,
            "not supported",
        ],
    ],
)
def test_extract_schema_and_xlsx_failures(
    cidc_api, url, data, error, message, clean_db, monkeypatch
):
    """
    Test that we get the expected errors when trying to extract
    schema/template from a malformed request.
    """
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    with cidc_api.test_request_context(url, data=data):
        with pytest.raises(error, match=message):
            extract_schema_and_xlsx([])
def test_upload_manifest_non_existing_trial_id(
    cidc_api, some_file, clean_db, monkeypatch
):
    """Manifest uploads against an unknown trial id are rejected before any GCS work."""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    make_admin(user_id, cidc_api)
    # prismify reports a trial id that was never inserted into the test DB
    mocks = UploadMocks(monkeypatch, prismify_trial_id="test-non-existing-trial-id")
    client = cidc_api.test_client()
    res = client.post(MANIFEST_UPLOAD, data=form_data("pbmc.xlsx", some_file, "pbmc"))
    assert res.status_code == 400
    assert "test-non-existing-trial-id" in str(res.json["_error"]["message"])
    # Check that we tried to upload the excel file
    mocks.upload_xlsx.assert_not_called()
    mocks.iter_errors.assert_called_once()
    mocks.prismify.assert_called_once()
@pytest.mark.skip("Data Freeze")
def test_upload_manifest_on_validation_multierror(
    cidc_api, some_file, clean_db, monkeypatch
):
    """Ensure that manifest uploads catch ValidationMultiErrors"""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    make_admin(user_id, cidc_api)
    UploadMocks(monkeypatch)
    client = cidc_api.test_client()
    # Force the metadata merge step to fail with multiple errors at once.
    patch_manifest = MagicMock()
    patch_manifest.side_effect = ValidationMultiError(["one error", "another error"])
    monkeypatch.setattr(
        "cidc_api.resources.upload_jobs.TrialMetadata.patch_manifest",
        staticmethod(patch_manifest),
    )
    res = client.post(MANIFEST_UPLOAD, data=form_data("pbmc.xlsx", some_file, "pbmc"))
    assert res.status_code == 400
    # All collected errors should surface in the response body.
    assert res.json["_error"]["message"] == {"errors": ["one error", "another error"]}
def test_upload_invalid_manifest(cidc_api, some_file, clean_db, monkeypatch):
    """A manifest that fails template validation is rejected and never uploaded."""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    make_admin(user_id, cidc_api)
    mocks = UploadMocks(monkeypatch)
    # Make the template reader report a validation error.
    mocks.iter_errors.return_value = ["bad, bad error"]
    grant_upload_permission(user_id, "pbmc", cidc_api)
    client = cidc_api.test_client()
    res = client.post(MANIFEST_UPLOAD, data=form_data("pbmc.xlsx", some_file, "pbmc"))
    assert res.status_code == 400
    assert len(res.json["_error"]["message"]["errors"]) > 0
    # Check that we tried to upload the excel file
    mocks.upload_xlsx.assert_not_called()
def test_upload_unsupported_manifest(cidc_api, some_file, clean_db, monkeypatch):
    """An unknown manifest schema name is rejected with a descriptive 400."""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    make_admin(user_id, cidc_api)
    mocks = UploadMocks(monkeypatch)
    client = cidc_api.test_client()
    res = client.post(
        MANIFEST_UPLOAD, data=form_data("pbmc.xlsx", some_file, "UNSUPPORTED_")
    )
    assert res.status_code == 400
    # The error message lowercases the schema name the client sent.
    assert (
        "'unsupported_' is not supported for this endpoint."
        in res.json["_error"]["message"]
    )
    assert "UNSUPPORTED_".lower() in res.json["_error"]["message"]
    # Check that we tried to upload the excel file
    mocks.upload_xlsx.assert_not_called()
@pytest.mark.skip("Data Freeze")
def test_admin_upload(cidc_api, clean_db, monkeypatch):
    """Ensure an admin can upload assays and manifests without specific permissions."""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    make_admin(user_id, cidc_api)
    mocks = UploadMocks(
        monkeypatch,
        prismify_extra=PBMC_PATCH,
    )
    client = cidc_api.test_client()
    # Note: no grant_upload_permission() call — admin bypasses permissions.
    res = client.post(
        MANIFEST_UPLOAD,
        data=form_data(
            "pbmc.xlsx",
            io.BytesIO(b"a"),
            "pbmc",
        ),
    )
    assert res.status_code == 200
    res = client.post(
        ASSAY_UPLOAD,
        data=form_data(
            "wes.xlsx",
            io.BytesIO(b"1234"),
            "wes_fastq",
        ),
    )
    assert res.status_code == 200
@pytest.mark.skip("Data Freeze")
def test_upload_manifest(cidc_api, clean_db, monkeypatch, caplog):
    """Ensure the upload_manifest endpoint follows the expected execution flow"""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    mocks = UploadMocks(
        monkeypatch,
        prismify_extra=PBMC_PATCH,
    )
    client = cidc_api.test_client()
    # NCI users can upload manifests without explicit permission
    make_nci_biobank_user(user_id, cidc_api)
    # Capture log output so the "email sent" debug message can be asserted on.
    with caplog.at_level(logging.DEBUG):
        res = client.post(
            MANIFEST_UPLOAD,
            data=form_data(
                "pbmc.xlsx",
                io.BytesIO(b"a"),
                "pbmc",
            ),
        )
    assert res.status_code == 200
    # Check that upload alert email was "sent"
    assert "Would send email with subject '[UPLOAD SUCCESS]" in caplog.text
    # Check that we tried to publish a patient/sample update
    mocks.publish_patient_sample_update.assert_called_once()
    # Check that we tried to upload the excel file
    mocks.make_all_assertions()
@pytest.mark.skip("Data Freeze")
def test_upload_manifest_twice(cidc_api, clean_db, monkeypatch):
    """Ensure that doing upload_manifest twice will produce only one DownloadableFiles"""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    mocks = UploadMocks(
        monkeypatch,
        prismify_extra=PBMC_PATCH,
    )
    client = cidc_api.test_client()
    grant_upload_permission(user_id, "pbmc", cidc_api)
    make_nci_biobank_user(user_id, cidc_api)
    res = client.post(
        MANIFEST_UPLOAD,
        data=form_data(
            "pbmc.xlsx",
            io.BytesIO(b"a"),
            "pbmc",
        ),
    )
    assert res.status_code == 200
    # Check that we tried to publish a patient/sample update
    mocks.publish_patient_sample_update.assert_called_once()
    with cidc_api.app_context():
        assert not DownloadableFiles.list()  # manifest is not stored
    # uploading second time
    res = client.post(
        MANIFEST_UPLOAD,
        data=form_data(
            "pbmc.xlsx",
            io.BytesIO(b"a"),
            "pbmc",
        ),
    )
    assert res.status_code == 200
    assert mocks.upload_xlsx.call_count == 0  # manifest is not stored
    with cidc_api.app_context():
        assert not DownloadableFiles.list()  # manifest is not stored
finfo = LocalFileUploadEntry
@pytest.mark.skip("Data Freeze")
def test_upload_endpoint_blocking(cidc_api, clean_db, monkeypatch):
    """Ensure you can't upload an analysis to the upload assay endpoint or vice versa"""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    make_admin(user_id, cidc_api)
    mocks = UploadMocks(monkeypatch)
    client = cidc_api.test_client()
    # Factories (not values) so each POST gets a fresh BytesIO to read from.
    assay_form = lambda: form_data("cytof.xlsx", io.BytesIO(b"1234"), "cytof")
    analysis_form = lambda: form_data(
        "cytof_analysis.xlsx", io.BytesIO(b"1234"), "cytof_analysis"
    )
    res = client.post(ASSAY_UPLOAD, data=assay_form())
    assert res.status_code == 200
    res = client.post(ASSAY_UPLOAD, data=analysis_form())
    assert "not supported" in res.json["_error"]["message"]
    assert res.status_code == 400
    res = client.post(ANALYSIS_UPLOAD, data=analysis_form())
    assert res.status_code == 200
    res = client.post(ANALYSIS_UPLOAD, data=assay_form())
    assert "not supported" in res.json["_error"]["message"]
    assert res.status_code == 400
@pytest.mark.skip("Data Freeze")
def test_upload_wes(cidc_api, clean_db, monkeypatch):
    """Ensure the upload endpoint follows the expected execution flow"""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    make_cimac_biofx_user(user_id, cidc_api)
    with cidc_api.app_context():
        user = Users.find_by_id(user_id)
    client = cidc_api.test_client()
    mocks = UploadMocks(
        monkeypatch,
        prismify_file_entries=[
            finfo("localfile.ext", "test_trial/url/file.ext", "uuid-1", None, False)
        ],
    )
    # No permission to upload yet
    res = client.post(
        ASSAY_UPLOAD,
        data=form_data(
            "wes.xlsx",
            io.BytesIO(b"1234"),
            "wes_fastq",
        ),
    )
    assert res.status_code == 401
    assert "not authorized to upload wes_fastq data" in str(
        res.json["_error"]["message"]
    )
    mocks.clear_all()
    # Give permission and retry
    grant_upload_permission(user_id, "wes_fastq", cidc_api)
    res = client.post(
        ASSAY_UPLOAD,
        data=form_data(
            "wes.xlsx",
            io.BytesIO(b"1234"),
            "wes_fastq",
        ),
    )
    assert res.status_code == 200
    assert "url_mapping" in res.json
    url_mapping = res.json["url_mapping"]
    # WES assay does not have any extra_metadata files, but its (and every assay's) response
    # should have an extra_metadata field.
    assert "extra_metadata" in res.json
    extra_metadata = res.json["extra_metadata"]
    assert extra_metadata is None
    # We expect local_path to map to a gcs object name with gcs_prefix
    local_path = "localfile.ext"
    gcs_prefix = "test_trial/url/file.ext"
    gcs_object_name = url_mapping[local_path]
    assert local_path in url_mapping
    assert gcs_object_name.startswith(gcs_prefix)
    assert not gcs_object_name.endswith(
        local_path
    ), "PHI from local_path shouldn't end up in gcs urls"
    # Check that we tried to grant IAM upload access to gcs_object_name
    mocks.grant_write.assert_called_with(user.email)
    # Check that we tried to upload the assay metadata excel file
    mocks.upload_xlsx.assert_called_once()
    job_id = res.json["job_id"]
    update_url = f"/upload_jobs/{job_id}"
    # Report an upload failure
    res = client.patch(
        f"{update_url}?token={res.json['token']}",
        json={"status": UploadJobStatus.UPLOAD_FAILED.value},
        headers={"If-Match": res.json["job_etag"]},
    )
    assert res.status_code == 200
    mocks.revoke_write.assert_called_with(user.email)
    # This was an upload failure, so success shouldn't have been published
    mocks.publish_success.assert_not_called()
    # Reset the upload status and try the request again
    with cidc_api.app_context():
        job = UploadJobs.find_by_id_and_email(job_id, user.email)
        job._set_status_no_validation(UploadJobStatus.STARTED.value)
        job.update()
        _etag = job._etag
    # Report an upload success
    res = client.patch(
        f"{update_url}?token={res.json['token']}",
        json={"status": UploadJobStatus.UPLOAD_COMPLETED.value},
        headers={"If-Match": _etag},
    )
    assert res.status_code == 200
    mocks.publish_success.assert_called_with(job_id)
# (local path, expected GCS prefix) pairs for the olink upload test below.
OLINK_TESTDATA = [
    ("/local/path/combined.xlsx", "test_trial/olink/study_npx.xlsx"),
    (
        "assay1_npx.xlsx",
        "test_trial/olink/chip_111/assay_npx.xlsx",
    ),  # 111 is a chip barcode in .xlsx
    (
        "ct2.xlsx",
        "test_trial/olink/chip_112/assay_raw_ct.xlsx",
    ),  # 112 is a chip barcode in .xlsx
]
@pytest.mark.skip("Data Freeze")
def test_upload_olink(cidc_api, clean_db, monkeypatch):
    """Ensure the upload endpoint follows the expected execution flow"""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    with cidc_api.app_context():
        user = Users.find_by_id(user_id)
    make_cimac_biofx_user(user_id, cidc_api)
    client = cidc_api.test_client()
    mocks = UploadMocks(
        monkeypatch,
        # NPX files are flagged as having extra metadata ("npx" in url).
        prismify_file_entries=[
            finfo(lp, url, "uuid" + str(i), "npx" in url, False)
            for i, (lp, url) in enumerate(OLINK_TESTDATA)
        ],
    )
    # No permission to upload yet
    res = client.post(
        ASSAY_UPLOAD, data=form_data("olink.xlsx", io.BytesIO(b"1234"), "olink")
    )
    assert res.status_code == 401
    assert "not authorized to upload olink data" in str(res.json["_error"]["message"])
    mocks.clear_all()
    # Give permission and retry
    grant_upload_permission(user_id, "olink", cidc_api)
    res = client.post(
        ASSAY_UPLOAD, data=form_data("olink.xlsx", io.BytesIO(b"1234"), "olink")
    )
    assert res.status_code == 200
    assert "url_mapping" in res.json
    url_mapping = res.json["url_mapping"]
    # Olink assay has extra_metadata files
    assert "extra_metadata" in res.json
    extra_metadata = res.json["extra_metadata"]
    assert type(extra_metadata) == dict
    # We expect local_path to map to a gcs object name with gcs_prefix.
    for local_path, gcs_prefix in OLINK_TESTDATA:
        gcs_object_name = url_mapping[local_path]
        assert local_path in url_mapping
        assert gcs_object_name.startswith(gcs_prefix)
        assert (
            local_path not in gcs_object_name
        ), "PHI from local_path shouldn't end up in gcs urls"
    # Check that we tried to grant IAM upload access to gcs_object_name
    mocks.grant_write.assert_called_with(user.email)
    # Check that we tried to upload the assay metadata excel file
    mocks.upload_xlsx.assert_called_once()
    job_id = res.json["job_id"]
    update_url = f"/upload_jobs/{job_id}"
    # Report an upload failure
    res = client.patch(
        f"{update_url}?token={res.json['token']}",
        json={"status": UploadJobStatus.UPLOAD_FAILED.value},
        headers={"If-Match": res.json["job_etag"]},
    )
    assert res.status_code == 200
    mocks.revoke_write.assert_called_with(user.email)
    # This was an upload failure, so success shouldn't have been published
    mocks.publish_success.assert_not_called()
    # Test upload status validation - since the upload job's current status
    # is UPLOAD_FAILED, the API shouldn't permit this status to be updated to
    # UPLOAD_COMPLETED.
    bad_res = client.patch(
        f"{update_url}?token={res.json['token']}",
        json={"status": UploadJobStatus.UPLOAD_COMPLETED.value},
        headers={"If-Match": res.json["_etag"]},
    )
    assert bad_res.status_code == 400
    assert (
        "status upload-failed can't transition to status upload-completed"
        in bad_res.json["_error"]["message"]
    )
    # Reset the upload status and try the request again
    with cidc_api.app_context():
        job = UploadJobs.find_by_id_and_email(job_id, user.email)
        job._set_status_no_validation(UploadJobStatus.STARTED.value)
        job.update()
        _etag = job._etag
    res = client.patch(
        f"{update_url}?token={res.json['token']}",
        json={"status": UploadJobStatus.UPLOAD_COMPLETED.value},
        headers={"If-Match": _etag},
    )
    assert res.status_code == 200
    mocks.publish_success.assert_called_with(job_id)
def test_poll_upload_merge_status(cidc_api, clean_db, monkeypatch):
    """
    Check pull_upload_merge_status endpoint behavior
    """
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    with cidc_api.app_context():
        user = Users.find_by_id(user_id)
    make_cimac_biofx_user(user_id, cidc_api)
    metadata = {PROTOCOL_ID_FIELD_NAME: trial_id}
    with cidc_api.app_context():
        other_user = Users(email="other@email.com")
        other_user.insert()
        upload_job = UploadJobs.create(
            upload_type="wes",
            uploader_email=user.email,
            gcs_file_map={},
            metadata=metadata,
            gcs_xlsx_uri="",
        )
        upload_job.insert()
        upload_job_id = upload_job.id
    client = cidc_api.test_client()
    # Upload not found
    res = client.get(
        f"/ingestion/poll_upload_merge_status/12345?token={upload_job.token}"
    )
    assert res.status_code == 404
    upload_job_url = (
        f"/ingestion/poll_upload_merge_status/{upload_job_id}?token={upload_job.token}"
    )
    # Upload not-yet-ready
    res = client.get(upload_job_url)
    assert res.status_code == 200
    assert "retry_in" in res.json and res.json["retry_in"] == 5
    assert "status" not in res.json
    test_details = "A human-friendly reason for this "
    # Once the merge has finished (either way), polling returns the terminal
    # status and its details instead of a retry hint.
    for status in [
        UploadJobStatus.MERGE_COMPLETED.value,
        UploadJobStatus.MERGE_FAILED.value,
    ]:
        # Simulate cloud function merge status update
        with cidc_api.app_context():
            upload_job._set_status_no_validation(status)
            upload_job.status_details = test_details
            upload_job.update()
        # Upload ready
        res = client.get(upload_job_url)
        assert res.status_code == 200
        assert "retry_in" not in res.json
        assert "status" in res.json and res.json["status"] == status
        assert (
            "status_details" in res.json and res.json["status_details"] == test_details
        )
@pytest.mark.skip("Data Freeze")
def test_extra_assay_metadata(cidc_api, clean_db, monkeypatch):
    """Exercise /ingestion/extra-assay-metadata: form validation, job lookup,
    and how merge errors from the schemas layer are translated to HTTP codes."""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    make_cimac_biofx_user(user_id, cidc_api)
    job_id, _, job_merge_completed_id = setup_upload_jobs(cidc_api)
    client = cidc_api.test_client()

    # Missing form body entirely.
    res = client.post("/ingestion/extra-assay-metadata")
    assert res.status_code == 400
    assert "Expected form" in res.json["_error"]["message"]

    # Form present but no job_id.
    res = client.post("/ingestion/extra-assay-metadata", data={"foo": "bar"})
    assert res.status_code == 400
    assert "job_id" in res.json["_error"]["message"]

    # job_id present but no attached files.
    res = client.post("/ingestion/extra-assay-metadata", data={"job_id": 123})
    assert res.status_code == 400
    assert "files" in res.json["_error"]["message"]

    # Non-existent job id.
    res = client.post(
        "/ingestion/extra-assay-metadata",
        data={"job_id": 987, "uuid-1": (io.BytesIO(b"fake file"), "fname1")},
    )
    assert res.status_code == 400
    assert "987 doesn't exist" in res.json["_error"]["message"]

    # Job exists but is already merged, so extra metadata can't be attached.
    res = client.post(
        "/ingestion/extra-assay-metadata",
        data={
            "job_id": job_merge_completed_id,
            "uuid-1": (io.BytesIO(b"fake file"), "fname1"),
        },
    )
    assert res.status_code == 400
    assert (
        f"{job_merge_completed_id} doesn't exist or is already merged"
        in res.json["_error"]["message"]
    )

    # Happy path: the model-level merge hook is invoked once.
    with monkeypatch.context() as m:
        merge_extra_metadata = MagicMock()
        merge_extra_metadata.return_value = MagicMock()  # not caught
        m.setattr(
            "cidc_api.models.UploadJobs.merge_extra_metadata", merge_extra_metadata
        )
        res = client.post(
            "/ingestion/extra-assay-metadata",
            data={"job_id": job_id, "uuid-1": (io.BytesIO(b"fake file"), "fname1")},
        )
        assert res.status_code == 200
        merge_extra_metadata.assert_called_once()

    # Happy path at the schemas layer: merge returns a patch tuple.
    with monkeypatch.context() as m:
        merge_artifact_extra_metadata = MagicMock()
        merge_artifact_extra_metadata.return_value = ("md patch", {}, "nothing")
        m.setattr(
            "cidc_schemas.prism.merge_artifact_extra_metadata",
            merge_artifact_extra_metadata,
        )
        res = client.post(
            "/ingestion/extra-assay-metadata",
            data={"job_id": job_id, "uuid-1": (io.BytesIO(b"fake file"), "fname1")},
        )
        assert res.status_code == 200
        merge_artifact_extra_metadata.assert_called_once()

    with monkeypatch.context() as m:
        merge_artifact_extra_metadata = MagicMock()
        merge_artifact_extra_metadata.side_effect = ValueError("testing")
        m.setattr(
            "cidc_schemas.prism.merge_artifact_extra_metadata",
            merge_artifact_extra_metadata,
        )
        res = client.post(
            "/ingestion/extra-assay-metadata",
            data={"job_id": job_id, "uuid-1": (io.BytesIO(b"fake file"), "fname1")},
        )
        assert res.status_code == 400  # ValueError should get translated to BadRequest
        assert "testing" in res.json["_error"]["message"]

    # BUG FIX: the original opened `monkeypatch.context()` but patched via the
    # *outer* monkeypatch, so this patch wasn't undone when the context exited.
    # Patch through the context object like the sibling branches above.
    with monkeypatch.context() as m:
        merge_artifact_extra_metadata = MagicMock()
        merge_artifact_extra_metadata.side_effect = TypeError("testing")
        m.setattr(
            "cidc_schemas.prism.merge_artifact_extra_metadata",
            merge_artifact_extra_metadata,
        )
        res = client.post(
            "/ingestion/extra-assay-metadata",
            data={"job_id": job_id, "uuid-1": (io.BytesIO(b"fake file"), "fname1")},
        )
        assert res.status_code == 500  # TypeError should be a server error
@pytest.mark.skip("Data Freeze")
def test_merge_extra_metadata(cidc_api, clean_db, monkeypatch):
    """Ensure merging of extra metadata follows the expected execution flow"""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    with cidc_api.app_context():
        user = Users.find_by_id(user_id)
    make_cimac_biofx_user(user_id, cidc_api)
    with cidc_api.app_context():
        # Metadata patch contains two upload placeholders (uuid-1, uuid-2)
        # so both uploaded files below have somewhere to merge into.
        assay_upload = UploadJobs.create(
            upload_type="assay_with_extra_md",
            uploader_email=user.email,
            gcs_file_map={},
            metadata={
                PROTOCOL_ID_FIELD_NAME: trial_id,
                "whatever": {
                    "hierarchy": [
                        {"we just need a": "uuid-1", "to be able": "to merge"},
                        {"and": "uuid-2"},
                    ]
                },
            },
            gcs_xlsx_uri="",
            commit=False,
        )
        assay_upload.id = 137
        assay_upload.insert()
        # Fake parser: "extra metadata" is just the file's decoded contents.
        custom_extra_md_parse = MagicMock()
        custom_extra_md_parse.side_effect = lambda f: {"extra_md": f.read().decode()}
        monkeypatch.setattr(
            "cidc_schemas.prism.merger.EXTRA_METADATA_PARSERS",
            {"assay_with_extra_md": custom_extra_md_parse},
        )
        form_data = {
            "job_id": 137,
            "uuid-1": (io.BytesIO(b"fake file 1"), "fname1"),
            "uuid-2": (io.BytesIO(b"fake file 2"), "fname2"),
        }
        client = cidc_api.test_client()
        res = client.post("/ingestion/extra-assay-metadata", data=form_data)
        assert res.status_code == 200
        assert custom_extra_md_parse.call_count == 2
        # Both placeholders in the stored metadata patch should now carry
        # the parsed extra metadata.
        fetched_jobs = UploadJobs.list()
        assert 1 == len(fetched_jobs)
        au = fetched_jobs[0]
        assert "extra_md" in au.metadata_patch["whatever"]["hierarchy"][0]
        assert "extra_md" in au.metadata_patch["whatever"]["hierarchy"][1]
@pytest.mark.skip("Data Freeze")
def test_create_intake_bucket(cidc_api, clean_db, monkeypatch):
    """Only intake-capable roles may create an intake bucket; others get 401."""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    bucket_name = "test-intake-bucket"
    gcloud_client = MagicMock()
    gcloud_client.create_intake_bucket = MagicMock()
    bucket = MagicMock()
    bucket.name = bucket_name
    gcloud_client.create_intake_bucket.return_value = bucket
    monkeypatch.setattr("cidc_api.resources.upload_jobs.gcloud_client", gcloud_client)
    client = cidc_api.test_client()
    # Exercise every role against the endpoint.
    for role in ROLES:
        make_role(user_id, role, cidc_api)
        res = client.post(
            "/ingestion/intake_bucket",
            json={"trial_id": "test-trial", "upload_type": "test-upload"},
        )
        if role in INTAKE_ROLES:
            assert res.status_code == 200
            gcloud_client.create_intake_bucket.assert_called_with(user_email)
            assert res.json == {
                "gs_url": f"gs://{bucket_name}/test-trial/test-upload",
                "console_url": f"https://console.cloud.google.com/storage/browser/{bucket_name}/test-trial/test-upload",
            }
        else:
            assert res.status_code == 401
        # Reset between roles so assert_called_with reflects this iteration.
        gcloud_client.create_intake_bucket.reset_mock()
@pytest.mark.skip("Data Freeze")
def test_send_intake_metadata(cidc_api, clean_db, monkeypatch):
    """Only intake-capable roles may send intake metadata; the email helper
    receives the uploading user, the GCS URL, and the remaining form fields."""
    user_id = setup_trial_and_user(cidc_api, monkeypatch)
    xlsx_url = "https://same/fake/url"
    gcloud_client = MagicMock()
    gcloud_client.upload_xlsx_to_intake_bucket = MagicMock()
    gcloud_client.upload_xlsx_to_intake_bucket.return_value = xlsx_url
    monkeypatch.setattr("cidc_api.resources.upload_jobs.gcloud_client", gcloud_client)
    intake_metadata_email = MagicMock()
    monkeypatch.setattr("cidc_api.shared.emails.intake_metadata", intake_metadata_email)
    client = cidc_api.test_client()
    form_data = {
        "trial_id": "test-trial",
        "assay_type": "wes",
        "description": "a test description",
    }
    for role in ROLES:
        make_role(user_id, role, cidc_api)
        # do this here to recreate the BytesIO object each time
        form_data["xlsx"] = (io.BytesIO(b"test metadata"), "test_metadata.xlsx")
        res = client.post("/ingestion/intake_metadata", data=form_data)
        if role in INTAKE_ROLES:
            assert res.status_code == 200
            args, kwargs = intake_metadata_email.call_args
            assert args[0].id == user_id
            assert kwargs.pop("xlsx_gcp_url") == xlsx_url
            assert kwargs.pop("send_email") == True
            # After popping the computed kwargs, only the form fields remain.
            form_data.pop("xlsx")
            assert kwargs == form_data
        else:
            assert res.status_code == 401
        intake_metadata_email.reset_mock()
def test_upload_data_files(cidc_api, monkeypatch):
    """Unit-test upload_data_files() directly: the JSON response must describe
    URL mappings, extra-metadata files, optional files, and the upload token."""
    user = Users(email="other@email.com")
    trial = TrialMetadata(
        trial_id="test_trial",
        metadata_json={
            prism.PROTOCOL_ID_FIELD_NAME: trial_id,
            "participants": [],
            "allowed_cohort_names": ["Arm_Z"],
            "allowed_collection_event_names": [],
        },
    )
    template_type = "foo"
    xlsx_file = MagicMock()
    md_patch = {}
    # Four files covering each combination of metadata_availability / allow_empty.
    file_infos = [
        finfo(
            "localfile1.ext",
            "test_trial/url/file1.ext",
            "uuid-1",
            metadata_availability=None,
            allow_empty=None,
        ),
        finfo(
            "localfile2.ext",
            "test_trial/url/file2.ext",
            "uuid-2",
            metadata_availability=True,
            allow_empty=None,
        ),
        finfo(
            "localfile3.ext",
            "test_trial/url/file3.ext",
            "uuid-3",
            metadata_availability=None,
            allow_empty=True,
        ),
        finfo(
            "localfile4.ext",
            "test_trial/url/file4.ext",
            "uuid-4",
            metadata_availability=True,
            allow_empty=True,
        ),
    ]
    gcloud_client = MagicMock()
    gcloud_client.grant_upload_access = MagicMock()
    gcloud_client.upload_xlsx_to_gcs = MagicMock()
    gcs_blob = MagicMock()
    gcs_blob.name = "blob"
    gcloud_client.upload_xlsx_to_gcs.return_value = gcs_blob
    monkeypatch.setattr("cidc_api.resources.upload_jobs.gcloud_client", gcloud_client)
    # Stub out UploadJobs.create so no database is needed.
    create = MagicMock()
    job = MagicMock()
    job.id = "id"
    job._etag = "_etag"
    job.token = "token"
    create.return_value = job
    monkeypatch.setattr("cidc_api.resources.upload_jobs.UploadJobs.create", create)
    with cidc_api.app_context():
        response = upload_data_files(
            user, trial, template_type, xlsx_file, md_patch, file_infos
        )
        json = response.get_json()
    assert "job_id" in json and json["job_id"] == "id"
    assert "job_etag" in json and json["job_etag"] == "_etag"
    assert "url_mapping" in json
    # Strip the random/unique suffix off each gcs url before comparing.
    url_mapping = {k: v.rsplit("/", 1)[0] for k, v in json["url_mapping"].items()}
    assert url_mapping == {
        "localfile1.ext": "test_trial/url/file1.ext",
        "localfile2.ext": "test_trial/url/file2.ext",
        "localfile3.ext": "test_trial/url/file3.ext",
        "localfile4.ext": "test_trial/url/file4.ext",
    }
    assert "gcs_bucket" in json and json["gcs_bucket"] == "cidc-uploads-staging"
    # Only files flagged metadata_availability=True appear in extra_metadata.
    assert "extra_metadata" in json and json["extra_metadata"] == {
        "localfile2.ext": "uuid-2",
        "localfile4.ext": "uuid-4",
    }
    assert "gcs_file_map" in json
    gcs_file_map = sorted(
        [(k.rsplit("/", 1)[0], v) for k, v in json["gcs_file_map"].items()],
        key=lambda i: i[0],
    )
    assert gcs_file_map == [
        ("test_trial/url/file1.ext", "uuid-1"),
        ("test_trial/url/file2.ext", "uuid-2"),
        ("test_trial/url/file3.ext", "uuid-3"),
        ("test_trial/url/file4.ext", "uuid-4"),
    ]
    # Only files flagged allow_empty=True are reported as optional.
    assert "optional_files" in json and json["optional_files"] == [
        "localfile3.ext",
        "localfile4.ext",
    ]
    assert "token" in json and json["token"] == "token"
def test_remove_optional_uuid_recursive():
    """_remove_optional_uuid_recursive should delete exactly the dict/list
    entry holding the given upload_placeholder, at any nesting depth, and
    drop containers emptied by the removal."""
    test = {
        "foo": {"upload_placeholder": "one-deep"},
        "bar": {
            "foo": {"upload_placeholder": "two-deep"},
            "baz": [{"upload_placeholder": "two-deep array"}],
        },
        "baz": [
            {"upload_placeholder": "one-deep array"},
            {"upload_placeholder": "second item"},
        ],
        "int": 4,
    }
    # Removing a top-level placeholder drops its dict entry.
    this_test = deepcopy(test)
    del this_test["foo"]
    assert not DeepDiff(
        this_test, _remove_optional_uuid_recursive(deepcopy(test), "one-deep")
    )
    this_test = deepcopy(test)
    del this_test["bar"]["foo"]
    assert not DeepDiff(
        this_test, _remove_optional_uuid_recursive(deepcopy(test), "two-deep")
    )
    # Removing the only element of a nested list drops the list itself.
    this_test = deepcopy(test)
    del this_test["bar"]["baz"]
    assert not DeepDiff(
        this_test, _remove_optional_uuid_recursive(deepcopy(test), "two-deep array")
    )
    this_test = deepcopy(test)
    del this_test["baz"][0]
    assert not DeepDiff(
        this_test, _remove_optional_uuid_recursive(deepcopy(test), "one-deep array")
    )
    this_test = deepcopy(test)
    del this_test["baz"][1]
    assert not DeepDiff(
        this_test, _remove_optional_uuid_recursive(deepcopy(test), "second item")
    )
    # Chained removals: emptying a list removes the list key entirely.
    this_test = deepcopy(test)
    del this_test["baz"]
    temp = _remove_optional_uuid_recursive(deepcopy(test), "one-deep array")
    assert not DeepDiff(
        _remove_optional_uuid_recursive(deepcopy(temp), "second item"), this_test
    )
    this_test = deepcopy(test)
    del this_test["bar"]
    temp = _remove_optional_uuid_recursive(deepcopy(test), "two-deep array")
    assert not DeepDiff(
        _remove_optional_uuid_recursive(deepcopy(temp), "two-deep"), this_test
    )
    # Removing every placeholder one by one leaves only the plain value.
    this_test = deepcopy(test)
    del this_test["foo"]
    temp = _remove_optional_uuid_recursive(deepcopy(test), "one-deep")
    assert not DeepDiff(temp, this_test)
    del this_test["bar"]["foo"]
    temp = _remove_optional_uuid_recursive(temp, "two-deep")
    assert not DeepDiff(temp, this_test)
    del this_test["bar"]
    temp = _remove_optional_uuid_recursive(temp, "two-deep array")
    assert not DeepDiff(temp, this_test)
    del this_test["baz"][0]
    temp = _remove_optional_uuid_recursive(temp, "one-deep array")
    assert not DeepDiff(temp, this_test)
    temp = _remove_optional_uuid_recursive(temp, "second item")
    assert not DeepDiff(temp, {"int": 4})
|
984,188 | 62dac69a2e3597e539f7b246c1df354df3495d6b | #Ten program obliczy ile trzeba zapłacić napiwku kelnerowi
#Adam Orzechowski 8/01/2015
# Read the restaurant bill (in złoty), then show 15% and 20% tips rounded
# down to whole złoty via floor division.
bill = float(input("Ile zapłaciłeś ostatnio w restauracji? Kwotę podaj w złotych. Zapłaciłem:"))
print("\nOk, więc zapłaciłeś:", bill, "zł.")
tip_15 = bill * 0.15 // 1
tip_20 = bill * 0.2 // 1
print("\n\nJeśli chcesz zapłacić napiwek wysokości 15%, bez groszy, będzie wynosił", tip_15, "zł.")
print("\nJeśli chcesz zapłacić napiwek wysokości 20%, (również bez zbędnej końcówki) będzie on wynosił około", tip_20, "zł.")
input("\n\nAby zakończyć naciśnij 'Enter'")
|
984,189 | 463ba3e0d51382c62e50748aceef376f189f88ad | # coding=utf-8
# Define the 'Rational' class in this space
# Driver: exercises the (to-be-written) Rational class. Expected outputs are
# shown in the comments before each print.
if __name__ == "__main__":
    r1 = Rational(26, 4)
    r2 = Rational(-2, 6)
    r3 = Rational(34, 7)
    # 13/2 -1/3 34/7
    print(r1, r2, r3, sep=", ")
    # [Rational(1), Rational(-11/2)]
    print([Rational(1, 1), Rational(22, -4)])
    # 41/6
    print(r1 - r2)
    # 221/7
    print(r1 * r3)
    # 7/5
    print(r2 / Rational(5, -7))
    # True
    print(Rational(-4, 6) < Rational(1, -7))
    # True
    print(Rational(12, 8) == Rational(-24, -16))
|
984,190 | 5b82eec535a9793ef55bd9499cae9f5e349af5f6 | from openerp import models,fields, api
class BankAccountNumber(models.Model):
    """Staging model used to migrate employee loan balances onto hr.employee.

    Each record holds an employee's name plus outstanding SSS / Pag-IBIG loan
    amounts; Generate() copies them onto the matching hr.employee record.
    """
    _name = 'migrate.employee.loans'
    _description = 'Migration for Employee Loan'

    name = fields.Char('Employee Name')
    # BUG FIX: Many2one's first positional argument is the comodel name, not a
    # display label; the original passed the label 'Employee ID' as the model.
    employee_id = fields.Many2one('hr.employee', string='Employee ID')
    employee_account_number = fields.Char('Account Number')
    sss_loan_amount = fields.Float('SSS Loan Amount', default=0)
    pagibig_salary_loan = fields.Float('Pagibig Salary Loan Amount', default=0)
    pagibig_calamity_loan = fields.Float('Pagibig Calamity Loan Amount', default=0)

    @api.one
    def Generate(self):
        """Push each staged loan amount onto the matching hr.employee record.

        Employees are matched by exact name; staging rows with no matching
        employee are skipped silently. Raises when the staging table is empty.
        """
        MONTH_REMAINING = 24  # amortize over 24 months
        MONTH_START = 1
        YEAR_START = 2016
        migrations = self.env['migrate.employee.loans'].search([])
        if not migrations:
            # NOTE(review): builtin Warning kept from the original; Odoo code
            # normally raises openerp.exceptions.Warning here — confirm.
            raise Warning('No Employee')
        for migrate in migrations:
            hr_employee = self.env['hr.employee'].search([('name', '=', migrate.name)])
            if not hr_employee:
                continue  # no matching employee for this staging row
            # SSS loan
            if migrate.sss_loan_amount > 0:
                hr_employee.write({
                    'sss_loans_monthly_amortization': migrate.sss_loan_amount,
                    'sss_loans_remaining_months': MONTH_REMAINING,
                    'sss_loans_start_Month': MONTH_START,
                    'sss_loans_start_year': YEAR_START,
                })
            # Pag-IBIG salary loan
            if migrate.pagibig_salary_loan > 0:
                hr_employee.write({
                    'pagibig_salaryloan_monthly_amortization': migrate.pagibig_salary_loan,
                    'pagibig_salaryloan_remaining_months': MONTH_REMAINING,
                    'pagibig_salaryloan_start_Month': MONTH_START,
                    'pagibig_salaryloan_start_year': YEAR_START,
                })
            # Pag-IBIG calamity loan
            if migrate.pagibig_calamity_loan > 0:
                hr_employee.write({
                    'pagibig_calamityloan_monthly_amortization': migrate.pagibig_calamity_loan,
                    'pagibig_calamityloan_remaining_months': MONTH_REMAINING,
                    'pagibig_calamityloan_start_Month': MONTH_START,
                    'pagibig_calamityloan_start_year': YEAR_START,
                })
|
984,191 | f61fdc9801d3f28d22549709ffab4c6740b7e9c3 | '''Blackjack Game: Player vs Dealer'''
import random
SUITS = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
RANKS = ('Two', 'Three', 'Four', 'Five', 'Six',
'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
VALUES = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7,
'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10, 'Queen':10, 'King':10, 'Ace':1}
class Card():
    """One playing card, carrying its suit, rank, and blackjack value."""
    def __init__(self, suit, rank):
        self.rank = rank
        self.suit = suit
        # Numeric blackjack value looked up once at construction time.
        self.value = VALUES[rank]
    def __str__(self):
        # Human-readable form, e.g. "Ace of Spades".
        return '{} of {}'.format(self.rank, self.suit)
class Deck():
    """A standard 52-card deck: one Card per suit/rank combination."""
    def __init__(self):
        # Cartesian product of suits and ranks, in the same order as a
        # nested suit->rank loop.
        self.all_cards = [Card(suit, rank)
                          for suit in SUITS
                          for rank in RANKS]
    def shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self.all_cards)
    def deal_card_from_deck(self):
        """Remove and return the card at the end of the deck."""
        return self.all_cards.pop()
class Dealer():
    """Blackjack dealer following the house rule: hit on 16 or less,
    stand on 17 or more."""
    def __init__(self):
        self.name = 'Dealer'
        self.hand = []
    def add_card_to_hand(self, new_card):
        """Append a newly dealt card to the hand."""
        self.hand.append(new_card)
    def get_deal(self):
        """Return the face-up card; on the initial deal only the first
        card is shown, the second stays face down."""
        return self.hand[0]
    def get_hand(self):
        """Return the full list of cards held."""
        return self.hand
    def decide(self, num):
        """Return 'hit' for totals of 16 or less, otherwise 'stand'."""
        return 'hit' if num <= 16 else 'stand'
    def clear_hand(self):
        """Discard all cards, emptying the hand in place."""
        self.hand.clear()
class Player():
    """Tracks a player's name, chip bankroll (int), and current hand."""
    def __init__(self, name, bankroll):
        self.name = name
        self.bankroll = int(bankroll)
        self.hand = []
    def add_card_to_hand(self, new_card):
        """Append a dealt card to the hand."""
        self.hand.append(new_card)
    def add_to_bankroll(self, gains):
        """Credit winnings (or a returned stake) to the bankroll."""
        self.bankroll += gains
    def bet(self, amt):
        """Debit a wager from the bankroll."""
        self.bankroll -= amt
    def get_hand(self):
        """Return the list of cards held."""
        return self.hand
    def clear_hand(self):
        """Empty the hand in place."""
        self.hand.clear()
def show_hand(cards):
    """Print a hand's cards followed by its blackjack value."""
    # Same output as print('Cards are ', *cards, sep='.'):
    # the fixed prefix and each card joined by '.'.
    print('.'.join(['Cards are '] + [str(card) for card in cards]))
    print('Value of cards is ' + str(calculate_hand(cards)))
def calculate_hand(cards):
    """Return the best blackjack value of *cards*.

    Aces carry value 1 in card.value; when the hand holds at least one
    Ace and the low total is 11 or less, one Ace is promoted to 11 by
    adding back 10 (promoting more than one would always bust).
    """
    low_total = sum(card.value for card in cards)
    holds_ace = any(card.rank == 'Ace' for card in cards)
    if holds_ace and low_total <= 11:
        low_total += 10
    return low_total
def is_busted(cards):
    """Return True when the hand is bust.

    Uses the minimal total (every Ace counted as 1 via card.value), so a
    hand is bust only when even its lowest possible value exceeds 21.
    """
    return sum(card.value for card in cards) > 21
def prompt_for_player_info():
    """Prompt for the player's name and starting bankroll.

    Re-prompts until the bankroll entry is a non-negative integer string.

    Returns:
        tuple: (name: str, bankroll: int)
    """
    name = input('Enter your name: ')
    bankroll = input('Enter starting funds: ')
    # Bug fix: the original tested the bound method `bankroll.isdigit`
    # (always truthy, so `not ...` was always False) instead of calling
    # it — invalid input crashed int() below instead of re-prompting.
    while not bankroll.isdigit():
        print('Please enter valid number')
        bankroll = input('Enter starting funds: ')
    return name, int(bankroll)
def prompt_to_place_bet(player):
    """Prompt for a wager, validate it, debit the player's bankroll,
    and return the bet as an int.

    Re-prompts until the entry is a digit string no larger than the
    current bankroll.
    """
    bet = input('Place a bet: ')
    # Bug fix: the original tested the bound method `bet.isdigit` (always
    # truthy) instead of calling it, so non-numeric input crashed in
    # int(bet) rather than re-prompting. The short-circuit `or` keeps
    # int() from running on a non-digit string.
    while not bet.isdigit() or int(bet) > player.bankroll:
        print('Please enter valid number less than or equal to bankroll')
        bet = input('Place a bet: ')
    player.bet(int(bet))
    return int(bet)
def prompt_hit_or_stand(player):
    """Ask the player to hit or stand; re-prompt until the (case-
    insensitive) answer is valid. Returns 'hit' or 'stand'."""
    while True:
        decision = input('Hit or stand: ').lower()
        if decision in ('hit', 'stand'):
            return decision
        print("Not a valid entry. Enter 'hit' or 'stand'.")
def prompt_keep_playing():
    """Ask whether to play another round; re-prompt until the answer is
    yes/no (case-insensitive). Returns True for 'yes'."""
    while True:
        decision = input('Keep playing? (Yes/No): ').lower()
        if decision in ('yes', 'no'):
            return decision == 'yes'
        print("Not a valid entry. Enter 'Yes' or 'No'.")
if __name__ == '__main__':
    # Game entry point: set up player, dealer, and deck, then loop rounds
    # until the player quits or runs out of chips.
    print('***** Blackjack ******')
    p_name, bank = prompt_for_player_info()
    player = Player(p_name, bank)
    print(f'Hello {player.name}, you are starting with ${player.bankroll:0.2f} in chips')
    print("Let's play!")
    dealer = Dealer()
    print('Shuffling cards...')
    deck = Deck()
    deck.shuffle()
    keep_playing = True
    while keep_playing:
        #Place Bet (prompt_to_place_bet debits the bankroll immediately)
        print(f'Player bankroll is ${player.bankroll:0.2f}')
        bet = prompt_to_place_bet(player)
        print(f'You are betting ${bet:0.2f}')
        #Deal cards, alternating player/dealer, two each
        print(f'Dealing cards...')
        player.add_card_to_hand(deck.deal_card_from_deck())
        dealer.add_card_to_hand(deck.deal_card_from_deck())
        player.add_card_to_hand(deck.deal_card_from_deck())
        dealer.add_card_to_hand(deck.deal_card_from_deck())
        #Initial reveal of dealer hand (only the first card is shown)
        print(f'The dealer is showing', end=' ')
        print(f'{dealer.get_deal()}')
        is_player_busted = False
        is_dealer_busted = False
        while (not is_player_busted and not is_dealer_busted):
            #The Play (Hit or Stand by player)
            continue_to_deal = True
            while continue_to_deal and not is_player_busted:
                print(f'Player', end=' ')
                show_hand(player.get_hand())
                decision = prompt_hit_or_stand(player)
                if decision == 'stand':
                    continue_to_deal = False
                else:
                    player.add_card_to_hand(deck.deal_card_from_deck())
                is_player_busted = is_busted(player.get_hand())
            if is_player_busted:
                # Player busts: round ends immediately, skip dealer's play.
                print(f'Player', end=' ')
                show_hand(player.get_hand())
                print(f'Player is Bust!')
                break
            #The Dealer's Play (Hit or stand by dealer)
            '''If the total is 17 or more, it must stand.
            If the total is 16 or under, they must take a card.
            The dealer must continue to take cards until the total is 17 or more,
            at which point the dealer must stand. If the dealer has an ace,
            and counting it as 11 would bring the total to 17 or more (but not over 21),
            the dealer must count the ace as 11 and stand. '''
            continue_to_deal = True
            while continue_to_deal and not is_dealer_busted:
                print(f'Dealer', end=' ')
                show_hand(dealer.get_hand())
                decision = dealer.decide(calculate_hand(dealer.get_hand()))
                if decision == 'stand':
                    continue_to_deal = False
                else:
                    dealer.add_card_to_hand(deck.deal_card_from_deck())
                    print(f'Dealer has hit')
                is_dealer_busted = is_busted(dealer.get_hand())
            if is_dealer_busted:
                print(f'Dealer', end=' ')
                show_hand(dealer.get_hand())
                print(f'Dealer is Bust!')
                break
            # Both sides stood without busting: fall through to settlement.
            if not continue_to_deal:
                break
        #Settlement: compare hand totals (bet was already debited above)
        dealer_hand_total = calculate_hand(dealer.get_hand())
        player_hand_total = calculate_hand(player.get_hand())
        if is_player_busted:
            print(f'Dealer wins ${bet:0.2f}')
        elif is_dealer_busted and not is_player_busted:
            bet *= 1.5
            # NOTE(review): crediting bet*1.5 after the stake was debited
            # nets the player only half the stake as profit; an even-money
            # win would credit 2*bet. Confirm the intended payout.
            print(f'Player wins ${bet:0.2f}')
            player.add_to_bankroll(bet)
        elif dealer_hand_total > player_hand_total:
            #player lost bet
            print(f'Dealer wins ${bet:0.2f}')
        elif dealer_hand_total < player_hand_total:
            #player won bet (same 1.5x credit as the dealer-bust case)
            bet *= 1.5
            print(f'Player wins ${bet:0.2f}')
            player.add_to_bankroll(bet)
        elif dealer_hand_total == player_hand_total:
            #player drew: stake is returned
            print(f'Draw. Player gets back {bet:0.2f}')
            player.add_to_bankroll(bet)
        #Ask to keep playing
        keep_playing = prompt_keep_playing()
        if keep_playing:
            #reset with a fresh full deck and empty hands for the next round
            deck = Deck()
            deck.shuffle()
            player.clear_hand()
            dealer.clear_hand()
        else:
            print(f'You left with ${player.bankroll:0.2f}')
            break
        if player.bankroll < 1:
            # Out of chips: end the session even if the player said yes.
            print(f'You have ${player.bankroll:0.2f}')
            print(f'Insufficient funds. Get more chips.')
            break
|
984,192 | df0fc64b8c562899df27e0d6698418bc5a8db675 | /home/sinisha/miniconda3/lib/python3.6/heapq.py |
984,193 | e113ee078a8978732c2b744f49895ae2dc70406e | import numpy as np
import pandas as pd
from mi3gpu.utils.seqload import loadSeqs
import argparse
import sys
def get_marginals(seqs, A, weights, nrmlz=True):
    """Compute univariate and bivariate marginals of an integer MSA.

    seqs: (nSeq, L) integer array of sequences over an alphabet of size A.
    weights: per-sequence count weights; the string '0' means unweighted.
    nrmlz: if truthy, normalize counts to frequencies along the last axis.

    Returns (f, ff): per-column marginals of shape (L, A) and per-pair
    marginals of shape (L*(L-1)/2, A*A), pairs ordered (0,1),(0,2),...
    """
    n_seq, L = seqs.shape
    if weights == '0':
        weights = None
    # The pair encoding j + A*i below may overflow small unsigned dtypes.
    if A > 16:
        seqs = seqs.astype('i4')

    def counts(col, bins):
        return np.bincount(col, minlength=bins, weights=weights)

    def maybe_norm(x):
        if nrmlz:
            return x / np.sum(x, axis=-1, keepdims=True)
        return x

    f = maybe_norm(np.array([counts(seqs[:, i], A) for i in range(L)]))
    pair_counts = [counts(seqs[:, j] + A * seqs[:, i], A * A)
                   for i in range(L - 1) for j in range(i + 1, L)]
    ff = maybe_norm(np.array(pair_counts))
    return f, ff
'''
def get_marginals(msa_file, A, model_name):
# A = alphabet_size
# import msa using import_seqs
print("\t\t\t\timporting msa:\t\t\t", msa_file)
msa = seqload.loadSeqs(msa_file)[0]
print("\t\t\t\tfinished msa import:\t\t", msa_file)
#compute marginals, save to .npy files
print("\t\t\t\tcomputing msa marginals:\t", msa_file)
#uvms, bvms = getMarginals(msa, 21, weights)
uvms, bvms = compute_marginals(msa, A, 0)
uvms_file_name = model_name + "_uvms.npy"
bvms_file_name = model_name + "_bvms.npy"
np.save(parent_dir_name uvms_file_name, uvms)
np.save(bvms_file_name, bvms)
print("\t\t\t\tfinished computing marginals:\t", msa_file))
'''
def getL(size):
    """Recover L from size == L*(L-1)/2 (condensed pair-vector length)."""
    # Positive root of L^2 - L - 2*size = 0, floored, then rounded via +0.5.
    root = np.sqrt(1 + 8 * size)
    return int((1 + root) // 2 + 0.5)
def getLq(J):
    """Return (L, q) for an array J of shape (L*(L-1)/2, q*q)."""
    npair, qq = J.shape
    return getL(npair), int(np.sqrt(qq) + 0.5)
def getUnimarg(ff):
    """Derive univariate marginals from bivariate marginals ff.

    ff has shape (L*(L-1)/2, q*q), rows ordered (0,1),(0,2),...,(0,L-1),
    (1,2),...; each row is a pair distribution flattened as a*q + b.

    Position 0's marginal comes from pair (0,1) summed over its partner;
    every other position i comes from pair (0,i) summed over position 0.
    Rows are renormalized to absorb floating-point error.
    """
    L, q = getLq(ff)
    pairs = ff.reshape((L * (L - 1) // 2, q, q))
    rows = [np.sum(pairs[0], axis=1)]
    for n in range(L - 1):
        rows.append(np.sum(pairs[n], axis=0))
    marg = np.array(rows)
    return marg / (np.sum(marg, axis=1)[:, None])
def indepF(fab):
    """Return the independent (outer-product) model of pairwise marginals.

    For each pair row of fab (shape (npair, q*q)) the two single-site
    marginals are computed and their outer product is flattened back
    into a q*q row.
    """
    L, q = getLq(fab)
    pair3 = fab.reshape((fab.shape[0], q, q))
    fa = np.sum(pair3, axis=2)  # marginals of the first position
    fb = np.sum(pair3, axis=1)  # marginals of the second position
    return np.array([np.outer(a, b).flatten() for a, b in zip(fa, fb)])
def getM(x, diag_fill=0):
    """Expand a condensed strict-upper-triangle vector into a symmetric
    LxL matrix.

    x holds the upper triangle in row-major order; the diagonal is set
    to diag_fill.
    """
    L = getL(len(x))
    # Bug fix: np.empty left the lower triangle uninitialized, so the
    # M + M.T symmetrization added garbage into every off-diagonal entry.
    # Start from zeros so only the assigned triangle contributes.
    M = np.zeros((L, L))
    M[np.triu_indices(L, k=1)] = x
    M = M + M.T
    M[np.diag_indices(L)] = diag_fill
    return M
def get_covars(label, bvms_file, A, parent_dir_name, data_home, model_name):
    """Load saved bivariate marginals, compute the pairwise covariances
    C = f_ab - f_a*f_b, and save them to a covars_*.npy file.

    Sequences labelled "randomSeqs" (VAE output) live in parent_dir_name;
    everything else is read from / written to data_home (mirrors get_bvms).
    NOTE(review): the "covars_ " prefix contains a space — confirm it is
    intentional before any consumer globs on the name.
    """
    covars_file_name = "covars_ " + label + "_" + model_name + ".npy"
    # Bug fix: was `label is "randomSeqs"` — identity comparison on
    # strings is unreliable; use equality like get_bvms does.
    if label == "randomSeqs":
        bvms_load_name = parent_dir_name + "/" + bvms_file
        save_name = parent_dir_name + "/" + covars_file_name
    else:
        bvms_load_name = data_home + "/" + bvms_file
        save_name = data_home + "/" + covars_file_name
    # Bug fix: `bimarg` was undefined (NameError) and indepF was handed
    # the file PATH; load the bivariate marginals from disk first.
    bimarg = np.load(bvms_load_name)
    C = bimarg - indepF(bimarg)
    np.save(save_name, C)
def get_bvms(label, msa_file, A, parent_dir_name, data_home, model_name):
    """Load an MSA, compute its bivariate marginals, and save them as .npy.

    Sequences labelled "randomSeqs" (VAE output) live in parent_dir_name;
    everything else is read from / written to data_home.

    Returns the bvms file name (without its directory).
    """
    bvms_file_name = "bvms_" + label + "_" + model_name + ".npy"
    print("bvms_file_name:\t", bvms_file_name)
    print("parent:\t", parent_dir_name)
    print("data_home:\t", data_home)
    print("model_name:\t", model_name)
    # randomSeqs of VAE are in parent_dir, all others are in data_home
    if label == "randomSeqs":
        load_name = parent_dir_name + "/" + msa_file
        save_name = parent_dir_name + "/" + bvms_file_name
    else:
        load_name = data_home + "/" + msa_file
        save_name = data_home + "/" + bvms_file_name
    print("\t\t\t\timporting msa for:\t", label, "\t", load_name)
    # Bug fix: the loader is imported as `from mi3gpu.utils.seqload import
    # loadSeqs`, so the original `seqload.loadSeqs(...)` raised NameError.
    msa = loadSeqs(load_name)[0]
    print("\t\t\t\tfinished msa import for:\t", label)
    print("\t\t\t\tcomputing bvms for:\t", label)
    bvms = compute_bvms(msa, A, 0)
    np.save(save_name, bvms)
    print("\t\t\t\tfinished computing bvms for:\t", label)
    return bvms_file_name
def compute_bvms(seqs, q, weights, nrmlz=True):
    """Compute bivariate marginals (pair frequencies) of an integer MSA.

    Args:
        seqs: (nSeq, L) integer array over the alphabet {0..q-1}.
        q: alphabet size.
        weights: per-sequence count weights; the string '0' (legacy CLI
            sentinel) means unweighted.
        nrmlz: if truthy, normalize counts to frequencies per row.

    Returns:
        Array of shape (L*(L-1)/2, q*q); rows ordered (0,1),(0,2),...,
        each row the joint distribution of a pair flattened as a*q + b.

    Note: the univariate marginals `f` the original also computed were
    never used or returned, so that dead work is dropped.
    """
    nSeq, L = seqs.shape
    if weights == '0':
        weights = None
    if q > 16:  # the x + q*y pair encoding below may overflow for u1
        seqs = seqs.astype('i4')

    def freqs(s, bins):
        return np.bincount(s, minlength=bins, weights=weights)

    ff = np.array([freqs(seqs[:, j] + q * seqs[:, i], q * q)
                   for i in range(L - 1) for j in range(i + 1, L)])
    if nrmlz:
        ff = ff / np.sum(ff, axis=-1, keepdims=True)
    return ff
'''
def compute_bvms(seqs, A, weights, nrmlz=True):
nSeq, L = seqs.shape
if weights == '0':
weights = None
if A > 16: # the x + A*y operation below may overflow for u1
seqs = seqs.astype('i4')
if nrmlz:
nrmlz = lambda x: x/np.sum(x, axis=-1, keepdims=True)
else:
nrmlz = lambda x: x
def freqs(s, bins):
return np.bincount(s, minlength=bins, weights=weights)
ff = nrmlz(np.array([freqs(seqs[:,j] + A*seqs[:,i], A*A) for i in range(L-1) for j in range(i+1, L)]))
return ff
'''
|
984,194 | 72e3be76262f29434a8c1cea927506361e4af196 | # import Flask class from the flask module
# import Flask class from the flask module
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
# create a new instance of Flask and store it in app
app = Flask(__name__)
# Session-signing key from the environment.
# NOTE(review): str() turns a missing SECRET_KEY_DB into the literal
# string "None" instead of failing fast — confirm the variable is always set.
app.config['SECRET_KEY'] = str(os.getenv('SECRET_KEY_DB'))
# MySQL connection string: root user, password from MYSQL_ROOT_PASSWORD,
# host "mysql" (presumably a compose/k8s service name — confirm), db "sfia2".
app.config['SQLALCHEMY_DATABASE_URI']="mysql+pymysql://root:"+os.getenv("MYSQL_ROOT_PASSWORD")+"@mysql:3306/sfia2"
#"mysql+pymysql://root:" + str(os.getenv("DATABASE_URI")) + "@mysql:3306/sfia2"
#"mysql+pymysql://root:hannahandsam1@mysql:3306/sfia2"
db = SQLAlchemy(app)
# import the ./application/routes.py file
# (imported last, after `app` and `db` exist, so routes can import them
# without a circular-import failure)
from application import routes
984,195 | b58b581d7d448854914a98378a8f98120997ba2f | from functions import *
def getMyPosition(df):
    """Build target share positions for 100 instruments from a
    pairs-trading strategy.

    df: price history; it is transposed below so each column becomes one
    instrument (assumes input arrives instrument-per-row — TODO confirm
    against the caller).

    Returns a length-100 numpy array of share counts; instruments not in
    any traded pair stay at 0.
    """
    df = pd.DataFrame(df).T
    # Candidate pairs from the helper (presumably correlation/cointegration
    # based — see functions.find_pairs). Columns 'level_0'/'level_1' hold
    # the two instrument ids of each pair.
    df3 = find_pairs(df)
    # Accumulator frame: one row per instrument, all positions start flat.
    final_dataframe = pd.DataFrame(index=np.arange(100), columns=np.arange(1))
    final_dataframe.rename(columns = {0: 'number of shares'}, inplace=True)
    final_dataframe['number of shares'] = 0
    for i in df3.index[0:]:
        pair1 = df3['level_0'][i]
        pair2 = df3['level_1'][i]
        # Per-pair pipeline: joint price frame -> spread z-score ->
        # entry/exit signals -> share counts -> merge into accumulator.
        pair_data = create_pairs_dataframe(df, pair1, pair2)
        pair_data = calculate_spread_zscore(pair_data, pair1, pair2)
        pair_data = signal_generate(pair_data, z_entry_threshold=0.8, z_exit_threshold=3.5)
        pair_data = stonks(pair_data, pair1, pair2)
        final_dataframe = final_data(final_dataframe, pair_data, pair1, pair2)
    position_array = final_dataframe['number of shares'].to_numpy()
    return position_array
984,196 | 18e9beb2451919e767dc2b974ae29a8a07291103 | log = open('../log.txt')
lines = log.readlines()
log.close()
for i in lines:
if "C4 01 C1" in i:
print(i) |
984,197 | c7d18636ca19db74925a1ca0af55b33242d5f531 | # Generated by Django 4.0 on 2022-03-09 03:34
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: drops the obsolete
    `recordsPublished` field from the `dataset` model (presumably
    superseded by the `num_record` field added in 0075 — confirm)."""

    # Must run after the migration that added dataset.num_record.
    dependencies = [
        ('data', '0075_dataset_num_record'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='dataset',
            name='recordsPublished',
        ),
    ]
|
984,198 | b8adaee5cc0fa97829ffae6afc7f5b4a6227aced | # Hello World PyQt5
# import sys to handle exit status for the Qt window
import sys
# Required Qt widgets
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QWidget
# Create instance of QApplication
app = QApplication(sys.argv)
# Create instance of application GUI
window = QWidget()
window.setWindowTitle('PyQt5 App')
window.setGeometry(100,100,280,80)
window.move(60,15)
helloMsg = QLabel('<h1>Hello World!</h1>',parent=window)
helloMsg.move(60,15)
# Show app
window.show()
# Run application's event loop (main loop)
sys.exit(app.exec_())
|
984,199 | 3d9db3766ec195e772e83f7bedbe0e17117971be | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
    """Test-database lifecycle for the App Engine datastore backend.

    Unlike SQL backends there is no schema to create: a fresh datastore
    is recreated on demand, so "creating" the test DB amounts to wiping
    any previous one and flagging the connection as using test storage.
    """

    def create_test_db(self, *args, **kw):
        """Destroys the test datastore. A new store will be recreated on demand"""
        self.destroy_test_db()
        self.connection.use_test_datastore = True
        self.connection.flush()

    def destroy_test_db(self, *args, **kw):
        """Destroys the test datastore files."""
        # Imported lazily inside the method (presumably to avoid importing
        # the backend module at class-definition time — confirm).
        from django.db.backends.appengine.base import destroy_datastore, \
            get_test_datastore_paths
        destroy_datastore(*get_test_datastore_paths())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.