blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
62539ca1bcdb1bc7fff0af11dbfc49c1baef1e64 | 04bb110bb336eec314ca295eacfdda40a4f352f6 | /venv/Scripts/easy_install-script.py | 91010b81d8854894e1e7f6d46a0406434ac2dd95 | [] | no_license | AlainDaccache/Cell-Phone-Store-DB | 5cb8d195da0a95cd984867c81755f4d8abd71d6a | 0a252bd3990f02b23d35239c214c2d339185b941 | refs/heads/master | 2022-04-20T01:16:03.732299 | 2020-04-14T03:18:00 | 2020-04-14T03:18:00 | 243,381,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | #!C:\Users\Naji\IdeaProjects\COMP421D2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"alaindacc@gmail.com"
] | alaindacc@gmail.com |
273f561ef07f50e6deef9d3cf850e1fae3aea494 | b4b7e695730dd25b2c5ac7858462ba6958b92239 | /Problems 1-10/Euler9.py | 9616ca565c204caa938542cd977eb3405feefed6 | [] | no_license | LewisBray/ProjectEuler | ca42ca590121b160b96c28a30359060f8e80d6a7 | ed351e6339d56fc0454bb85e01f448c07d4e72f3 | refs/heads/master | 2020-04-27T03:52:21.836270 | 2019-06-18T19:19:56 | 2019-06-18T19:19:56 | 174,037,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | def triangles(max_side_length):
for a in range(1, max_side_length + 1):
for b in range(a + 1, max_side_length + 1):
for c in range(b + 1, max_side_length + 1):
yield (a, b, c)
def right_triangles(max_side_length):
    """Yield the Pythagorean triples (a, b, c), a < b < c <= max_side_length."""
    for a, b, c in triangles(max_side_length):
        # Right triangle iff the two short sides square-sum to the hypotenuse.
        if a * a + b * b == c * c:
            yield (a, b, c)
def solution_triangles(max_side_length, perimeter=1000):
    """Yield right triangles with sides up to max_side_length whose perimeter
    equals ``perimeter``.

    The target perimeter was hard-coded to 1000 (Project Euler problem 9);
    it is now a keyword parameter with the same default, so existing calls
    behave identically while the search is reusable for other targets.
    """
    for triangle in right_triangles(max_side_length):
        if sum(triangle) == perimeter:
            yield triangle
# Project Euler 9: there is exactly one Pythagorean triple with
# a + b + c == 1000; take it and print the product a * b * c.
solution_triangle = list(solution_triangles(1000))[0]
product = solution_triangle[0] * solution_triangle[1] * solution_triangle[2]
print(product)
| [
"32199493+LewisBray@users.noreply.github.com"
] | 32199493+LewisBray@users.noreply.github.com |
f81db90c4e5e026bb5e8024a58b4d3cd22a39843 | ab98efea50ad29be850d0fcedd5885d2a533459b | /code/01_isotermas.py | edc78d9fa47397e5d17ae286817fb54c8c983ebc | [] | no_license | anvelascos/hydroclimatology | a608ef1bb235d9c7537b53f165bf8b2c7439e270 | c27f0ec8d8645a96eeaeac7c533e61684fa8d54a | refs/heads/master | 2021-01-20T15:27:32.945653 | 2017-03-15T17:59:37 | 2017-03-15T17:59:37 | 82,817,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
def dividir_variables():
    """Split 'hc_data.xlsx' into one workbook per variable, then fit a
    monthly temperature-vs-elevation linear regression for the G2 stations.

    Side effects: writes '<var>_data.xlsx' files and prints regression
    summaries.  NOTE(review): everything below the first loop looks like it
    was meant to be module-level code rather than part of this function —
    confirm against the original repository before refactoring.
    """
    # Export each sheet of the master workbook to its own Excel file.
    xls_data = pd.ExcelFile('hc_data.xlsx')
    nombres_vars = xls_data.sheet_names
    for nombre_var in nombres_vars:
        print(nombre_var)
        nombre_salida = nombre_var + '_data.xlsx'
        df_datos_var = xls_data.parse(nombre_var, index_col='Fecha')
        df_datos_var.to_excel(nombre_salida, nombre_var)
    # Load the TS_1 series and restrict it to the stations in the G2 catalog.
    xls_ts1 = pd.ExcelFile('TS_1_data.xlsx')
    df_ts1 = xls_ts1.parse('TS_1', index_col='Fecha')
    print(df_ts1.shape)
    xls_catalogo = pd.ExcelFile('Catalogo_G2.xlsx')
    df_estaciones_ts = xls_catalogo.parse('TS_1', index_col='Estacion')
    mis_estaciones = df_estaciones_ts.index
    df_ts1_g2 = df_ts1[mis_estaciones]
    print(df_ts1_g2.shape)
    nom_archivo_ts = 'TS_1_data_G2.xlsx'
    # df_ts1_g2.to_excel(nom_archivo_ts, 'TS_1') # export to Excel
    # Long-term monthly mean per station (rows = months 1..12).
    meses = range(1, 13)
    df_prom_lp = pd.DataFrame(index=meses, columns=mis_estaciones)
    # print(df_prom_lp)
    for mes in meses:
        df_ts1_mes = df_ts1_g2[df_ts1_g2.index.month == mes]
        df_prom_lp.loc[mes] = df_ts1_mes.mean()
    # For each month, regress the monthly mean against station elevation.
    for mes in meses:
        print("\nMes: " + str(mes))
        df_tmp_reg = df_estaciones_ts.copy()
        df_tmp_reg[mes] = df_prom_lp.loc[mes]
        print(df_tmp_reg.shape)
        # Drop stations with any missing value so linregress gets clean data.
        df_tmp_reg.dropna(axis=0, how='any', inplace=True)
        # df_tmp_reg.drop(54025010, axis=0)
        print(df_tmp_reg.shape)
        # plt.scatter(df_tmp_reg['Elevacion'], df_tmp_reg[mes])
        # nombre_figura = 'figs/Regresion_' + str(mes)
        # plt.savefig(nombre_figura)
        # linregress returns (slope, intercept, r-value, p-value, stderr).
        reg_results = stats.linregress(df_tmp_reg['Elevacion'], df_tmp_reg[mes])
        alpha = reg_results[0]
        beta = reg_results[1]
        corr = reg_results[2]
        r2 = corr ** 2
        print("\nDatos de la regresion\nalpha = " + str(alpha) + '\nbeta = ' + str(beta) + '\nCorr = ' +
              str(corr) + '\nR2 = ' + str(r2))
    # plt.show()
"andres.velasco@javeriana.edu.co"
] | andres.velasco@javeriana.edu.co |
3cf0e063b91a5be11fd48040ca02637fab5c720d | cb1d0dd68b1136b8a371f7d2b423e45171e98ab7 | /src/xsd_trips/urls.py | 4d153c6902a452d9f38693e3b2a855184692fcd9 | [] | no_license | ScubaJimmE/xSACdb | 86640ab791327392f88eb4993c858aa6d340c758 | 1996ab286ee0446b0a0e38882104bbf8904d8bdc | refs/heads/develop | 2021-07-15T04:47:11.279138 | 2016-05-25T01:44:05 | 2016-05-25T01:44:05 | 62,212,226 | 0 | 0 | null | 2021-03-20T00:40:24 | 2016-06-29T09:09:50 | Python | UTF-8 | Python | false | false | 256 | py | from django.conf.urls import patterns, include, url
from django.conf import settings
from views import *
urlpatterns = patterns('',
url(r'^$', TripList.as_view(), name='TripList'),
url(r'^new/$', TripCreate.as_view(), name='TripCreate'),
)
| [
"will@fullaf.com"
] | will@fullaf.com |
0667eca8a90c8b284d972fbc882498ac45ac693a | 8dd01a98a32997d1b29536b3c9f210478a48ecf7 | /IDPP/state2nudged_2.py | 38f3009f713fa71e897b9e3188f9c991dc10c8a3 | [] | no_license | Thanhngocpham200893/STATE-instructions | 1218d7465f705d1c0f0c9c45fbdcf511891fe1ea | 09648efa24fcd6c23554b5e48fe1213f23b2321b | refs/heads/master | 2023-06-20T23:14:57.971800 | 2021-07-24T16:13:05 | 2021-07-24T16:13:05 | 282,809,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | """
#!/home/thanhpn/Env/bin/python3
Read the state input and write nudged_2
Usage: state2nudged_2.py nfinp_# spring_constant
"""
##-----import library here
import numpy as np
import sys
##-----end load library
##read the name of state input from command line
# Parse command line: positional STATE input filename and spring constant.
if len(sys.argv) == 3:
    stateinp = str(sys.argv[1])
    spring = float(sys.argv[2])
else:
    sys.exit('state2nudged_2.py nfinp_# spring_constant')
# Read atom count and coordinates from the STATE input file.
# NOTE(review): the parsing below assumes a fixed header layout (counts on
# line 2, coordinates starting at line 10) — confirm against the STATE format.
with open(stateinp,"r") as inp:
    dummy =inp.readline()
    dummy =inp.readline()
    ktyp = int(dummy.split()[2])
    katm = int(dummy.split()[3])
    print(katm)
    # Skip seven header lines preceding the coordinate block.
    inp.readline()
    inp.readline()
    inp.readline()
    inp.readline()
    inp.readline()
    inp.readline()
    inp.readline()
    cps = np.zeros((katm,3),dtype = float)
    ktyp_l = np.zeros((katm,1),dtype = int)
    for i in range(katm):
        dummy = inp.readline()
        cps[i,0:3] = dummy.split()[0:3]
###print the output
# Truncate any existing nudged_2, then write the spring constant followed by
# one "index x y z" line per atom.
with open('nudged_2', 'w+') as nudged:
    nudged.truncate(0)
with open('nudged_2','a+') as nudged:
    print('%.12f' %(spring),file = nudged)
    for i in range(katm):
        print(" %3i % 16.12f % 16.12f % 16.12f" %
              (i+1,
               cps[i,0],
               cps[i,1],
               cps[i,2]),file = nudged)
| [
"noreply@github.com"
] | Thanhngocpham200893.noreply@github.com |
176a6a148972ac79dbe9030c0394a9b8896d89e5 | 375019c0ce99e2168c72832cbf3c0a21c7f39a87 | /current_test/SConstruct | 609254e568e96965b8ab1fa4e6b440d6d2b35619 | [] | no_license | aselker/src | 367807994879aef78e2db890b82bbb7a41894f84 | 78e6dc8b2a574551859b55e0be63fa00d7f776fd | refs/heads/master | 2020-12-20T00:37:17.830850 | 2020-05-09T04:12:13 | 2020-05-09T04:12:13 | 235,900,498 | 0 | 0 | null | 2020-01-23T22:36:54 | 2020-01-23T22:36:53 | null | UTF-8 | Python | false | false | 897 |
# SCons build for a PIC24 firmware image using the Microchip XC16 toolchain.
env = Environment(PIC = '24FJ128GB206',
                  CC = 'xc16-gcc',
                  PROGSUFFIX = '.elf',
                  CFLAGS = '-g -omf=elf -x c -mcpu=$PIC',
                  LINKFLAGS = '-omf=elf -mcpu=$PIC -Wl,--script="app_p24FJ128GB206.gld"',
                  CPPPATH = '../lib')
env.PrependENVPath('PATH', '/opt/microchip/xc16/v1.41/bin')
# Custom builder: convert the linked .elf into a flashable .hex image.
bin2hex = Builder(action = 'xc16-bin2hex $SOURCE -omf=elf',
                  suffix = 'hex',
                  src_suffix = 'elf')
env.Append(BUILDERS = {'Hex' : bin2hex})
# Custom builder: disassembly listing for debugging.
# NOTE(review): 'list' shadows the Python builtin; harmless here but worth
# renaming if this file grows.
list = Builder(action = 'xc16-objdump -S -D $SOURCE > $TARGET',
               suffix = 'lst',
               src_suffix = 'elf')
env.Append(BUILDERS = {'List' : list})
# Link the firmware from the app source plus shared library sources.
env.Program('current_test', ['current_test.c',
                             '../lib/ajuart.c',
                             '../lib/elecanisms.c'])
env.Hex('current_test')
env.List('current_test')
| [
"rowansharman1@gmail.com"
] | rowansharman1@gmail.com | |
c83c218b4a9c1ac407301112c8089a0c5fd3fd99 | d3ed07e0f9069a05b4ec9e805429e18863357d03 | /config/includes.chroot/usr/local/lib/ROX-Lib2/tests/python/testoptions.py | 82e9fb015e9e04803649778b18a79c0b7f9dd072 | [] | no_license | machinebacon/livarp | 9df94ec7204eabbd21a6833ad4b7115ed5cdb6fd | aa49e5f2230353d53f6d79e5824859f027d81b03 | refs/heads/master | 2020-04-23T18:33:10.245826 | 2013-11-05T03:23:45 | 2013-11-05T03:23:45 | 14,131,204 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,065 | py | #!/usr/bin/env python2.6
import unittest
import os, sys, shutil
from os.path import dirname, abspath, join
rox_lib = dirname(dirname(dirname(abspath(sys.argv[0]))))
sys.path.insert(0, join(rox_lib, 'python'))
os.environ['CHOICESPATH'] = '/tmp/choices:/tmp/choices2'
os.environ['XDG_CONFIG_HOME'] = '/tmp/config'
from rox import basedir, choices, options
class TestOptions(unittest.TestCase):
    """Tests for rox.options: persistence via Choices/XDG paths and change
    notification.  Relies on the CHOICESPATH / XDG_CONFIG_HOME overrides set
    at module import time so all writes go under /tmp.
    """
    def setUp(self):
        # Start each test from empty config directories.
        for d in ['/tmp/choices', '/tmp/choices2', '/tmp/config']:
            if os.path.isdir(d):
                shutil.rmtree(d)

    def testChoices(self):
        """Saving a group writes a Choices file; a new group reads it back."""
        group = options.OptionGroup('MyProg', 'Options')
        o1 = options.Option('colour', 'red', group)
        assert not os.path.isfile('/tmp/choices/MyProg/Options')
        group.notify()
        group.save()
        assert os.path.isfile('/tmp/choices/MyProg/Options')

        # A fresh group with a different default picks up the saved value.
        g2 = options.OptionGroup('MyProg', 'Options')
        o1 = options.Option('colour', 'green', g2)
        g2.notify()
        # NOTE(review): assertEquals is the deprecated alias of assertEqual.
        self.assertEquals('red', o1.value)

    def testXDG(self):
        """Same round-trip as testChoices, but via the XDG config path."""
        group = options.OptionGroup('MyProg', 'Options', 'site')
        o1 = options.Option('colour', 'red', group)
        assert not os.path.isfile('/tmp/config/site/MyProg/Options')
        group.notify()
        group.save()
        assert os.path.isfile('/tmp/config/site/MyProg/Options')

        g2 = options.OptionGroup('MyProg', 'Options', 'site')
        o1 = options.Option('colour', 'green', g2)
        g2.notify()
        self.assertEquals('red', o1.value)

    def testNotify(self):
        """Callbacks fire on notify(), options can't be added after the
        first notify, and has_changed is reset by notify()."""
        self.c = 0
        def notify():
            self.c += 1
        group = options.OptionGroup('MyProg', 'Options', 'site')
        o1 = options.Option('colour', 'green', group)
        group.add_notify(notify)
        self.assertEquals(0, self.c)
        group.notify()
        self.assertEquals(1, self.c)

        # Adding an option after notify() must raise.
        try:
            options.Option('size', 'small', group)
            raise Exception('Too late!')
        except AssertionError:
            pass

        # Removed callbacks no longer fire.
        group.remove_notify(notify)
        group.notify()
        self.assertEquals(1, self.c)

        assert not o1.has_changed
        o1._set('hi')
        assert o1.has_changed

        group.notify()
        assert not o1.has_changed
# Build the suite at import time; run verbosely when invoked as a script.
suite = unittest.makeSuite(TestOptions)
if __name__ == '__main__':
    sys.argv.append('-v')
    unittest.main()
| [
"bacon@linuxbbq.org"
] | bacon@linuxbbq.org |
da9e00f2af1599c983cb133c32b539da17ece7fe | 155fa6aaa4ef31cc0dbb54b7cf528f36743b1663 | /Static and Class Methods/Gym/subscription.py | c93a716815e0c338d34e9dadac30833811a61828 | [] | no_license | GBoshnakov/SoftUni-OOP | efe77b5e1fd7d3def19338cc7819f187233ecab0 | 0145abb760b7633ca326d06a08564fad3151e1c5 | refs/heads/main | 2023-07-13T18:54:39.761133 | 2021-08-27T08:31:07 | 2021-08-27T08:31:07 | 381,711,275 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | class Subscription:
_id = 0
def __init__(self, date, customer_id, trainer_id, exercise_id):
Subscription._id += 1
self.date = date
self.customer_id = customer_id
self.trainer_id = trainer_id
self.exercise_id = exercise_id
self.id = Subscription._id
@staticmethod
def get_next_id():
return Subscription._id + 1
def __repr__(self):
return f"Subscription <{self.id}> on {self.date}"
| [
"boshnakov.g@gmail.com"
] | boshnakov.g@gmail.com |
cf0bc4a4904ba5ea0b1a33c31385a14eaf269f3c | 02546f99c91e46d66055ba8022f00619dbf9edcf | /lungsc/figures_immune_paper/fig4.py | 7d3f5ff2df49c1e027f182ebf3521fba76bec829 | [
"MIT"
] | permissive | iosonofabio/lung_neonatal_immune | 84a137492242a3946873e567db9eea531a90ecd6 | d0f12d4c24a778d0b7b8febf7accbc46adb7c162 | refs/heads/master | 2022-11-15T15:11:57.574654 | 2020-07-08T00:56:54 | 2020-07-08T00:56:54 | 258,082,671 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,274 | py | # vim: fdm=indent
'''
author: Fabio Zanini
date: 12/07/19
content: Plot panels for Fig 4.
'''
import os
import sys
import glob
import gzip
import pickle
import subprocess as sp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import seaborn as sns
from lungsc.pilots.load_dataset import DatasetLung, versions
ctms = ['Mac I', 'Mac II', 'Mac III', 'Mac IV', 'Mac V']
fig_fdn = '../../figures/immune_paper_figs/immune_paper_figure_4/'
if __name__ == '__main__':
ds0 = DatasetLung.load(preprocess=True, version=versions[-2])
ds0.query_samples_by_metadata(
'(cellType == "immune") & (doublet == 0)', inplace=True)
ds = ds0.query_samples_by_metadata('cellSubtype in @ctms', local_dict=locals())
if False:
print('Feature selection')
features = ds.feature_selection.overdispersed_within_groups('Mousename', inplace=False)
dsf = ds.query_features_by_name(features)
print('PCA')
dsc = dsf.dimensionality.pca(n_dims=25, robust=False, return_dataset='samples')
print('tSNE')
vs = dsc.dimensionality.tsne(perplexity=30)
print('Load tSNE from file')
vs = pd.read_csv(
'../../data/sequencing/datasets/all_{:}/tsne_immune.tsv'.format(versions[-2]),
sep='\t',
index_col=0,
)
vs = vs.loc[ds.samplenames]
if True:
print('Plot tSNE with Gal, Car4, Itgax, C1qa, Plac8, Ifitm6')
genes = ['Gal', 'H2-Eb1', 'Itgax', 'Car4', 'C1qa', 'Plac8', 'Ifitm6']
for gene in genes:
fig, ax = plt.subplots(figsize=(4.8, 4.2))
ds.plot.scatter_reduced_samples(
vs,
ax=ax,
s=12,
alpha=0.30,
cmap='viridis',
color_by=gene,
color_log=True,
)
ax.grid(False)
ax.set_axis_off()
fig.tight_layout()
if True:
for ext in ['svg', 'pdf', ['png', 600]]:
if isinstance(ext, list):
ext, dpi = ext
fig.savefig(fig_fdn+'immune_tsne_{:}.{:}'.format(
gene, ext),
dpi=dpi)
else:
fig.savefig(fig_fdn+'immune_tsne_{:}.{:}'.format(
gene, ext))
if True:
print('Make table with top DE genes within macrophages')
fn_comp = '../../data/gene_lists/immune_DEGs_macros.pkl'
if not os.path.isfile(fn_comp):
comps = {}
for cst in ctms:
print('DE for {:}'.format(cst))
ds.samplesheet['is_focal'] = ds.samplesheet['cellSubtype'] == cst
dsp = ds.split('is_focal')
# Subsample
for key in dsp:
if dsp[key].n_samples > 300:
dsp[key].subsample(300, inplace=True)
comp = dsp[True].compare(dsp[False])
comp['log2_fc'] = np.log2(dsp[True].counts.mean(axis=1) + 0.1) - np.log2(dsp[False].counts.mean(axis=1) + 0.1)
comp.name = cst
comps[cst] = comp
del ds.samplesheet['is_focal']
with open(fn_comp, 'wb') as f:
pickle.dump(comps, f)
else:
with open(fn_comp, 'rb') as f:
comps = pickle.load(f)
if False:
print('Save tables to file')
tops = {}
for cst in ctms:
fn_comp_tsv = '../../data/gene_lists/immune_DEGs_{:}.tsv'.format(mp)
comp = comps[cst]
top = comp.loc[comp['log2_fc'] > 0].nlargest(50, 'statistic')
tops[cst] = top
top.to_csv(fn_comp_tsv, sep='\t', index=True)
top_sum = pd.DataFrame([], index=np.arange(50))
for mp, top in tops.items():
top_sum[mp] = top.index
fn_comp_tsv_sum = '../../data/gene_lists/immune_DEGs_macros_summary.tsv'
top_sum.to_csv(fn_comp_tsv_sum, sep='\t', index=False)
if True:
print('Plot heatmap with single top DE genes')
tops = {}
for cst, comp in comps.items():
tops[cst] = comp.loc[comp['log2_fc'] > 0].nlargest(5, 'statistic').index.tolist()
genes = sum(tops.values(), [])
genes = [
# Common
'Ptprc',
'Cd68',
'Axl',
'Dab2',
# Mac I
'Gal',
'Mcm5',
'Mcm2',
'Mcm3',
'Mcm4',
'Mcm6',
'Bub1',
'Plk1',
'Top2a',
'Mki67',
# Mac II,
'Car4',
'Atp6v0d2',
'Mgll',
'Krt19',
'Slc39a2',
'Coro6',
'Marco',
'Siglecf',
'Gpnmb',
'Ear1',
'Cd200r4',
'Ctsk',
'Ly75',
'Bhlhe41',
'Slc7a2',
'Cdh1',
'Pex11a',
# Mac III
'Itgax',
'Adgrl3',
# Mac IV
'Fcrls',
'Pf4',
'C1qa',
'C1qb',
'C1qc',
'C3ar1',
'Tmem176b',
'Cxcl12',
'Ccl12',
'Cxcl16',
'Stab1',
'Ms4a7',
'Ms4a4a',
'Igfbp4',
'Apoe',
'Lgmn',
'Maf',
# Mac V
'Pla2g7',
'Ifitm2',
'Ifitm3',
'Ifitm6',
'Plac8',
'Pglyrp1',
'Serpinb10',
'Adgre4',
'Adgre5',
'Napsa',
'Rnase6',
'Fyb',
'Clec4a1',
'Itga4',
'Samhd1',
]
data = pd.DataFrame([], index=genes)
for cst in ctms:
dsi = ds.query_samples_by_metadata(
'cellSubtype == @cst',
local_dict=locals(),
)
mat = np.log10(0.1 + dsi.counts.loc[genes]).mean(axis=1)
data[cst] = mat
# Normalize by max expression of that gene
data += 1
data = (data.T / data.max(axis=1)).T
fig, ax = plt.subplots(figsize=(3, 10.5))
sns.heatmap(
data,
ax=ax,
cmap='plasma',
vmin=0,
vmax=1,
fmt='.1f',
xticklabels=True,
yticklabels=True,
cbar=False,
)
for tk in ax.get_yticklabels():
tk.set_rotation(0)
for tk in ax.get_xticklabels():
tk.set_rotation(90)
ax.set_xlim(0, 5)
ax.set_ylim(len(genes), 0)
fig.tight_layout()
if True:
fig.savefig(fig_fdn+'heatmap_single_genes_full.png')
if True:
print('Plot heatmap with pathways')
pathways = [
('cell cycle', ['Ncapd2', 'Mcm5', 'Mcm7', 'Cdca8', 'Smc2']),
('glycolysis', ['Cluh', 'Dbi', 'Eno1', 'Ldha', 'Pkm']),
('lipid metabolism', ['Lpl', 'Lipa', 'Abcg1', 'Sdc4', 'Abca9', 'Abca1']),
('matrix\nremodelling', ['Crispld2', 'Spint1', 'Tgm2']),
('angiogenesis', ['Fn1', 'Il18', 'Axl', 'Gas6', 'Pf4', 'Apoe']),
('alveolar', ['Adgrl3', 'Clec4n', 'Pparg', 'Ear2', 'Itgax', 'Car4', 'Bhlhe41', 'Trim29']),
('cell migration', ['Ccr2', 'Ccr5', 'Cx3cr1', 'Cxcl16', 'Cxcl2']),
('antibacterial', ['Acp5', 'Mpeg1', 'Plac8', 'Rnase6', 'Lyz2']),
('complement', ['C1qc', 'C1qa', 'C1qb', 'C3ar1']),
('alternative\nactivation', ['Ms4a8a', 'Axl', 'Il18', 'Maf', 'Lgmn']),
('type-I IFN', ['Adgrl3', 'Ifitm6', 'Ifitm3', 'Ifi27l2a', 'Ifitm2']),
('neg reg of\ninflammation', ['Cd200r4', 'Gpnmb', 'Il1rn', 'Dapk1', 'Dok2', 'Cd300a', 'Nr4a1', 'Lst1']),
]
genes = sum((x[1] for x in pathways), [])
data = pd.DataFrame([], index=genes)
for cst in ctms:
dsi = ds.query_samples_by_metadata(
'cellSubtype == @cst',
local_dict=locals(),
)
mat = np.log10(0.1 + dsi.counts.loc[genes]).mean(axis=1)
data[cst] = mat
# Normalize by max expression of that gene
data += 1
data = (data.T / data.max(axis=1)).T
fig, axs = plt.subplots(
2, 1, figsize=(11, 4), sharex=True,
gridspec_kw={'height_ratios': [1, 20]})
sns.heatmap(
data.iloc[:, :5].T,
ax=axs[1],
cmap='plasma',
vmin=0,
vmax=1,
fmt='.1f',
xticklabels=True,
yticklabels=True,
cbar=False,
)
for tk in axs[1].get_yticklabels():
tk.set_rotation(0)
for tk in axs[1].get_xticklabels():
tk.set_rotation(90)
tk.set_fontsize(8)
axs[1].set_ylim(5, 0)
axs[1].set_xlim(0, len(genes))
i = 0
for ipw, (pw, gns) in enumerate(pathways):
if i != 0:
axs[1].plot([i] * 2, [0, len(genes)], lw=2, color='lightgrey', alpha=0.9)
i += len(gns)
# Legend
labels = ['none', 'low', 'mid', 'high']
sfun = plt.cm.plasma
handles = [
axs[1].scatter([], [], marker='s', s=50, color=sfun(0)),
axs[1].scatter([], [], marker='s', s=50, color=sfun(0.33)),
axs[1].scatter([], [], marker='s', s=50, color=sfun(0.67)),
axs[1].scatter([], [], marker='s', s=50, color=sfun(1.0)),
]
leg = axs[1].legend(
handles, labels,
title='Gene\nexpression:',
bbox_to_anchor=(1.01, 0.99),
loc='upper left',
)
axs[0].set_ylim(0, 1)
axs[0].set_xlim(0, len(genes))
color_d = dict(zip(
(x[0] for x in pathways),
sns.color_palette('muted', n_colors=len(pathways)),
))
i = 0
for ipw, (pw, gns) in enumerate(pathways):
w = len(gns)
rect = plt.Rectangle(
(i, 0), w, 1,
facecolor=color_d[pw],
edgecolor='none',
lw=0,
)
axs[0].add_artist(rect)
wt = i + 0.5 * w - 0.15 * w * (pw == 'lipid metabolism')
ht = 2 + 1.5 * (ipw % 2)
axs[0].text(
wt, ht, pw, ha='center', va='bottom',
fontsize=10,
clip_on=False,
)
if ipw % 2:
axs[0].plot(
[wt] * 2, [ht - 0.2, 1.2], lw=1, color='k',
clip_on=False,
)
i += w
axs[0].set_axis_off()
fig.tight_layout(h_pad=0.01)
if True:
fig.savefig(fig_fdn+'heatmap_pathways.png')
plt.ion()
plt.show()
| [
"fabio.zanini@fastmail.fm"
] | fabio.zanini@fastmail.fm |
a512fa1461bb4cd320aa589a8ba0132de7b969a4 | c3ac0b1b355e1602c833f6a20f202778689ee662 | /wmendezblog/core/blog/rss.py | cd32fc9a5639dfbeb80557eaf063cac8b115d58b | [] | no_license | Wilo/GAB2017 | b4db1c4189f386104594137cef421adc2e4aeac3 | a29854edb8211feec5db6364e2fb9d74d88a5d51 | refs/heads/master | 2021-01-20T00:01:27.006918 | 2017-04-22T13:09:22 | 2017-04-22T13:09:22 | 89,068,249 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # -*- coding: utf-8 -*-
from os.path import splitext
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from .models import Post, CategoriaPost
class BlogFeed(Feed):
    """RSS feed of blog Posts, newest first (ordered by descending fech_crea)."""
    # Feed-level metadata rendered into the channel element.
    title = "In.planet"
    link = "/rss/"
    description = "In.planet, Más que internet"

    def items(self):
        # '-fech_crea' sorts by creation date, most recent entries first.
        return Post.objects.order_by('-fech_crea')

    def item_title(self, item):
        return item.titulo

    def item_pubdate(self, item):
        return item.fech_crea

    def item_description(self, item):
        return item.descripcion

    def item_link(self, item):
        # Resolve the per-post detail URL from its slug.
        return reverse('entrada', args=[item.slug])
| [
"wmendezc19@gmail.com"
] | wmendezc19@gmail.com |
459ddf3a35137abec5b034b8a503cfc1f2c79a89 | 02984c41ad9934a41de106d8be405cb65b265a3f | /matrix/hw4.py | 171cbdf18c4bdf686b10a5e73fbe5406e30601e0 | [] | no_license | pcp135/C-CtM | 865637a19c0ae3494cfa1f895a517cfe053981c6 | 3bbe833a4501ec994c48f8c9c747bf0e785a5d9e | refs/heads/master | 2016-09-05T10:05:58.367061 | 2013-09-08T00:34:37 | 2013-09-08T00:34:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,368 | py | # version code 892
# Please fill out this stencil and submit using the provided submission script.
from GF2 import one
from math import sqrt, pi
from matutil import coldict2mat
from solver import solve
from vec import Vec
## Problem 1
# For each part, please provide your solution as a list of the coefficients for
# the generators of V.
#
# For example, [1, 3, 5] would mean 1*[2,0,4,0] + 3*[0,1,0,1] + 5*[0,0,-1,-1]
rep_1 = [1,1,0]
rep_2 = [0.5,1,1]
rep_3 = [0,1,-1]
## Problem 2
# For each part, please provide your solution as a list of the coefficients for
# the generators of V.
lin_comb_coefficients_1 = [3,-1,1]
lin_comb_coefficients_2 = [0.5,-1.5,1]
lin_comb_coefficients_3 = [0.5,-5.5,4]
lin_comb_coefficients_4 = [1,-2,1]
## Problem 3
# Use one from the GF2 module, not the integer 1.
# For each part, please provide your solution as a list of the coefficients for
# the generators of V.
gf2_rep_1 = [one,0,one,0]
gf2_rep_2 = [one,0,0,one]
gf2_rep_3 = [one,one,0,one]
## Problem 4
# Use one from the GF2 module, not the integer 1.
# For each part, please provide your solution as a list of the coefficients for
# the generators of V.
gf2_lc_rep_1 = [one,one,one,0,0,0,0,0]
gf2_lc_rep_2 = [0,0,0,0,0,0,one,one]
gf2_lc_rep_3 = [one,0,0,one,0,0,0,0]
gf2_lc_rep_4 = [one,0,one,0,0,0,0,0]
## Problem 5
# For each part, please provide your solution as a list of the coefficients for
# the generators of V.
lin_dep_R_1 = [-2,1,1]
lin_dep_R_2 = [-28,7,-4]
lin_dep_R_3 = [-0.3,0,0,1,3]
## Problem 6
# Please record your solution as a list of coefficients
linear_dep_R_1 = [-1/3,1/3,-1]
linear_dep_R_2 = [-2*sqrt(2),-sqrt(2)/pi,-1]
linear_dep_R_3 = [1,1,1,1,1]
## Problem 7
# Assign the COEFFICIENT of the vector to each variable.
# Assign sum_to to the vector that you are expressing as a linear combination
# of the other two. Write the name of the vector as a STRING. i.e. 'u' or 'w'
u = -1
v = 1
w = 0
sum_to = 'w'
## Problem 8
# Please use the Vec class to represent your vectors
indep_vec_1 = Vec({0, 1, 2},{0: 1})
indep_vec_2 = Vec({0, 1, 2},{1: 1})
indep_vec_3 = Vec({0, 1, 2},{2: 1})
indep_vec_4 = Vec({0, 1, 2},{0: 1,1: 1,2: 1})
## Problem 9
# Please give your solution as a list of coefficients of the linear combination
zero_comb_1 = [one,one,0,one]
zero_comb_2 = [0,one,one,one]
zero_comb_3 = [one,one,0,0,one]
## Problem 10
# Please give your solution as a list of coefficients of the vectors
# in the set in order (list the coefficient for v_i before v_j if i < j).
sum_to_zero_1 = [0,one,0,one,one]
sum_to_zero_2 = [0,one,0,one,one,0,0]
sum_to_zero_3 = [one,0,one,one,one]
sum_to_zero_4 = [one,one,one,one,one,0,0]
## Problem 11
## Please express your answer a list of ints, such as [1,0,0,0,0]
exchange_1 = [0,0,1,0,0]
exchange_2 = [0,0,0,1,0]
exchange_3 = [0,0,1,0,0]
## Problem 12
# Please give the name of the vector you want to replace as a string (e.g. 'v1')
replace_1 = 'v3'
replace_2 = 'v1'
replace_3 = 'v4'
## Problem 13
def rep2vec(u, veclist):
    '''
    Input:
        - u: a vector as an instance of your Vec class with domain set(range(len(veclist)))
        - veclist: a list of n vectors (as Vec instances)
    Output:
        vector v (as Vec instance) whose coordinate representation is u
    Example:
        >>> a0 = Vec({'a','b','c','d'}, {'a':1})
        >>> a1 = Vec({'a','b','c','d'}, {'b':1})
        >>> a2 = Vec({'a','b','c','d'}, {'c':1})
        >>> rep2vec(Vec({0,1,2}, {0:2, 1:4, 2:6}), [a0,a1,a2]) == Vec({'a', 'c', 'b', 'd'},{'a': 2, 'c': 6, 'b': 4, 'd': 0})
        True
    '''
    # The matrix whose columns are the generators, applied to the
    # coordinate representation, reconstructs the vector itself.
    generator_matrix = coldict2mat(veclist)
    return generator_matrix * u
## Problem 14
def vec2rep(veclist, v):
    '''
    Input:
        - veclist: a list of vectors (as instances of your Vec class)
        - v: a vector (as Vec instance) with domain set(range(len(veclist)))
             with v in the span of set(veclist).
    Output:
        Vec instance u whose coordinate representation w.r.t. veclist is v
    Example:
        >>> a0 = Vec({'a','b','c','d'}, {'a':1})
        >>> a1 = Vec({'a','b','c','d'}, {'b':1})
        >>> a2 = Vec({'a','b','c','d'}, {'c':1})
        >>> vec2rep([a0,a1,a2], Vec({'a','b','c','d'}, {'a':3, 'c':-2})) == Vec({0, 1, 2},{0: 3.0, 1: 0.0, 2: -2.0})
        True
    '''
    # Solving  M * u = v  with M's columns the generators yields the
    # coordinate representation u of v in terms of veclist.
    generator_matrix = coldict2mat(veclist)
    return solve(generator_matrix, v)
## Problem 15
def is_superfluous(L, i):
    '''
    Return True if removing L[i] leaves the span of L unchanged.

    Input:
        - L: list of vectors as instances of Vec class
        - i: integer in range(len(L))
    Output:
        True if the span of the vectors of L is the same
        as the span of the vectors of L, excluding L[i].
        False otherwise.
    Examples:
        >>> a0 = Vec({'a','b','c','d'}, {'a':1})
        >>> a1 = Vec({'a','b','c','d'}, {'b':1})
        >>> a2 = Vec({'a','b','c','d'}, {'c':1})
        >>> a3 = Vec({'a','b','c','d'}, {'a':1,'c':3})
        >>> is_superfluous([a0,a1,a2,a3], 3)
        True
        >>> is_superfluous([a0,a1,a2,a3], 0)
        True
        >>> is_superfluous([a0,a1,a2,a3], 1)
        False
    '''
    # Fix: the original docstring's first example called is_superfluous(L, 3)
    # before any list named L was defined, breaking the doctest; the examples
    # above are now self-contained.  Logic is unchanged.
    # A single-element list never has a superfluous member here (the empty
    # remainder could only span the zero vector).
    if len(L) > 1:
        rest = L.copy()
        b = rest.pop(i)
        M = coldict2mat(rest)
        # Least-squares solve: L[i] is in the span of the others iff the
        # residual is (numerically) zero.  10e-14 (i.e. 1e-13) is the
        # original round-off tolerance, kept to preserve behaviour.
        solution = solve(M, b)
        residual = M * solution - b
        if residual * residual < 10e-14:
            return True
    return False
## Problem 16
def is_independent(L):
    '''
    input: a list L of vectors (using vec class)
    output: True if the vectors form a linearly independent list.

    >>> vlist = [Vec({0, 1, 2},{0: 1, 1: 0, 2: 0}), Vec({0, 1, 2},{0: 0, 1: 1, 2: 0}), Vec({0, 1, 2},{0: 0, 1: 0, 2: 1}), Vec({0, 1, 2},{0: 1, 1: 1, 2: 1}), Vec({0, 1, 2},{0: 0, 1: 1, 2: 1}), Vec({0, 1, 2},{0: 1, 1: 1, 2: 0})]
    >>> is_independent(vlist)
    False
    >>> is_independent(vlist[:3])
    True
    >>> is_independent(vlist[1:4])
    True
    >>> is_independent(vlist[2:6])
    False
    >>> is_independent(vlist[5:])
    True
    '''
    # Independent iff no vector is expressible from the others, i.e. no
    # index is superfluous.
    return not any(is_superfluous(L, idx) for idx in range(len(L)))
## Problem 17
def superset_basis(S, L):
    '''
    Input:
        - S: linearly independent list of Vec instances
        - L: list of Vec instances such that every vector in S is in Span(L)
    Output:
        Linearly independent list T containing all vectors of S (as instances
        of Vec) such that the span of T is the span of L (i.e. T is a basis
        for the span of L).
    Example:
        >>> a0 = Vec({'a','b','c','d'}, {'a':1})
        >>> a1 = Vec({'a','b','c','d'}, {'b':1})
        >>> a2 = Vec({'a','b','c','d'}, {'c':1})
        >>> a3 = Vec({'a','b','c','d'}, {'a':1,'c':3})
        >>> superset_basis([a0, a3], [a0, a1, a2]) == [Vec({'a', 'c', 'b', 'd'},{'a': 1}), Vec({'a', 'c', 'b', 'd'},{'b':1}),Vec({'a', 'c', 'b', 'd'},{'c': 1})]
        True
    '''
    # Grow-algorithm: start from S and greedily add vectors of L that keep
    # the running list independent.
    basis = list(S)
    for candidate in L:
        if is_independent(basis + [candidate]):
            basis.append(candidate)
    return basis
## Problem 18
def exchange(S, A, z):
    '''
    Input:
        - S: a list of vectors, as instances of your Vec class
        - A: a list of vectors, each of which are in S, with len(A) < len(S)
        - z: an instance of Vec such that A+[z] is linearly independent
    Output: a vector w in S but not in A such that Span S = Span ({z} U S - {w})
    Example:
        >>> S = [list2vec(v) for v in [[0,0,5,3],[2,0,1,3],[0,0,1,0],[1,2,3,4]]]
        >>> A = [list2vec(v) for v in [[0,0,5,3],[2,0,1,3]]]
        >>> z = list2vec([0,2,1,1])
        >>> exchange(S, A, z) == Vec({0, 1, 2, 3},{0: 0, 1: 0, 2: 1, 3: 0})
        True
    '''
    # Exchange lemma: append z, then return the first vector that became
    # redundant — excluding z itself and anything protected by A.
    T=S.copy()+[z]
    for i in range(len(T)):
        if is_superfluous(T,i) and T[i] != z and T[i] not in A:
            return T[i]
| [
"pcp135@gmail.com"
] | pcp135@gmail.com |
215c8e803e47326ed81c83fa1d0823256a1df1f9 | 9b57f736d6133036ab139ec6195ad2fff5d876a1 | /accounts/models.py | b1c021f6ab440a0d739d492a8461b473668a3848 | [] | no_license | ashshakya/Confession | 388a99b43246646a26856870f35a522225b01233 | 2fcc216ee8805b21e496793a650ada9cc2dfb694 | refs/heads/develop | 2022-11-10T20:36:41.916598 | 2022-10-28T06:08:55 | 2022-10-28T06:08:55 | 142,250,474 | 2 | 2 | null | 2022-10-28T06:08:56 | 2018-07-25T05:12:50 | CSS | UTF-8 | Python | false | false | 3,037 | py | import datetime
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
from django.db.models.signals import post_save
from blog.models import *
REQUEST_CHOICES = (
('api', 'api'),
('template', 'template')
)
DEGREE = (
('metric', 'Metriculate'),
('inter', 'Intermediate'),
('grad', 'Graduation'),
('pg', 'Post Graduation'),
('dr', 'Doctorate'),
)
YEAR_CHOICES = [(r, r) for r in range(1950, datetime.date.today().year + 5)]
month_list = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
MONTH = [(i.lower(), i) for i in month_list]
RATING_CHOICES = [(r, r) for r in range(0, 101)]
class UserInfo(models.Model):
    """Profile data attached one-to-one to a Django auth User."""
    # The auth user is also the primary key; deleting the user cascades here.
    user = models.OneToOneField(
        User,
        on_delete=models.CASCADE,
        primary_key=True
    )
    # Optional profile picture with a bundled default image.
    avatar = models.ImageField(upload_to='profiles', blank=True, default='/profiles/python.png')
    intro = models.CharField(max_length=1000, blank=True, null=True)
    bio = RichTextUploadingField(blank=True, null=True)
    # URL slug; populated from the username by the post_save signal below.
    slug = models.SlugField()
# Signal handler: create a UserInfo profile whenever a new User is created.
def create_user_profile(sender, instance, created, **kwargs):
    # 'created' is False on plain saves; only act on first creation.
    if created:
        UserInfo.objects.create(user=instance, slug=instance.username)

post_save.connect(create_user_profile, sender=User)
class SkillSet(models.Model):
    """A named skill with a 0-100 self-rating, owned by a user."""
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
    )
    skill = models.CharField(
        max_length=255,
        blank=False,
        null=False
    )
    # Percentage-style rating restricted to 0..100 via RATING_CHOICES.
    rating = models.IntegerField(
        choices=RATING_CHOICES,
        blank=False,
        null=False,
        default=0
    )

    class Meta:
        # A user may list each skill at most once.
        unique_together = (('skill', 'user'),)
class Qualification(models.Model):
    """An educational qualification (degree level, grade, years) for a user."""
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
    )
    # Degree level drawn from the DEGREE choices (metric .. doctorate).
    qualification = models.CharField(
        max_length=255,
        choices=DEGREE,
        null=False
    )
    specialization = models.CharField(max_length=255, null=True, blank=True)
    grade = models.CharField(max_length=10, null=False)
    from_year = models.IntegerField(('year'), choices=YEAR_CHOICES)
    completion_year = models.IntegerField(('year'), choices=YEAR_CHOICES)
    achievement = models.TextField(blank=True)  # RichTextUploadingField()
class Experience(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
)
designation = models.CharField(max_length=255, null=False)
org_name = models.CharField(max_length=255, null=True)
start_month = models.CharField(max_length=255, choices=MONTH)
start_year = models.IntegerField(('year'), choices=YEAR_CHOICES)
end_month = models.CharField(max_length=255, choices=MONTH, blank=True, null=True)
completion_year = models.IntegerField(('year'), choices=YEAR_CHOICES, blank=True, null=True)
present_working = models.BooleanField(default=False)
description = models.TextField(blank=True)
| [
"asashwanishakya@gmail.com"
] | asashwanishakya@gmail.com |
0971ebda5bdec00fd23231ca3d3cfd503aabf4a9 | 24807ea1008e5ae76c056ac9fe7ebb5844d1f82c | /utils/termVisInput | ca28a2072c52068ff6912477bf38b95081cf33d6 | [] | no_license | hsartoris/sproj | 223ef60cb073038f8983a67d0b9e7c7e55d51bdf | 120666ac6debeac2e3e65155204de7a02d407182 | refs/heads/master | 2021-01-23T17:44:07.509644 | 2018-05-29T04:15:34 | 2018-05-29T04:15:34 | 102,767,854 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | #!/usr/bin/python3
import sys
import numpy as np
if not len(sys.argv) >= 2:
print("Usage: termVisInput <spikeTimeRaster> [timesteps]")
sys.exit()
trunc = None
if len(sys.argv) == 3:
trunc = int(sys.argv[2])
print("Truncating data at " + str(trunc) + " timesteps")
spikes = np.loadtxt(sys.argv[1], delimiter=',').astype(int)
if trunc:
spikes = spikes[:,:trunc]
n = len(spikes)
spikes = spikes.transpose()
print("-"*(n+2))
for step in spikes:
print("|" + "".join([("#" if i == 1 else " ") for i in step]) + "|")
print("-"*(n+2))
| [
"hsartoris@gmail.com"
] | hsartoris@gmail.com | |
82c8ecc1af337f88dd1d91b4da0e659d84168878 | b6ae4c2070add8a645544cc20abd4ac8b503fde2 | /Desktop/rpi3_workshop/pi program/led2.py.save | b37070dc59890c13046ea62219840761ea9e73cd | [] | no_license | venkatvijay2309/raspberry_pi_basics | 23718277ed7f1c08907c27b12405ff637d135786 | 37df1f320efe3c5f5ef191099118d5c504eefe2e | refs/heads/master | 2020-03-21T02:11:55.455092 | 2018-06-20T05:50:24 | 2018-06-20T05:50:24 | 137,986,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | save | #!/usr/bin/python
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11,GPIO.OUT)
while True:
GPIO.output(18,True)
time.sleep(1#!/usr/bin/python
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11,GPIO.OUT)
while True: GPIO.output(18,False)
time.sleep(1)
| [
"venkat_vijay23@yahoo.com"
] | venkat_vijay23@yahoo.com |
2516bbcbdef9bc13e284ee5f474496f3d5fb0045 | 07461890fd1e8fc2180a876d7429ece95dfc9655 | /cspp1-practice/m11/p3/assignment3.py | 1beabf3db1c69075ec570e4c012e928af4d17a0e | [] | no_license | kranthikumar27/demo | c566b8755b1755936630d0928686c43b552eedd8 | 6ef7e5150c079525f1ac679bb9194c4c0fba98e7 | refs/heads/master | 2021-07-10T22:41:02.555667 | 2019-01-25T07:50:31 | 2019-01-25T07:50:31 | 142,857,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,643 | py | # Assignment-3
'''
At this point, we have written code to generate a random hand and display that hand to the user. We can also ask the user for a word (Python's input) and score the word (using your getWordScore). However, at this point we have not written any code to verify that a word given by a player obeys the rules of the game. A valid word is in the word list; and it is composed entirely of letters from the current hand. Implement the isValidWord function.
Testing: Make sure the test_isValidWord tests pass. In addition, you will want to test your implementation by calling it multiple times on the same hand - what should the correct behavior be? Additionally, the empty string ('') is not a valid word - if you code this function correctly, you shouldn't need an additional check for this condition.
Fill in the code for isValidWord in ps4a.py and be sure you've passed the appropriate tests in test_ps4a.py before pasting your function definition here.
'''
def isValidWord(word, hand, wordList):
"""
Returns True if word is in the wordList and is entirely
composed of letters in the hand. Otherwise, returns False.
Does not mutate hand or wordList.
word: string
hand: dictionary (string -> int)
wordList: list of lowercase strings
"""
for letters_word in word:
if letters_word in hand:
if word in wordList:
return True
return False
def main():
word=input()
n=int(input())
adict={}
for i in range(n):
data=input()
l=data.split()
adict[l[0]]=int(l[1])
l2=input().split()
print(isValidWord(word,adict,l2))
if __name__== "__main__":
main() | [
"kranthikumarpinku@msitprogram.net"
] | kranthikumarpinku@msitprogram.net |
5690b8a65b35121276c3493f5273eae7f9e1b7fb | 609ee4aad38036c29456581f821a9bad4d6b729a | /tests/test_pay.py | 37b2629d535646bc20848fbf05772211f9a8c3b2 | [] | no_license | sdkwe/pywe-pay | 32f14d218b0f8c029fb08a54df99ba70b90374b4 | daf1699c7dafd0960359b0c3f570f32cc906dc5f | refs/heads/master | 2020-05-29T15:13:05.371833 | 2020-04-27T07:32:54 | 2020-04-27T07:32:54 | 62,115,428 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # -*- coding: utf-8 -*-
import time
from pywe_pay import WeChatPay
from local_config_example import WechatPayConfig
class TestPayCommands(object):
def test_native_unifiedorder(self):
native = WechatPayConfig.get('JSAPI', {})
wxpay = WeChatPay(native.get('appID'), native.get('apiKey'), native.get('mchID'))
result = wxpay.order.create(body=u'支付测试', notify_url='https://a.com', out_trade_no=int(time.time() * 1000), total_fee=1, trade_type='NATIVE')
assert isinstance(result, dict)
assert result.get('return_code') == 'SUCCESS'
assert result.get('result_code') == 'SUCCESS'
assert result.get('code_url')
| [
"brightcells@gmail.com"
] | brightcells@gmail.com |
42a62da8e1d51a7a3b3e573cdd0c1b6b3f423315 | 80afa26ba73b53f38e3fc21bf395030762fe8981 | /576. Out of Boundary Paths.py | 5481266d25818462836a2c72949c9f604ad39dc5 | [] | no_license | iamshivamgoswami/Random-DSA-Questions | 45b402063dbd2e31da2eee7590b6991aa624637d | e36250d08cf0de59cd0a59b4f3293e55793b1a6f | refs/heads/main | 2023-07-15T15:48:36.363321 | 2021-08-26T03:40:47 | 2021-08-26T03:40:47 | 392,702,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | class Solution:
def findPaths(self, m: int, n: int, maxMove: int, i: int, j: int) -> int:
d = {}
def dfs(N, i, j):
if (i, j, N) in d:
return d[(i, j, N)]
if i == m or i < 0 or j == n or j < 0:
return 1
if N == 0:
return 0
s = 0
for x, y in [(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)]:
s += (dfs(N - 1, x, y))
d[(i, j, N)] = s
return s
return dfs(maxMove, i, j) % (10 ** 9 + 7)
| [
"shivamgoswami12@gmail.com"
] | shivamgoswami12@gmail.com |
ddbe67fad345be8ff4729ecc6e4784075d0dc64b | f6bcd45072eac1cc0dcb1a8238cb3ef0b94717c6 | /Python WebPage/Chapter 14/py4e_14_2.py | 9b8c187ba8ba11872d35a703077b87f558fe413e | [] | no_license | fmrigueiro/Py4e | e2bb026b76052e61772bd5dff92b0c278c98c5b4 | df0f965b9e525a0e20243f956ae205284f23b27e | refs/heads/master | 2020-04-03T08:00:51.736763 | 2019-01-31T21:34:42 | 2019-01-31T21:34:42 | 155,120,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | import urllib.request, urllib.parse, urllib.error
import xml.etree.ElementTree as ET
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
input = '''
<stuff>
<users>
<user x="2">
<id>001</id>
<name>Chuck</name>
</user>
<user x="7">
<id>009</id>
<name>Brent</name>
</user>
</users>
</stuff>'''
tree = ET.fromstring(input)
lst = tree.findall('users/user')
print('User count:', len(lst))
for item in lst:
print('Name', item.find('name').text)
print('Id', item.find('id').text)
print('Attribute', item.get('x')) | [
"fmrigueiro@gmail.com"
] | fmrigueiro@gmail.com |
a1f47cdb2a7e49ebb8fabff54cbeae52f0c7da79 | 7bce8261a0bad8809604b9ebc27ff3c24e6a56eb | /dashboard/TEST2.py | 95abc9ba560cecb9fb0f19609499f60cdfe7b091 | [] | no_license | Slavian2015/Defi | 3d1285a1f152c50c5bcc46e80b2304cf22d98e29 | 0c0edd1c9ee664b28d069005680cf0a9015e7095 | refs/heads/master | 2023-06-08T12:36:07.527515 | 2021-07-02T11:52:33 | 2021-07-02T11:52:33 | 360,075,081 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,725 | py | """
================== NEW ORDER =================
"""
"""{'e': 'ORDER_TRADE_UPDATE', 'T': 1625046806986, 'E': 1625046806989, 'o':
{'s': 'XRPUSDT', 'c': 'web_0Prp0GPBF3qmNbYihBgn',
'S': 'BUY', 'o': 'MARKET', 'f': 'GTC', 'q': '22.2', 'p': '0', 'ap': '0', 'sp': '0', 'x': 'NEW', 'X': 'NEW',
'i': 15384871700, 'l': '0', 'z': '0', 'L': '0', 'T': 1625046806986, 't': 0, 'b': '0', 'a': '0', 'm': False,
'R': False, 'wt': 'CONTRACT_PRICE', 'ot': 'MARKET', 'ps': 'BOTH',
'cp': False, 'rp': '0',
'pP': False, 'si': 0, 'ss': 0}}"""
"""
=========== s 'XRPUSDT' ep '0.67380'
"""
""" {'e': 'ACCOUNT_UPDATE', 'T': 1625046806986, 'E': 1625046806989, 'a':
{'B': [{'a': 'USDT', 'wb': '209.67765709', 'cw': '194.71862044', 'bc': '0'}],
'P': [{'s': 'XRPUSDT', 'pa': '22.2', 'ep': '0.67380', 'cr': '20.13517000', 'up': '0',
'mt': 'isolated', 'iw': '14.95903665', 'ps': 'BOTH', 'ma': 'USDT'}],
'm': 'ORDER'}}"""
""" {'e': 'ORDER_TRADE_UPDATE', 'T': 1625046806986, 'E': 1625046806989, 'o':
{'s': 'XRPUSDT', 'c': 'web_0Prp0GPBF3qmNbYihBgn', 'S': 'BUY', 'o': 'MARKET',
'f': 'GTC', 'q': '22.2', 'p': '0', 'ap': '0.67380', 'sp': '0', 'x': 'TRADE',
'X': 'FILLED', 'i': 15384871700, 'l': '22.2', 'z': '22.2', 'L': '0.6738',
'n': '0.00598334', 'N': 'USDT', 'T': 1625046806986, 't': 515352260,
'b': '0', 'a': '0', 'm': False, 'R': False, 'wt': 'CONTRACT_PRICE', 'ot': 'MARKET', 'ps': 'BOTH',
'cp': False, 'rp': '0', 'pP': False, 'si': 0, 'ss': 0}}"""
"""
================== NEW TP / SL =================
"""
""" {'e': 'ORDER_TRADE_UPDATE', 'T': 1625046850299, 'E': 1625046850303, 'o':
{'s': 'XRPUSDT', 'c': 'web_ihuNZxkJQ2a1yMFrYQDI', 'S': 'SELL', 'o': 'TAKE_PROFIT_MARKET',
'f': 'GTE_GTC', 'q': '0', 'p': '0', 'ap': '0', 'sp': '0.6760', 'x': 'NEW', 'X': 'NEW',
'i': 15384883175, 'l': '0', 'z': '0', 'L': '0', 'T': 1625046850299, 't': 0, 'b': '0', 'a': '0',
'm': False, 'R': True, 'wt': 'MARK_PRICE', 'ot': 'TAKE_PROFIT_MARKET', 'ps': 'BOTH',
'cp': True, 'rp': '0', 'pP': False, 'si': 0, 'ss': 0}}
{'e': 'ORDER_TRADE_UPDATE', 'T': 1625046850300, 'E': 1625046850303, 'o':
{'s': 'XRPUSDT', 'c': 'web_KQFVXIJXTeG6I9RUDmTo', 'S': 'SELL', 'o': 'STOP_MARKET',
'f': 'GTE_GTC', 'q': '0', 'p': '0', 'ap': '0', 'sp': '0.6720', 'x': 'NEW', 'X': 'NEW',
'i': 15384883176, 'l': '0', 'z': '0', 'L': '0', 'T': 1625046850300, 't': 0, 'b': '0', 'a': '0',
'm': False, 'R': True, 'wt': 'MARK_PRICE', 'ot': 'STOP_MARKET', 'ps': 'BOTH',
'cp': True, 'rp': '0', 'pP': False, 'si': 0, 'ss': 0}}
<<<<<< e >>>>>> :
{'e': 'ORDER_TRADE_UPDATE', 'T': 1625047065159, 'E': 1625047065165, 'o':
{'s': 'XRPUSDT', 'c': 'web_KQFVXIJXTeG6I9RUDmTo', 'S': 'SELL', 'o': 'STOP_MARKET',
'f': 'GTE_GTC', 'q': '0', 'p': '0', 'ap': '0', 'sp': '0.6720', 'x': 'EXPIRED', 'X': 'EXPIRED',
'i': 15384883176, 'l': '0', 'z': '0', 'L': '0', 'T': 1625047065159, 't': 0, 'b': '0', 'a': '0',
'm': False, 'R': True, 'wt': 'MARK_PRICE', 'ot': 'STOP_MARKET', 'ps': 'BOTH',
'cp': True, 'rp': '0', 'pP': False, 'si': 0, 'ss': 0}}
<<<<<< e >>>>>> :
{'e': 'ORDER_TRADE_UPDATE', 'T': 1625047065159, 'E': 1625047065165, 'o':
{'s': 'XRPUSDT', 'c': 'web_KQFVXIJXTeG6I9RUDmTo', 'S': 'SELL', 'o': 'MARKET',
'f': 'GTC', 'q': '22.2', 'p': '0', 'ap': '0', 'sp': '0.6720', 'x': 'NEW', 'X': 'NEW',
'i': 15384883176, 'l': '0', 'z': '0', 'L': '0', 'T': 1625047065159, 't': 0, 'b': '0', 'a': '0',
'm': False, 'R': True, 'wt': 'MARK_PRICE', 'ot': 'STOP_MARKET', 'ps': 'BOTH',
'cp': True, 'rp': '0', 'pP': False, 'si': 0, 'ss': 0}}"""
"""
================== SL RUN =================
"""
"""{'e': 'ACCOUNT_UPDATE', 'T': 1625047065159, 'E': 1625047065165, 'a':
{'B': [{'a': 'USDT', 'wb': '209.65617993', 'cw': '200.55946846', 'bc': '0'}],
'P': [{'s': 'XRPUSDT', 'pa': '13.5', 'ep': '0.67380', 'cr': '20.11603000', 'up': '-0.02469757',
'mt': 'isolated', 'iw': '9.09671147', 'ps': 'BOTH', 'ma': 'USDT'}],
'm': 'ORDER'}}"""
""" {'e': 'ORDER_TRADE_UPDATE', 'T': 1625047065159, 'E': 1625047065165, 'o':
{'s': 'XRPUSDT', 'c': 'web_KQFVXIJXTeG6I9RUDmTo', 'S': 'SELL', 'o': 'MARKET', 'f': 'GTC', 'q': '22.2', 'p': '0',
'ap': '0.67160', 'sp': '0.6720', 'x': 'TRADE', 'X': 'PARTIALLY_FILLED', 'i': 15384883176, 'l': '8.7', 'z': '8.7',
'L': '0.6716', 'n': '0.00233716', 'N': 'USDT', 'T': 1625047065159, 't': 515356385, 'b': '0', 'a': '0', 'm': False,
'R': True, 'wt': 'MARK_PRICE', 'ot': 'STOP_MARKET', 'ps': 'BOTH', 'cp': True,
'rp': '-0.01914000', 'pP': False, 'si': 0, 'ss': 0}}
"""
""" {'e': 'ACCOUNT_UPDATE', 'T': 1625047065159, 'E': 1625047065165, 'a':
{'B': [{'a': 'USDT', 'wb': '209.62285329', 'cw': '209.62285329', 'bc': '0'}],
'P': [{'s': 'XRPUSDT', 'pa': '0', 'ep': '0.00000', 'cr': '20.08633000', 'up': '0',
'mt': 'isolated', 'iw': '0', 'ps': 'BOTH', 'ma': 'USDT'}],
'm': 'ORDER'}}"""
"""<<<<<< e >>>>>> :
{'e': 'ORDER_TRADE_UPDATE', 'T': 1625047065159, 'E': 1625047065165, 'o': {'s': 'XRPUSDT', 'c': 'web_KQFVXIJXTeG6I9RUDmTo', 'S': 'SELL', 'o': 'MARKET', 'f': 'GTC', 'q': '22.2', 'p': '0', 'ap': '0.67160', 'sp': '0.6720', 'x': 'TRADE', 'X': 'FILLED', 'i': 15384883176, 'l': '13.5', 'z': '22.2', 'L': '0.6716', 'n': '0.00362664', 'N': 'USDT', 'T': 1625047065159, 't': 515356386, 'b': '0', 'a': '0', 'm': False, 'R': True, 'wt': 'MARK_PRICE', 'ot': 'STOP_MARKET', 'ps': 'BOTH', 'cp': True, 'rp': '-0.02970000', 'pP': False, 'si': 0, 'ss': 0}}
<<<<<< e >>>>>> :
{'e': 'ORDER_TRADE_UPDATE', 'T': 1625047065159, 'E': 1625047065165, 'o': {'s': 'XRPUSDT', 'c': 'web_ihuNZxkJQ2a1yMFrYQDI', 'S': 'SELL', 'o': 'TAKE_PROFIT_MARKET', 'f': 'GTE_GTC', 'q': '0', 'p': '0', 'ap': '0', 'sp': '0.6760', 'x': 'EXPIRED', 'X': 'EXPIRED', 'i': 15384883175, 'l': '0', 'z': '0', 'L': '0', 'T': 1625047065159, 't': 0, 'b': '0', 'a': '0', 'm': False, 'R': True, 'wt': 'MARK_PRICE', 'ot': 'TAKE_PROFIT_MARKET', 'ps': 'BOTH', 'cp': True, 'rp': '0', 'pP': False, 'si': 0, 'ss': 0}}
"""
# from datetime import datetime
# d = 1625046806986
#
# f = datetime.fromtimestamp(d/1000).strftime('%d.%m.%Y %H:%M')
#
# print(f)
import json
# new_data = {}
#
#
# symbols = ["XRP", "BTC", "ETH",
# "TRX", "EOS", "BNB",
# "LINK", "FIL", "YFI",
# "DOT", "SXP", "UNI",
# "LTC", "ADA", "AAVE"]
#
# for i in symbols:
# new_data[i+'USDT'] = False
#
# print(new_data)
new_data = {"active": False}
main_path = f'/usr/local/WB/dashboard/data/active.json'
with open(main_path, 'w', encoding='utf-8') as outfile:
json.dump(new_data, outfile, ensure_ascii=False, indent=4)
| [
"slavaku2014@gmail.com"
] | slavaku2014@gmail.com |
b111f234be7dbb56a0d5f24a929d01cfb737da18 | dc66c0cf24c5f741b6288f3d73e6436752432dad | /Backend/item/permission.py | 04cd38455921970da28b6f4a29bf6276cd7fc293 | [
"MIT"
] | permissive | Linzecong/LPOJ | bdcf79f5e751419c0cff14c818512d5509fd849f | 2f7ce194f1d510d8d006c2a35fdaa272f20ef1f3 | refs/heads/master | 2023-01-20T15:42:12.865669 | 2022-01-05T15:05:55 | 2022-01-05T15:05:55 | 164,289,923 | 236 | 79 | MIT | 2023-01-14T00:55:14 | 2019-01-06T08:41:36 | Vue | UTF-8 | Python | false | false | 1,855 | py | # coding=utf-8
from rest_framework import permissions
from .models import User
from board.models import SettingBoard
def getWikiPermission():
setting = SettingBoard.objects.filter(id=1)
if len(setting) != 0:
if setting[0].openwiki is False:
return False
else:
return True
else:
return False
def getVisitorPermission(request):
setting = SettingBoard.objects.filter(id=1)
if len(setting) != 0:
if setting[0].openvisitor is False:
userid = request.session.get('user_id', None)
if userid:
return True
else:
return False
else:
return True
else:
return True
class UserOnly(permissions.BasePermission):
def has_permission(self, request, view):
if getVisitorPermission(request) == False:
return False
if getWikiPermission() == False:
return False
if request.method == "DELETE":
return True
if request.method == "GET":
return False
else:
data = request.data
username = str(data.get('user'))
userid = request.session.get('user_id', None)
if userid == username:
return True
else:
return False
def has_object_permission(self, request, view, item):
if getVisitorPermission(request) == False:
return False
if getWikiPermission() == False:
return False
if request.method == "GET":
return False
else:
username = str(item.user.username)
userid = request.session.get('user_id', None)
if userid == username:
return True
else:
return False
| [
"linzecong@corp.netease.com"
] | linzecong@corp.netease.com |
0e65583a2f3733544f9d2a193e93f68be851c9df | 4b2c5fe21ffcc35837bba06d2c3b43c5116f74bd | /Blackjack.py | 8d5c04a77bb98a1ca5b4be7994bed8812a47cdf5 | [] | no_license | joydas65/Codeforces-Problems | 8870cbbf1db9fa12b961cee7aaef60960af714ae | eb0f5877d0fede95af18694278029add7385973d | refs/heads/master | 2023-06-23T07:16:49.151676 | 2023-06-17T07:28:24 | 2023-06-17T07:28:24 | 184,123,514 | 5 | 1 | null | 2020-11-28T07:28:03 | 2019-04-29T18:33:23 | Python | UTF-8 | Python | false | false | 150 | py | n = int(input())
if n <= 10 or n > 21:
print(0)
elif (n >= 11 and n <= 19) or n == 21:
print(4)
else:
print(15)
| [
"noreply@github.com"
] | joydas65.noreply@github.com |
9443ce4d518560dda86c885e6674bd77ee50e7ee | b102fc6c1588a9cd124b4905d486b0bce9e1cb8a | /apt_grayscale/Grayscale.py | 728f63214306debde0a015808163b7787794de24 | [] | no_license | jyu197/comp_sci_101 | 19568a5fde15575ebfcbb0683115b2d06d58c740 | 933ddecd122117b93d7a7f8ef3e8ac55c0db9257 | refs/heads/master | 2016-09-06T09:08:31.028024 | 2015-12-14T07:37:42 | 2015-12-14T07:37:42 | 41,649,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | '''
Created on Sep 9, 2015
@author: Jonathan Yu
'''
def convert(r,g,b):
return 0.21*r + 0.71*g+0.07*b
if __name__ == '__main__':
pass | [
"jyu197@gmail.com"
] | jyu197@gmail.com |
de46e540b31326ef630d5201263e4620f7b9132b | 2e90219b0eb471c2e7b9ea3ee532ab750497a33a | /asatlib/util/distance.py | 320ea4822ef36e35d3fa177646e930d39aad345b | [] | no_license | atrautsch/emse2019_replication | c6d8c65735235ed47674cbccd312c159b9a12d4b | e79b2abf54639d8cf0d136f89175888ae56ac05d | refs/heads/master | 2022-12-24T04:35:59.590458 | 2020-07-16T13:16:08 | 2020-07-16T13:16:08 | 225,346,188 | 0 | 1 | null | 2022-12-15T23:11:08 | 2019-12-02T10:21:26 | Jupyter Notebook | UTF-8 | Python | false | false | 4,286 | py |
import numpy as np
def levenshtein(prevs, currs):
"""Levenshtein distance metric implemented with Wagner-Fischer algorithm."""
ops = []
# trivial cases
if not prevs:
for cur in currs:
ops.append('add:{}'.format(cur))
if not currs:
for prev in prevs:
ops.append('del:{}'.format(prev))
# 1. initialize matrix with words including 0 word
rows = len(prevs) + 1
cols = len(currs) + 1
matrix = np.zeros((rows, cols))
matrix[0] = range(cols)
matrix[:, 0] = range(rows)
# 2. fill matrix according to levenshtein rules
for row in range(1, rows):
for col in range(1, cols):
# we skip 0 word with range(1, ) need to subtract again from word sequence
prev = prevs[row - 1]
curr = currs[col - 1]
# if char is the same use character use previous diagonal element because nothing has changed
if prev == curr:
matrix[row, col] = matrix[row - 1, col - 1]
# else use minval of upper, leftmost and previous diagonal element + 1
else:
# but we do not necessarily know which one
# matrix[row, col - 1] insertions
# matrix[row - 1, col] deletion
# matrix[row - 1, col - 1] substitution
minval = min(matrix[row, col - 1], matrix[row - 1, col], matrix[row - 1, col - 1])
matrix[row, col] = minval + 1
# print(matrix)
distance = matrix[rows - 1, cols - 1]
# operations by using the matrix again from bottom right to top left
# https://stackoverflow.com/questions/41149377/extracting-operations-from-damerau-levenshtein
row = rows - 1
col = cols - 1
last_traversal = None
while row > 0 and col > 0:
idx = min([matrix[row, col - 1], matrix[row - 1, col], matrix[row - 1, col - 1]])
# es gibt kein minimum kleiner als aktuelle zelle und wir sind noch nicht am rand im nächsten schritt
if idx == matrix[row, col] and not (row - 1 == 0 or col - 1 == 0):
row -= 1
col -= 1
continue
# wir sind am rand der matrix angekommen
if row - 1 == 0 and not col -1 == 0: # oberer rand rest ist insert oder keine änderung
last_traversal = 'left'
if idx < matrix[row, col]:
ops.append('add:{}'.format(currs[col - 1]))
col -= 1
continue
if col - 1 == 0 and not row -1 == 0: # unterer rand, rest ist delete oder keine änderung
last_traversal = 'up'
if idx < matrix[row, col]:
ops.append('del:{}'.format(prevs[row - 1]))
row -= 1
continue
if col - 1 == 0 and row - 1 == 0: # ende erreicht, letzte änderung basiert auf unserer letzten operation, wenn es keine gab dann ist es eine subst
if idx < matrix[row, col]:
if last_traversal == 'up':
ops.append('del:{}'.format(prevs[row - 1]))
elif last_traversal == 'left':
ops.append('add:{}'.format(currs[col - 1]))
else:
# ops.append('substitution:{}->{}'.format(prevs[row - 1], currs[col - 1]))
ops.append('del:{}'.format(prevs[row - 1]))
ops.append('add:{}'.format(currs[col - 1]))
col -= 1
row -= 1
continue
# es gibt ein minimum dem wir folgen
if idx < matrix[row, col]:
# finden wir die richtung, präferenz deletion, insertion, substitution
if matrix[row - 1, col] < matrix[row, col]:
ops.append('del:{}'.format(prevs[row - 1]))
row -= 1
elif matrix[row, col - 1] < matrix[row, col]:
ops.append('add:{}'.format(currs[col - 1]))
col -= 1
elif matrix[row - 1, col - 1] < matrix[row, col]:
# ops.append('substitution:{}->{}'.format(prevs[row - 1], currs[col - 1]))
ops.append('del:{}'.format(prevs[row - 1]))
ops.append('add:{}'.format(currs[col - 1]))
row -= 1
col -= 1
return distance, list(reversed(ops))
| [
"alexander.trautsch@cs.uni-goettingen.de"
] | alexander.trautsch@cs.uni-goettingen.de |
369b6995542f80e18d20836e974b8ce0572cf80f | 511dbb4c1b132263ff2f943eee574edf6963a027 | /bitcoin.py | 417f548fa7b17cf66681463b3a01e237c6bd503a | [] | no_license | ElToro13/Python | b41c07c227d2574178de474e04958448bf1b57d9 | 90e2c741f4d851a6866907a0cc9c12cd088c4c8c | refs/heads/master | 2022-12-09T05:51:50.287085 | 2019-04-27T23:30:34 | 2019-04-27T23:30:34 | 98,705,694 | 0 | 0 | null | 2022-12-08T00:43:56 | 2017-07-29T03:31:37 | Python | UTF-8 | Python | false | false | 1,068 | py | import requests
import json
def Bitcoin():
r = requests.get("https://www.zebapi.com/api/v1/market/ticker/btc/inr")
data = json.loads(r.content)
print(data["buy"])
print(data["sell"])
B = int(data["buy"])
S = int(data["sell"])
if B>S:
print("Difference: " + str(B-S))
'''
listo = []
ID = []
for i in range(0,len(team2)):
listo.append(team2[i]['team_id'])
for j in range(0,len(listo)):
try:
URL = "http://api.football-api.com/2.0/matches?comp_id=1204&team_id=" + listo[j] + "&match_date=2017-09-30&Authorization=565ec012251f932ea4000001fa542ae9d994470e73fdb314a8a56d76"
fix_init = requests.get(URL)
fix = json.loads(fix_init.content)
match_id = fix[0]['id']
if match_id not in ID:
ID.append(match_id)
local = fix[0]['localteam_name']
away = fix[0]['visitorteam_name']
print(local + " V/s " + away)
except KeyError:
print()
'''
Bitcoin()
| [
"noreply@github.com"
] | ElToro13.noreply@github.com |
68d8a45546d0bbb7928e9f2ea369230349a8c8f7 | 55540eac867d0a402cc3819f744679fb1a80542d | /Intro_Regular_Expressions.py | 98b3da4a33ca318fba76bcf394f33c7fbc9c4154 | [] | no_license | PrinceAkpabio/Search-program | c620ed368c710c80a634cc23d6a7d848e23faf87 | 86ca13c640c7d20fd923438c773650e9bde88543 | refs/heads/master | 2022-12-11T06:02:15.635279 | 2020-08-30T20:52:35 | 2020-08-30T20:52:35 | 291,549,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | import re
Meeting = 'Hello from prince@gmail.com about the meeting @ 3 P.M'
emails = re.findall('\S+@\S+', Meeting)
print(emails) | [
"princeakpabio90@gmail.com"
] | princeakpabio90@gmail.com |
2171406db9a9c2204f98d47de403695c0137c1ef | 98009813fb09ca43e95f17f157dfcd6cf3cdf1a2 | /cabinet/migrations/0002_registrationrequest.py | 3db3ea4253b237b0802f1765d436375fe7845d65 | [] | no_license | elenpetrovich/personal-cabinet | 34d4c0bdab2d5e7302de19642802fe966794d38e | 916915de387fc99fe933259eef80147b6c945f0e | refs/heads/master | 2023-05-13T23:10:16.904444 | 2021-06-10T23:48:09 | 2021-06-10T23:48:09 | 334,699,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | # Generated by Django 3.1.7 on 2021-05-18 09:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cabinet', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='RegistrationRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fio', models.CharField(max_length=511, verbose_name='ФИО')),
('email', models.EmailField(max_length=254, verbose_name='Электронная почта')),
('phone', models.CharField(max_length=12, verbose_name='Номер телефона')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Дата')),
('ip', models.GenericIPAddressField(verbose_name='IP')),
],
),
]
| [
"elenapetrovich28@gmail.com"
] | elenapetrovich28@gmail.com |
5b08ad9e0113f3f12bec8bf4cb18d76f59ce24c4 | 3445b0684ff8f1e16c649991cce4cb745a3e6c77 | /venv/bin/isort | b0745586e9f633e64d1fd1c752af407363b62fe7 | [] | no_license | technic1/django-blog | 76cd063c48b8870b9aae66791a42ca84a890c82f | 8d245fad46bda7cb9231a8f2560127b56d094e9f | refs/heads/master | 2020-09-05T19:38:06.120186 | 2019-11-12T18:30:52 | 2019-11-12T18:30:52 | 220,193,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/home/technic/PycharmProjects/django_blog/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"dps_96@mail.ru"
] | dps_96@mail.ru | |
60b4b7f8ae3624a487bdf78b9ff1449db7aa2e84 | 9fdff458f1e20321aaa70d4669eeacb9423c9f36 | /multi/train/train_sources_weights_loop.py | c928b2c8a1b372db96c7899e0a3cd85c0df91aa4 | [] | no_license | leemengwei/GNRX | 11639716f220f4721e521998ff282ee40ca50275 | 80c5b78826187a519588af3d8c71fb40ba7b94fe | refs/heads/main | 2023-04-01T00:34:12.638628 | 2021-03-29T05:02:03 | 2021-03-29T05:02:03 | 352,524,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,721 | py | # python train_sources_weights_loop.py -A
import config
import datetime
import pandas as pd
import numpy as np
import os,sys
import matplotlib.pyplot as plt
import shutil
import argparse
from xgboost.sklearn import XGBRegressor
from sklearn.linear_model import LinearRegression, LassoCV, Ridge, Lasso
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import PolynomialFeatures
import warnings
import grasp_from_zhikong_web
import dependency_wind_bases
#from IPython import embed
#import tqdm
#from sko.GA import GA
#import torch
warnings.filterwarnings('ignore')
def evaluate(cap:float, real:pd.Series, predict:pd.Series, method:str='MSE') -> np.array:
if method == 'MSE':
error = np.nanmean((real - predict)**2)
elif method == 'MAE':
error = np.nanmean(np.abs(real - predict))
else:
import AccuracyFormula #ZouQianKun's lib
data = pd.DataFrame({'time':real.index, 'gt':real, 'predict':predict})
#For NorthWest region
if method == 'PianChaKaoHe':
error = AccuracyFormula.CalcDIP_byDate(data, 'time', 'gt', 'predict', cap, 0.25)
#For other regions
elif method == 'KouDian_RMSE':
error = AccuracyFormula.CalcKouDian_RMSE_byDate(data, 'time', 'gt', 'predict', cap, 0.8) #80% for wind RMSE
elif method == 'KouDian_MAE':
error = AccuracyFormula.CalcKouDian_MAE_byDate(data, 'time', 'gt', 'predict', cap, 0.85) #85% for wind MAE
else:
raise NotImplementedError
error = pd.DataFrame.from_dict(error, orient='index', columns=['koudian'])
error = error.values.sum()
return error
def save_log(Y_test:pd.Series, optimized_combination_test:pd.Series, business_power_test:pd.Series, filename:str='log') -> np.array:
out = pd.DataFrame({'real': Y_test, 'combined': optimized_combination_test, 'business': business_power_test})
out.to_csv(os.path.join('..', 'output', '%s.csv'%filename))
def batchGradientDescent(x, y, theta, alpha = 0.1, maxIterations=10000):
m = x.shape[0]
alpha = alpha/m
for i in range(0, maxIterations):
y_pred = np.dot(x, theta)
ERROR_loss = 1/m * np.sum(np.abs(y_pred - y))
#ERROR_gradient:
mask = (y-y_pred).copy()
mask[y-y_pred>0] = 1
mask[y-y_pred<=0] = -1
#theta = theta - alpha * gradient
theta = theta + alpha * 1/m * mask.dot(x)
print('epoch', i, ERROR_loss)
return np.array(theta)
def obj_func(W):
error = np.nanmean(np.abs((W*X_train.values).sum(axis=1) - Y_train.values))
#error = np.nanmean(np.abs((W*X_train.values).sum(axis=1) - Y_train.values)**2)
return error
def save_output(station_name, meteor_powers, w):
#save output:
if not os.path.exists(os.path.join('..', 'output', station_name)):
os.mkdir(os.path.join('..', 'output', station_name))
meteor_weights = get_ready_output(meteor_powers.columns, w)
meteor_weights.to_csv(os.path.join('..', 'output', station_name, 'weights.csv'))
shutil.copy(os.path.join('..', 'data', 'model_curve_data', '%s.csv'%station_name), os.path.join('..', 'output', station_name, 'curve.csv'))
return meteor_weights
def get_ready_output(column_names, w):
col_names = []
for i in column_names:
col_names.append(i.strip('pow_'))
meteor_weights = pd.DataFrame(w.reshape(1, -1), columns=col_names)
aux = { \
'day_2_factor_for_day_5': 0.333333333,
'day_3_factor_for_day_5': 0.333333333,
'day_4_factor_for_day_5': 0.333333333,
'day_2_factor_for_day_6': 0.45,
'day_4_factor_for_day_6': 0.35,
'day_5_factor_for_day_6': 0.2,
'day_3_factor_for_day_7': 0.4,
'day_4_factor_for_day_7': 0.05,
'day_5_factor_for_day_7': 0.2,
'day_6_factor_for_day_7': 0.35,
'day_5_factor_for_day_8': 0.333333333,
'day_6_factor_for_day_8': 0.333333333,
'day_7_factor_for_day_8': 0.333333333,
'day_5_factor_for_day_9': 0.45,
'day_7_factor_for_day_9': 0.35,
'day_8_factor_for_day_9': 0.2,
'day_6_factor_for_day_10': 0.4,
'day_7_factor_for_day_10': 0.05,
'day_8_factor_for_day_10': 0.2,
'day_9_factor_for_day_10': 0.35,
'day_1_factor_for_day_11': 0.4,
'day_3_factor_for_day_11': 0.3,
'day_4_factor_for_day_11': 0.3,
'day_lowest_power_threshold': 5,
'day_set_lowest_to': 0
}
for name in aux.keys():
meteor_weights[name] = aux[name]
meteor_weights = meteor_weights.T
meteor_weights.columns=['weights']
meteor_weights['source_name'] = meteor_weights.index
meteor_weights.index = meteor_weights['source_name']
meteor_weights = meteor_weights.drop('source_name', axis=1)
return meteor_weights
def get_steady_meteor_powers(meteor_powers):
meteor_powers = meteor_powers.fillna(0)
singular_column_names = list(meteor_powers.columns[meteor_powers.values.mean(axis=0)==0])
if len(singular_column_names) > 0:
meteor_powers = meteor_powers.drop(singular_column_names, axis=1)
print("Notice: %s drop for calculation steadyness"%singular_column_names)
return meteor_powers
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('--train_length', '-TRAIN', type=int, default=config.train_length)
parser.add_argument('--VISUALIZATION', "-V", action='store_true', default=False)
parser.add_argument('--ANIMATION', "-A", action='store_true', default=False)
parser.add_argument('--use_spd', "-SPD", action='store_true', default=False)
parser.add_argument('--test_length', '-TEST', type=int, default=config.test_length)
parser.add_argument('--shift_months', '-S', type=int, default=config.shift_months)
parser.add_argument('--data_gap_day', '-G', type=int, default=0)
parser.add_argument('--loop_days', '-L', type=int, default=config.loop_days)
parser.add_argument('--method', '-M', type=str, default='ode')
parser.add_argument('--filename', '-F', type=str, default=config.filename)
args = parser.parse_args()
args.test_length = int(args.test_length - 1)
print(args)
shift_now = datetime.datetime.today() - datetime.timedelta(args.shift_months*31)
station_names = pd.read_csv(args.filename, header=None).iloc[:,:]
start_date_grasp_date = shift_now - datetime.timedelta(args.train_length+args.test_length+args.loop_days+args.data_gap_day)
start_date_grasp = start_date_grasp_date.strftime("%Y-%m-%d")
end_date_grasp = shift_now.strftime("%Y-%m-%d")
sources_to_use = pd.read_csv(os.path.join('..', 'data', 'sources_to_use.csv'), index_col=0)
sources_to_use = sources_to_use[sources_to_use['use_or_not'] == 1]
logs = pd.DataFrame(index = list(pd.Series(station_names.values.reshape(-1))), columns= ['ERROR_optimized_train', 'ERROR_optimized_test', 'ERROR_business_train', 'ERROR_business_test', 'improvement_train (%)', 'improvement_test (%)', 'remark'])
for col in logs.columns:
for row in logs.index:
logs.loc[row, col] = []
#STATION LOOP:
overall_LSE_imp = []
overall_ERROR_imp = []
for station_name in station_names.iterrows():
station_name = station_name[1].values[0]
print("\n\nStation: %s"%station_name)
print("grasp data %s~%s"%(start_date_grasp, end_date_grasp))
#read zhikong data:
raw_data_all, cap, plant_name, FarmType, longitude, latitude = grasp_from_zhikong_web.read_statistic_base(station_name, start_date_grasp, end_date_grasp, readweather=1)
raw_data_all = raw_data_all.dropna(subset=['power_true'])
cap = float(cap)
if len(raw_data_all)==0:
log = 'no gt'
logs.loc[station_name, 'remark'] = log
print(log)
continue
assert (FarmType == 0) #0 for wind
raw_data_all = raw_data_all.loc[np.append(True, (raw_data_all.power_true.values[1:] - raw_data_all.power_true.values[:-1]) != 0)]
#get powers:
real_power = np.clip(np.abs(raw_data_all['power_true']), -cap, cap)
if 'fore_power' in raw_data_all.columns:
business_power = np.clip(np.abs(raw_data_all['fore_power']), -cap, cap)
else:
log = 'no fore power'
logs.loc[station_name, 'remark'] = log
raw_data_all['fore_power'] = 0.1
business_power = np.clip(np.abs(raw_data_all['fore_power']), -cap, cap)
print(log)
column_names_to_use = []
for i in raw_data_all.columns:
if args.use_spd:
use_feature = (i.startswith('pow_') or i.startswith('spd_'))
else:
use_feature = i.startswith('pow_')
if use_feature and (i in sources_to_use.index):
column_names_to_use.append(i)
#get de min power:
if 'de_min' in sources_to_use.index:
#Use curve to give power prediction:
DeMin_curve = pd.read_csv(os.path.join('..', 'data', 'model_curve_data', '%s.csv'%station_name), index_col=0)
if 'spd_7' not in raw_data_all.columns:
log = 'No spd_7, thus no de_min'
logs.loc[station_name, 'remark'] = log
print(log)
else:
column_names_to_use += ['de_min']
DeMin_prediction = pd.Series(np.interp(raw_data_all.loc[raw_data_all.index, 'spd_7'], DeMin_curve.values[:,0], DeMin_curve.values[:,1]), index=raw_data_all.index)
raw_data_all['de_min'] = DeMin_prediction
meteor_powers = raw_data_all[column_names_to_use]
if len(raw_data_all) == 0:
log = 'no gt data'
logs.loc[station_name, 'remark'] = log
print(log)
continue
elif meteor_powers.shape[1] == 0:
log = 'no meteor_powers'
logs.loc[station_name, 'remark'] = log
print(log)
continue
else:
#TIMESLICE LOOP: #when loop over train and test
error_recorder = dependency_wind_bases.Recorder()
concat_optimized_test = pd.Series([], dtype=float)
concat_business_test = pd.Series([], dtype=float)
concat_real_test = pd.Series([], dtype=float)
plt.ion()
for i in list(range(args.loop_days)):
print('Time slice', i)
start_date_train = start_date_grasp_date + datetime.timedelta(i)
end_date_train = start_date_train + datetime.timedelta(args.train_length)
start_date_test = end_date_train + datetime.timedelta(int(args.data_gap_day+1))
end_date_test = start_date_test + datetime.timedelta(args.test_length)
start_date_train_str = start_date_train.strftime("%Y-%m-%d")
end_date_train_str = end_date_train.strftime("%Y-%m-%d")
start_date_test_str = start_date_test.strftime("%Y-%m-%d")
end_date_test_str = end_date_test.strftime("%Y-%m-%d")
print("Train from %s to %s"%(start_date_train_str, end_date_train_str))
print("Test from %s to %s"%(start_date_test_str, end_date_test_str))
meteor_powers_slice = meteor_powers.loc[start_date_train_str: end_date_test_str]
real_power_slice = real_power[meteor_powers_slice.index]
#split dataset:
X = meteor_powers_slice
X['bias'] = 1
Y = real_power_slice
X_train, X_test = X.loc[start_date_train_str:end_date_train_str], X.loc[start_date_test_str:end_date_test_str]
Y_train, Y_test = Y.loc[start_date_train_str:end_date_train_str], Y.loc[start_date_test_str:end_date_test_str]
#handle duplicates
X_train = get_steady_meteor_powers(X_train).dropna()
X_test = X_test[X_train.columns].dropna()
Y_train = Y_train.dropna()
Y_test = Y_test.dropna()
if len(set(X_train.columns) - {'de_min', 'bias'}) == 0:
log = 'source not enough'
logs.loc[station_name, 'remark'] = log
print(log)
continue
if X_train.shape[0] < X_train.shape[1]:
print("shape of X 0<1")
continue
if Y_test.shape[0] < 6:
print("len Y <6")
continue
business_power_train = business_power.loc[Y_train.index]
business_power_test = business_power.loc[Y_test.index]
#Choose methods:
if args.method == 'ode':
#solve ODE equation:
try:
w = np.linalg.solve(np.dot(X_train.T.copy(), X_train), np.dot(X_train.T.copy(), Y_train))
except Exception as e:
log = '%s, \n %s, \t %s'%(e, X_train.describe(), X_test.describe())
#log = '%s'%e
logs.loc[station_name, 'remark'] = log
print(log)
continue
elif args.method == 'gd':
init_w = [1/X_train.shape[1]]*(X_train.shape[1]-1) # -1 for bias
w = batchGradientDescent(X_train, Y_train, init_w+[1], alpha=0.1)
elif args.method == 'ga': #ga
w = np.tile(0, (1,5))
n_dim = X_train.shape[1]
lb = [-3]*(n_dim-1);lb.append(-20)
ub = [3]*(n_dim-1);ub.append(20)
ga = GA(func=obj_func, n_dim=n_dim, size_pop=1000, max_iter=1000, lb=lb, ub=ub)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#ga.to(device=deivce)
w, residuals = ga.run()
elif args.method == 'poly': #poly lr
w = np.tile(0, (1,5))
poly = PolynomialFeatures(degree=2)
poly_X_train = poly.fit_transform(X_train.iloc[:,:-1])
poly_X_test = poly.fit_transform(X_test.iloc[:,:-1])
regressor = LinearRegression()
regressor.fit(poly_X_train, Y_train)
elif args.method == 'xgb':
w = np.tile(0, (1,5))
regressor = XGBRegressor(max_depth=4)
regressor.fit(X_train, Y_train)
elif args.method == 'lasso':
w = np.tile(0, (1,5))
regressor = Lasso()
regressor.fit(X_train, Y_train)
elif args.method == 'ridge':
w = np.tile(0, (1,5))
regressor = Ridge()
regressor.fit(X_train, Y_train)
elif args.method == 'mlp':
w = np.tile(0, (1,5))
regressor = MLPRegressor()
regressor.fit(X_train, Y_train)
else:
regressor = None
#eval train:
if args.method == 'ode' or args.method == 'ga' or args.method == 'gd':
optimized_combination_train = (w*X_train).sum(axis=1)
optimized_combination_test = (w*X_test).sum(axis=1)
elif args.method == 'poly':
optimized_combination_train = regressor.predict(poly_X_train)
optimized_combination_test = regressor.predict(poly_X_test)
optimized_combination_train = pd.Series(optimized_combination_train, index=X_train.index)
optimized_combination_test = pd.Series(optimized_combination_test, index=X_test.index)
else:
optimized_combination_train = regressor.predict(X_train)
optimized_combination_test = regressor.predict(X_test)
optimized_combination_train = pd.Series(optimized_combination_train, index=X_train.index)
optimized_combination_test = pd.Series(optimized_combination_test, index=X_test.index)
#eval train:
optimized_combination_train = np.clip(optimized_combination_train, 0, max(Y))
ERROR_optimized_train = evaluate(cap, Y_train, optimized_combination_train, method=config.eval_metric)
ERROR_business_train = evaluate(cap, Y_train, business_power_train, config.eval_metric)
ERROR_improvement_train = (ERROR_business_train-ERROR_optimized_train)/ERROR_business_train*100
#eval test:
optimized_combination_test = np.clip(optimized_combination_test, 0, max(Y))
ERROR_optimized_test = evaluate(cap, Y_test, optimized_combination_test, config.eval_metric)
ERROR_business_test = evaluate(cap, Y_test, business_power_test, config.eval_metric)
ERROR_improvement_test = (ERROR_business_test-ERROR_optimized_test)/ERROR_business_test*100
#save externals:
save_log(Y_test, optimized_combination_test, business_power_test, station_name)
meteor_weights = save_output(station_name, X_train, w)
print("Train Improvement from %s to %s, %s%%"%(ERROR_business_train, ERROR_optimized_train, ERROR_improvement_train))
print("Test Improvement from %s to %s, %s%%"%(ERROR_business_test, ERROR_optimized_test, ERROR_improvement_test))
#print('Weight:', meteor_weights)
if args.ANIMATION:
plt.plot(meteor_powers_slice, 'blue', alpha=0.2, linewidth=3, label='sources')
plt.plot(Y_train, 'k', alpha=0.5)
plt.plot(optimized_combination_train, 'g', alpha=0.5)
plt.plot(business_power_train, 'r', alpha=0.5)
plt.plot(Y_test, 'k', label='real')
plt.plot(optimized_combination_test, 'g', label='optimized', linestyle='--')
plt.plot(business_power_test, 'r', label='business', linestyle=':')
plt.title('%s, %s, %s'%(station_name, ERROR_improvement_train, ERROR_improvement_test))
plt.legend()
plt.grid()
plt.draw()
plt.pause(0.1)
plt.clf()
#Misc
concat_optimized_test = concat_optimized_test.append(optimized_combination_test)
concat_business_test = concat_business_test.append(business_power_test)
concat_real_test = concat_real_test.append(Y_test)
error_recorder.add_one('%s_ERROR_optimized_train'%station_name, ERROR_optimized_train)
error_recorder.add_one('%s_ERROR_optimized_test'%station_name, ERROR_optimized_test)
error_recorder.add_one('%s_ERROR_business_train'%station_name, ERROR_business_train)
error_recorder.add_one('%s_ERROR_business_test'%station_name, ERROR_business_test)
error_recorder.add_one('%s_improvement_train (%%)'%station_name, ERROR_improvement_train)
error_recorder.add_one('%s_improvement_test (%%)'%station_name, ERROR_improvement_test)
#TIME LOOP DONE.
plt.close()
plt.ioff()
#Mean over redudant slices:
if len(concat_optimized_test) == 0:
print("len concat test =0")
continue
#Concatenate all slices of timeloops
optimized_combination_test = concat_optimized_test.resample('15min').mean().dropna()
business_power_test = business_power.reindex(optimized_combination_test.index).dropna()
common_index = optimized_combination_test.index & business_power_test.index
optimized_combination_test = optimized_combination_test.loc[common_index]
business_power_test = business_power_test.loc[common_index]
real_power_test = real_power.loc[common_index]
ERROR_opt = np.nanmean(np.abs(optimized_combination_test - real_power_test))
LSE_opt = np.nanmean((optimized_combination_test - real_power_test)**2)
ERROR_bus = np.nanmean(np.abs(business_power_test - real_power_test))
LSE_bus = np.nanmean((business_power_test - real_power_test)**2)
ERROR_imp = (ERROR_bus-ERROR_opt)/ERROR_bus*100
LSE_imp = (LSE_bus-LSE_opt)/LSE_bus*100
logs.loc[station_name, 'ERROR_optimized_train'] = error_recorder.get_mean('%s_ERROR_optimized_train'%station_name)
logs.loc[station_name, 'ERROR_optimized_test'] = error_recorder.get_mean('%s_ERROR_optimized_test'%station_name)
logs.loc[station_name, 'ERROR_business_train'] = error_recorder.get_mean('%s_ERROR_business_train'%station_name)
logs.loc[station_name, 'ERROR_business_test'] = error_recorder.get_mean('%s_ERROR_business_test'%station_name)
logs.loc[station_name, 'improvement_train (%)'] = error_recorder.get_mean('%s_improvement_train (%%)'%station_name)
logs.loc[station_name, 'improvement_test (%)'] = error_recorder.get_mean('%s_improvement_test (%%)'%station_name)
logs.loc[station_name, 'loop_optimized_output'] = ','.join(np.round(optimized_combination_test, 1).astype(str).to_list())
logs.loc[station_name, 'loop_real_output'] = ','.join(np.round(real_power_test, 1).astype(str).to_list())
logs.loc[station_name, 'loop_business_power_test'] = ','.join(np.round(business_power_test, 1).astype(str).to_list())
logs.loc[station_name, 'loop_test ERROR (%)'] = ERROR_imp
logs.loc[station_name, 'loop_test LSE (%)'] = LSE_imp
print('loop given: ERROR:%s, LSE:%s'%(ERROR_imp, LSE_imp))
#plots:
fig = plt.figure(figsize=(18, 10))
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.plot(meteor_powers.loc[Y_train.index], alpha=0.4, c='gray', label='sources')
ax1.plot(real_power.loc[Y_train.index], label='real', c='k')
ax1.plot(optimized_combination_train, label='optimized', c='g')
ax1.plot(business_power_train, label='business', c='r')
ax2.plot(meteor_powers.reindex(optimized_combination_test.index), alpha=0.4, c='gray', label='sources')
ax2.plot(real_power_test, label='real', c='k')
ax2.plot(optimized_combination_test, label='optimized', c='g', alpha=1)
ax2.plot(business_power_test, label='business', c='r', alpha=0.8)
ax1.legend()
ax2.legend()
ax1.set_title("%s \nTrain result for meteor sources, improvement(%s): %s%%"%(station_name, config.eval_metric, ERROR_improvement_train))
ax2.set_title("%s \nTest result for meteor sources, improvement(%s): %s%%"%(station_name, config.eval_metric, ERROR_improvement_test))
ax1.grid()
ax2.grid()
if args.VISUALIZATION:
plt.show()
plt.savefig(os.path.join('..', 'png', '%s_%s_%s.png'%(station_name, args.train_length, args.test_length)))
plt.close()
#STATION LOOP DONE
#Statistics:
#logs.to_csv(os.path.join('..', 'output', 'detials_%s_%s_%s_%s_%s_%s-%s_loop%s.csv'%(args.train_length, args.test_length, np.nanmean(overall_LSE_imp), args.method, args.use_spd, start_date_grasp, end_date_grasp, args.loop_days)))
#print(logs)
#print(logs.describe())
print("Finish list", args.filename)
| [
"1099366685@qq.com"
] | 1099366685@qq.com |
4a3bb92e0a9b95c1fc10eb9db2fd34e8f5cdcb8d | 1669bf106be7e4e88ad957aa1f0a708a49f9ef87 | /first_website/setup.py | d87bfe260f13a957af9d07c566ab6284fad70c61 | [] | no_license | genzj/python-fundamentals-course | 280166037bb6ff25e2400fa3b281de153824c622 | 31218a42c609d923b3ae0c7d785b9dc02c0d9a6e | refs/heads/master | 2023-01-09T22:13:04.040355 | 2021-03-20T02:33:18 | 2021-03-20T02:33:18 | 218,776,587 | 2 | 2 | null | 2022-12-26T20:59:32 | 2019-10-31T13:47:43 | Python | UTF-8 | Python | false | false | 1,512 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [ ]
setup_requirements = [ ]
test_requirements = [ ]
setup(
    author="Jie ZHU",
    author_email='zj0512@gmail.com',
    # Any 2.7+ or 3.5+ interpreter; 3.0-3.4 explicitly excluded.
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
    # Trove classifiers for PyPI; mirror the python_requires range above.
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
    # requirements / setup_requirements / test_requirements are the empty
    # lists declared at module level above.
    install_requires=requirements,
    # PyPI long description: README.rst followed by HISTORY.rst.
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='first_website',
    name='first_website',
    packages=find_packages(include=['first_website', 'first_website.*']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/genzj/first_website',
    version='0.1.0',
    zip_safe=False,
)
| [
"zj0512@gmail.com"
] | zj0512@gmail.com |
a76686d677e0af6f20af6378a0b31012344f55d6 | 954decc95ed7903f0eb218dcd6bfebe047ae480d | /alphabet_QML.py | 7bd96f9c626117f131ef72be64dde98a97aa136f | [] | no_license | wweronika/qml_language_guessing | d8fed6e1d0c061b5e1995cc81b8f4435feb1e0fb | f23e650391ae1c640f356e598977f03db6c3405c | refs/heads/master | 2022-12-25T17:53:49.106721 | 2020-09-16T16:00:11 | 2020-09-16T16:00:11 | 296,050,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | import numpy as np
from polyadicqml import Classifier
from polyadicqml.qiskit.utility import Backends
from polyadicqml.qiskit import qkCircuitML
from polyadicqml.manyq import mqCircuitML
from qiskit import Aer
from dataLoader import loadData
X_train, X_test, y_train, y_test = loadData("data.csv","languages.csv")
def languageCircuit(bdr, x, p):
    """Build the 2-qubit data-reuploading circuit used by the classifier.

    Features and trainable parameters are encoded two at a time: an
    ``allin`` rotation layer for a feature pair, then a CZ entangler and an
    ``allin`` layer for the matching parameter pair.

    :param bdr: circuit builder (polyadicqml ``circuitBuilder``); mutated
        in place and returned so calls can be chained.
    :param x: feature vector; must support integer-list fancy indexing
        (numpy array / pandas Series) and have the same even length as *p*
        (26 for the A-Z alphabet data, hence nbparams = 26).
    :param p: trainable parameter vector, indexed in lockstep with *x*.
    :return: *bdr*, with the encoding layers appended.
    """
    # First layer: leading feature pair, then entangle and apply the
    # matching parameter pair.
    bdr.allin(x[[0, 1]])
    bdr.cz(0, 1).allin(p[[0, 1]])
    # One entangle-and-encode layer per remaining pair. Generalized from the
    # original hard-coded range(2, 26, 2) to range(2, len(x), 2): behavior is
    # byte-identical for the 26-feature alphabet input, but the circuit now
    # adapts to any even feature count.
    for i in range(2, len(x), 2):
        bdr.cz(0, 1).allin(x[[i, i + 1]])
        bdr.cz(0, 1).allin(p[[i, i + 1]])
    return bdr
nbqbits = 2
nbparams = 26
qc = mqCircuitML(make_circuit=languageCircuit, nbqbits=nbqbits, nbparams=nbparams)
bitstr = ['00', '01', '10', '11']
model = Classifier(qc, bitstr).fit(X_train, y_train, method="BFGS")
backend = Aer.get_backend('qasm_simulator')
qc = qkCircuitML(
make_circuit=languageCircuit,
nbqbits=nbqbits, nbparams=nbparams,
backend=backend
)
model.set_circuit(qc)
model.nbshots = 330
model.job_size = 30
pred_train = model(X_train)
pred_test = model(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score
def print_results(target, pred, name="target"):
    """Print a separator bar, the confusion matrix and the accuracy score
    comparing predictions *pred* against ground-truth labels *target*.

    *name* labels which split (e.g. "train"/"test") is being reported.
    """
    separator = '\n' + '#' * 30
    matrix = confusion_matrix(target, pred)
    accuracy = accuracy_score(target, pred)
    print(separator,
          "Confusion matrix on {}:".format(name),
          matrix,
          "Accuracy : " + str(accuracy),
          sep='\n')
print_results(y_train, pred_train, name="train")
print_results(y_test, pred_test, name="test")
| [
"wiesiolekweronika@gmail.com"
] | wiesiolekweronika@gmail.com |
60455ef82917e652d266daf9f738e50cf8c30517 | 3ca229c0c485f7df336f5207726fe247dc1e2237 | /protwo/apptwo/models.py | 911f0aac0b6bfef568beac16f8fbbf99dba74162 | [] | no_license | muskanbhatt2355/django_db_fornms | f7d886523dbe5a29b2192d13a4fae0e099957f19 | 99fd4600cac575a6fef7033f8a68d975faa17864 | refs/heads/master | 2021-05-18T00:42:42.230134 | 2020-03-29T12:50:29 | 2020-03-29T12:50:29 | 251,029,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | from django.db import models
# Create your models here.
class User(models.Model):
    """Minimal user record: first/last name plus a unique contact e-mail."""
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    # unique=True enforces one account per e-mail address at the DB level.
    email = models.EmailField(max_length=264,unique=True)
class Friend(models.Model):
    """Friend entry holding a nickname and an address.

    NOTE(review): no ForeignKey to ``User`` is declared here — confirm
    whether these tables are meant to be related.
    """
    nick_name = models.CharField(max_length=255)
    address = models.CharField(max_length=255)
| [
"muskanbhatt2355@gmail.com"
] | muskanbhatt2355@gmail.com |
93af8f67f99cacadec773970d6e4593f6c1b339e | dd098f8a93f787e38676283679bb39a290ba28b4 | /samples/openapi3/client/3_0_3_unit_test/python-experimental/test/test_paths/test_response_body_post_maxlength_validation_response_body_for_content_types/test_post.py | b5ee61f9396dd84fdab727dd1f5cee04e4b0aa0f | [
"Apache-2.0"
] | permissive | InfoSec812/openapi-generator | 727c0235d3bad9b85ac12068808f844287af6003 | e0c72702c3d5dae2a627a2926f0cddeedca61e32 | refs/heads/master | 2022-10-22T00:31:33.318867 | 2022-08-20T14:10:31 | 2022-08-20T14:10:31 | 152,479,633 | 1 | 0 | Apache-2.0 | 2023-09-04T23:34:09 | 2018-10-10T19:38:43 | Java | UTF-8 | Python | false | false | 7,073 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
import unittest
from unittest.mock import patch
import urllib3
import unit_test_api
from unit_test_api.paths.response_body_post_maxlength_validation_response_body_for_content_types import post # noqa: E501
from unit_test_api import configuration, schemas, api_client
from .. import ApiTestMixin
class TestResponseBodyPostMaxlengthValidationResponseBodyForContentTypes(ApiTestMixin, unittest.TestCase):
"""
ResponseBodyPostMaxlengthValidationResponseBodyForContentTypes unit test stubs
"""
_configuration = configuration.Configuration()
def setUp(self):
used_api_client = api_client.ApiClient(configuration=self._configuration)
self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501
def tearDown(self):
pass
response_status = 200
def test_too_long_is_invalid_fails(self):
# too long is invalid
accept_content_type = 'application/json'
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
"foo"
)
mock_request.return_value = self.response(
self.json_bytes(payload),
status=self.response_status
)
with self.assertRaises((unit_test_api.ApiValueError, unit_test_api.ApiTypeError)):
self.api.post(
accept_content_types=(accept_content_type,)
)
self.assert_pool_manager_request_called_with(
mock_request,
self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
method='post'.upper(),
content_type=None,
accept_content_type=accept_content_type,
)
def test_ignores_non_strings_passes(self):
# ignores non-strings
accept_content_type = 'application/json'
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
100
)
mock_request.return_value = self.response(
self.json_bytes(payload),
status=self.response_status
)
api_response = self.api.post(
accept_content_types=(accept_content_type,)
)
self.assert_pool_manager_request_called_with(
mock_request,
self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
method='post'.upper(),
accept_content_type=accept_content_type,
)
assert isinstance(api_response.response, urllib3.HTTPResponse)
assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
payload,
_configuration=self._configuration
)
assert api_response.body == deserialized_response_body
def test_shorter_is_valid_passes(self):
# shorter is valid
accept_content_type = 'application/json'
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
"f"
)
mock_request.return_value = self.response(
self.json_bytes(payload),
status=self.response_status
)
api_response = self.api.post(
accept_content_types=(accept_content_type,)
)
self.assert_pool_manager_request_called_with(
mock_request,
self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
method='post'.upper(),
accept_content_type=accept_content_type,
)
assert isinstance(api_response.response, urllib3.HTTPResponse)
assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
payload,
_configuration=self._configuration
)
assert api_response.body == deserialized_response_body
def test_two_supplementary_unicode_code_points_is_long_enough_passes(self):
# two supplementary Unicode code points is long enough
accept_content_type = 'application/json'
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
"💩💩"
)
mock_request.return_value = self.response(
self.json_bytes(payload),
status=self.response_status
)
api_response = self.api.post(
accept_content_types=(accept_content_type,)
)
self.assert_pool_manager_request_called_with(
mock_request,
self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
method='post'.upper(),
accept_content_type=accept_content_type,
)
assert isinstance(api_response.response, urllib3.HTTPResponse)
assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
payload,
_configuration=self._configuration
)
assert api_response.body == deserialized_response_body
def test_exact_length_is_valid_passes(self):
# exact length is valid
accept_content_type = 'application/json'
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
"fo"
)
mock_request.return_value = self.response(
self.json_bytes(payload),
status=self.response_status
)
api_response = self.api.post(
accept_content_types=(accept_content_type,)
)
self.assert_pool_manager_request_called_with(
mock_request,
self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
method='post'.upper(),
accept_content_type=accept_content_type,
)
assert isinstance(api_response.response, urllib3.HTTPResponse)
assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
payload,
_configuration=self._configuration
)
assert api_response.body == deserialized_response_body
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | InfoSec812.noreply@github.com |
55c74120bbc858ded14dfd2fcc953a98e1bb0d3a | 3de0943794ef0b94732a339b40b070cfae855e91 | /django_helloworld/view.py | da4287510134f37b8ad4e6617d0a9568c0542949 | [] | no_license | haibin-chen/TRON_AI_WEB | 72f675e337203cb420b511ebb51323636d740456 | 859b1ea1ccca07a0ffab627e8f68c5a8ce1d82ce | refs/heads/master | 2020-07-05T22:47:19.883415 | 2019-08-16T22:38:44 | 2019-08-16T22:38:44 | 202,805,880 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | #from django.shortcuts import HttpResponse
from django.shortcuts import render
user_list = []
def index(request):
    """Render the index page; on POST, record the submitted credentials.

    Appends a ``{'user': ..., 'pwd': ...}`` dict to the module-level
    ``user_list`` and re-renders the template with every entry so far.

    NOTE(review): passwords are kept in plain text in process memory and
    echoed back to the template — acceptable for a tutorial only, never
    for production use.
    """
    if request.method == 'POST':
        entry = {
            'user': request.POST.get('username'),
            'pwd': request.POST.get('password'),
        }
        user_list.append(entry)
    return render(request, 'index.html', {'data': user_list})
"18688017303@163.com"
] | 18688017303@163.com |
38801dbfe2808511d05323af89e49be9254d06bd | 40b5c4a77be465b47fe6fd7ff408db957261cc7f | /python-spake2-0.7/setup.py | c6365b7a7fcc9b95f11fa6dfb09513fabbc2ab8f | [
"MIT"
] | permissive | warner/spake2-interop-server | 7c1f0502a93615d2e2b5b7a323731a7e20040f86 | b3f2ae42971e4217d9f503bb672b2d9288225acc | refs/heads/master | 2021-01-25T11:27:45.696023 | 2017-06-10T22:15:15 | 2017-06-10T22:15:15 | 93,924,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | "A server that performs SPAKE2 operations, for interoperability testing."
# Install this, then run "twist spake2_interop" and hit http://HOST:8705/
from setuptools import setup
import versioneer
setup(
    name="spake2-interop-python-spake2-0.7",
    # Version string is derived from VCS tags by versioneer.
    version=versioneer.get_version(),
    author="Brian Warner",
    author_email="warner@lothar.com",
    package_dir={"": "src"},
    # this must be installed into its own virtualenv (e.g. spake2-0.7 can't
    # share a venv with spake2-0.3), so we don't need a version-specific
    # package name, and keeping it neutral will minimize the diff
    packages=["spake2_interop_python"],
    license="MIT",
    cmdclass=versioneer.get_cmdclass(),
    # Pin the exact spake2 release this interop server exercises.
    install_requires=[
        "spake2==0.7",
    ],
    # Version-suffixed script name, so several interop servers (one per
    # pinned spake2 release) can coexist on PATH.
    entry_points={
        "console_scripts":
        [
            "spake2_interop_python_0_7 = spake2_interop_python:run",
        ]
    },
)
| [
"warner@lothar.com"
] | warner@lothar.com |
5d249e5a94dad27f9c4ca7c6dc0f40d358d1085d | 3e8438d40886725baa7ad9b567f88cf38de1669c | /docs/conf.py | b4d11a71de3411adaffdb2af56ce249591fd1228 | [
"MIT",
"BSD-3-Clause"
] | permissive | weisjohn/django-podcasting | 64d53063997abfb9a54cb3684ccf3f512a0256e3 | 4a66e36d063a71d57029e4abcecfaf675f399a8d | refs/heads/master | 2021-01-16T22:57:56.892493 | 2012-09-19T16:27:55 | 2012-09-19T16:27:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,061 | py | # -*- coding: utf-8 -*-
#
# django-podcasting documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 30 20:36:20 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-podcasting'
copyright = u'2011, Thomas Schreiber'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.4'
# The full version, including alpha/beta/rc tags.
release = '0.9.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-podcastingdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-podcasting.tex', u'django\\_podcasting Documentation',
u'Thomas Schreiber', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-podcasting', u'django-podcasting Documentation',
[u'Thomas Schreiber'], 1)
]
| [
"tom@insatsu.us"
] | tom@insatsu.us |
5d372241bfa8c697447a27443748db1eddb6dd54 | 439c62f4805a7058baa909332e114394ed72e2fa | /apps/webtrike/dmf/urls.py | cdf76512c1fc42da79d109aab5b6e859e7a474df | [
"BSD-3-Clause"
] | permissive | webtrike/webtrike | 17c4576eb64d2fe8a3f3d6f5612e95fa1a417185 | 2785ffe74d33e0e8fed1118b501165381e521416 | refs/heads/master | 2021-01-15T16:53:32.006292 | 2014-11-06T23:56:20 | 2014-11-06T23:56:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,540 | py | # Copyright (c) 2003-2014 COMMONWEALTH SCIENTIFIC AND INDUSTRIAL RESEARCH
# ORGANISATION ('CSIRO'). All rights reserved.
#
# This licence is based on the standard BSD Licence.
#
# 1. Redistribution and use of this software in source and binary forms, with
# or without modification, are permitted provided that the following
# conditions are met:
#
# * Redistributions of the software in source code form must retain the
# above copyright notice, this list of conditions and the following
# disclaimer.
# * Redistributions in of the software in binary code form must reproduce
# the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
# * Neither the name of the CSIRO nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# 2. THIS SOFTWARE IS PROVIDED BY CSIRO AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED.
#
# 3. IN NO EVENT SHALL CSIRO OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# The following section provides additional exclusionary and limitations
# provisions in addition to those of the standard BSD Licence:
#
# 4. FOR CLARITY THE LIMITATIONS ON CSIRO AND CONTRIBUTORS' LIABILITY
# OUTLINED IN CLAUSES 2 AND 3 APPLY TO THE EXTENT PERMITTED BY LAW. CSIRO
# AND CONTRIBUTOR LIABILITY IN RESPECT OF ANY IMPLIED GUARANTEES WHICH
# CANNOT BE EXCLUDED UNDER LAW IS LIMITED AT CSIRO'S OPTION TO:
#
# (i) THE REPLACEMENT OF THE SOFTWARE OR THE SUPPLY OF EQUIVALENT
# SOFTWARE;
# (ii) THE REPAIR OF THE SOFTWARE;
# (iii) THE PAYMENT OF THE COST OF REPLACING THE SOFTWARE OR OF ACQUIRING
# EQUIVALENT SOFTWARE OR HAVING THE SOFTWARE REPAIRED.
#
# 5. CSIRO LICENCES THE SOFTWARE ONLY TO THE EXTENT CSIRO HAS RIGHT, TITLE
# AND INTEREST IN THE SOFTWARE.
#
from django.conf.urls import patterns, include, url
from apps.webtrike.trike.views import TrikeCommandView
from apps.webtrike.dmf.views import providers_for_model, providers_for_model_dummy, clear_cache
from apps.webtrike.dmf.cmds import DataSets, DataStreams, DataTypes
urlpatterns = patterns('',
url(r'^types/$', TrikeCommandView.as_view(cmd=DataTypes, fieldName='types'), name='data-types'),
url(r'^streams/$', TrikeCommandView.as_view(cmd=DataStreams, fieldName='streams'), name='data-streams'),
url(r'^streams/sets/(?P<stream>[-\w]+)/$', TrikeCommandView.as_view(cmd=DataSets, fieldName='sets'), name='sets-for-streams'),
url(r'^providers/(?P<name>[-\w]+)/$', providers_for_model, name='model-providers'),
url(r'^providers/$', providers_for_model_dummy, name='model-providers-base-url'),
url(r'^clearcache/$', clear_cache, name='clear-cache'),
)
| [
"Simon.Pigot@csiro.au"
] | Simon.Pigot@csiro.au |
8c89aacaee5a7483a793bb04724f4a861b956c0e | e4596f84d7d241fe6018cff171c00660f291d153 | /app/__init__.py | f0bb3382d64641f9a1427ac19fe8ed432aadd8c8 | [] | no_license | PablNico/aula-flask | e4aad8c3a5801183d0206d64bb59639be555702e | c66d5c8f0b91fff9e96ed0bfbbb66544c30ba967 | refs/heads/master | 2020-09-11T06:31:18.079353 | 2019-11-16T05:27:13 | 2019-11-16T05:27:13 | 221,971,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
# Module-level Flask app plus its extensions (no application factory).
app = Flask(__name__)
app.config.from_object('config')  # settings are loaded from config.py

db = SQLAlchemy(app)
migrate = Migrate(app, db)

# Flask-Script manager: exposes `db` migration subcommands on the CLI.
manager = Manager(app)
manager.add_command('db', MigrateCommand)
from app.models import tables , forms
from app.controllers import default | [
"nico_camboriu@hotmail.com"
] | nico_camboriu@hotmail.com |
9fa28da8427b89b3d954bdd756fd2ebcba4686a1 | 83048ab1abb6941ed0b19fb5e5ff4a9d14b48e8c | /CODEFORCES/two_teams.py | 7d256d350d953f576fe903bf8811b7a18f57716a | [] | no_license | harshitalpha/Algorithms | ebad07cc77516ab5c35ae414462d10a38d5ef97e | 2f7dcf4c3bb4390267231c7c96f7e76399c0166e | refs/heads/master | 2021-07-14T17:34:02.546583 | 2020-06-25T06:38:39 | 2020-06-25T06:38:39 | 178,813,562 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | t = int(input())
def _solve(skills):
    """Return the answer for one query (largest x such that one team of x
    equal skills and one team of x distinct skills can both be formed).

    With m = highest skill frequency and k = number of distinct skills,
    the answer is max(min(m - 1, k), min(m, k - 1)): either the duplicated
    skill also appears in the distinct team, or it does not.  This is
    equivalent to the original four-branch comparison chain, but avoids
    its fragile `is`/`is not` identity comparisons on int objects.
    """
    from collections import Counter  # local import keeps the block self-contained
    counts = Counter(skills)
    m = max(counts.values())
    k = len(counts)
    return max(min(m - 1, k), min(m, k - 1))


# `t` (the number of queries) is read from stdin just above this loop.
while t:
    t = t - 1
    n = int(input())  # declared length of the next line; only consumed
    a = [int(s) for s in input().split()]
    print(_solve(a))
| [
"harshitsinghal1103@gmail.com"
] | harshitsinghal1103@gmail.com |
b67ec65da5b89ee26ecfac71462afdedf4ad07d3 | a72f39b82966cd6e2a3673851433ce7db550429a | /configs/_base_/models/lxmert/lxmert_vqa_config.py | 781219951d7cd25d348c58e16f382837a1dcbeaf | [
"Apache-2.0"
] | permissive | linxi1158/iMIX | 85841d6b95e1d99ed421a1ac3667658e49cae6fc | af87a17275f02c94932bb2e29f132a84db812002 | refs/heads/master | 2023-06-09T23:37:46.534031 | 2021-06-30T12:09:42 | 2021-06-30T12:09:42 | 381,608,650 | 0 | 0 | Apache-2.0 | 2021-06-30T07:08:40 | 2021-06-30T07:08:39 | null | UTF-8 | Python | false | false | 1,753 | py | # model settings
model = dict(
type='LXMERT',
params=dict(
random_initialize=False,
num_labels=3129,
# BertConfig
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act='gelu',
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
#
mode='lxr',
l_layers=9, # 12
x_layers=5, # 5
r_layers=5, # 0
visual_feat_dim=2048,
visual_pos_dim=4,
freeze_base=False,
max_seq_length=20,
model='bert',
training_head_type='vqa2',
bert_model_name='bert-base-uncased',
pretrained_path='/home/datasets/mix_data/iMIX/data/models/model_LXRT.pth',
label2ans_path='/home/datasets/mix_data/lxmert/vqa/trainval_label2ans.json',
))
loss = dict(type='LogitBinaryCrossEntropy')
optimizer = dict(
type='BertAdam',
lr=5e-5,
weight_decay=0.01,
eps=1e-6,
betas=[0.9, 0.999],
max_grad_norm=-1,
training_encoder_lr_multiply=1,
)
optimizer_config = dict(grad_clip=dict(max_norm=5))
'''
fp16 = dict(
init_scale=2.**16,
growth_factor=2.0,
backoff_factor=0.5,
growth_interval=2000,
)
'''
lr_config = dict(
warmup=0.1,
warmup_method='warmup_linear',
# max_iters=55472, # ceil(totoal 443753 / batch size 32) * epoch size datasets: train
max_iters=79012, # floor(totoal 632117 / batch size 32) * epoch size datasets: train, nominival
policy='BertWarmupLinearLR')
total_epochs = 4
seed = 9595
| [
"hsslab.inspur@gmail.com"
] | hsslab.inspur@gmail.com |
3ce16db906255fed4a034b681f5a1357ab4305d5 | aa043fa26904adf717e4d05f74c5589d0bee8842 | /SourceCode/time_counter.py | ca5ff71e7b4a267978928ee8559b7972488977eb | [] | no_license | mrvladkino27/dbis-Lab-2 | 3485453245a6399655a3dd99543cadeda346729a | 75c337689234bb868305668c78b31c5d31750f00 | refs/heads/main | 2023-04-02T00:13:16.947608 | 2021-04-05T21:00:57 | 2021-04-05T21:00:57 | 354,691,554 | 0 | 0 | null | 2021-04-05T20:58:24 | 2021-04-05T02:08:42 | Python | UTF-8 | Python | false | false | 637 | py | import time
from functools import wraps
time_result = 'Results/TimeResult.txt'
with open(time_result, 'w'):
pass
def profile_time(fn):
@wraps(fn)
def inner(*args, **kwargs):
with open(time_result, 'a') as profile_log:
fn_kwargs_str = ', '.join(f'{k}={v}' for k, v in kwargs.items())
profile_log.write(f'\n{fn.__name__}({fn_kwargs_str})\n')
# Measure time
t = time.perf_counter()
retval = fn(*args, **kwargs)
elapsed = time.perf_counter() - t
profile_log.write(f'Time {elapsed:0.4} s\n')
return retval
return inner
| [
"mar4vv@gmail.com"
] | mar4vv@gmail.com |
8626edcebc5d57619798aec921223388d499ef0b | f77327128a8da9702ae3443e2171bc7485ceb915 | /cadence/items.py | 08b1b50b16175e30c34717833ce6af94ae712ed4 | [] | no_license | SimeonYS/cadence | 0eeba6a54c03ffb2d55466f9d8de6f1b1662002f | cdaef13c85a03e031a0050c89c17249cd7d83125 | refs/heads/main | 2023-03-31T08:24:41.408507 | 2021-03-31T10:14:01 | 2021-03-31T10:14:01 | 353,312,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | import scrapy
class CadenceItem(scrapy.Item):
title = scrapy.Field()
content = scrapy.Field()
date = scrapy.Field()
link = scrapy.Field()
| [
"simeon.simeonov@ADPVT.com"
] | simeon.simeonov@ADPVT.com |
17602e35cc61bc0f7fc211873d8f4e8f3498781a | 4ca44b7bdb470fcbbd60c2868706dbd42b1984c9 | /20.12.14/BOJ_20056.py | d156e0cef3cfd5583ae7fcf2a95e7de4fd8f8efa | [] | no_license | titiman1013/Algorithm | 3b3d14b3e2f0cbc4859029eb73ad959ec8778629 | 8a67e36931c42422779a4c90859b665ee468255b | refs/heads/master | 2023-06-29T17:04:40.015311 | 2021-07-06T01:37:29 | 2021-07-06T01:37:29 | 242,510,483 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,827 | py | import sys; sys.stdin = open('text1.txt', 'r')
# solve test1
# dx = [-1, -1, 0, 1, 1, 1, 0, -1]
# dy = [0, 1, 1, 1, 0, -1, -1, -1]
# def move_fireball(x, y, m, s, d):
# for _ in range(s):
# nx = x + dx[d]
# ny = y + dy[d]
# if 0 <= nx < N and 0 <= ny < N:
# x, y = nx, ny
# else:
# if nx < 0 or nx >= N:
# if nx < 0:
# x = N + nx
# else:
# x = nx - N
# if ny < 0 or nx >= N:
# if ny < 0:
# y = N + ny
# else:
# y = ny - N
# if bool(arr[x][y]):
# arr[x][y].append([x, y, m, s, d])
# else:
# arr[x][y] = [[x, y, m, s, d]]
# return
# def sum_fireball(sum_list):
# list_cnt = len(sum_list)
# m = 0
# s = 0
# d = []
# for idx in range(list_cnt):
# m += sum_list[idx][2]
# s += sum_list[idx][3]
# if d % 2:
# d.append(1)
# else:
# d.append(0)
# m = m // 5
# if m == 0:
# return [0]
# s = s // list_cnt
# d_check = True
# temp_d = d[0]
# for i in range(1, len(d)):
# if d[i] != temp_d:
# d_check = False
# break
# if d_check == True:
# d = [0, 2, 4, 6]
# else:
# d = [1, 3, 5, 7]
# temp_list = []
# for i in range(4):
# temp_list.append([sum_list[0], sum_list[1], m, s, d[i]])
# return temp_list
# # 방향
# # 인접한 행렬 12시부터 시계방향
# # 7 0 1
# # 6 2
# # 5 4 3
# for tc in range(1, int(input()) + 1):
# N, M, K = map(int, input().split())
# # [r, c, m, s, d]
# items = [list(map(int, input().split())) for _ in range(M)]
# arr = [[0] * N for _ in range(N)]
# if K > 0:
# # 처음 시행
# for item in items:
# move_fireball(item[0] - 1, item[1] - 1, item[2], item[3], item[4])
# print(arr)
# move_cnt = 1
# while move_cnt <= K:
# # 움직이기
# for i in range(N):
# for j in range(N):
# if bool(arr[i][j]):
# if len(arr[i][j]) >= 2:
# temp_list = arr[i][j][0]
# arr[i][j] = 0
# for k in range(len(temp_list)):
# move_fireball(temp_list[k][0], temp_list[k][1], temp_list[k][2], temp_list[k][3], temp_list[k][4])
# else:
# temp_list = arr[i][j][0]
# arr[i][j] = 0
# print(arr)
# move_fireball(temp_list[0], temp_list[1], temp_list[2], temp_list[3], temp_list[4])
# # 합치기
# for i in range(N):
# for j in range(N):
# if len(arr[i][j]) >= 2:
# arr[i][j] = sum_fireball(arr[i][j])
# move_cnt += 1
# res = 0
# for i in range(N):
# for j in range(N):
# if bool(arr[i][j]):
# if len(arr[i][j]) >= 2:
# for k in range(len(arr[i][j])):
# res += arr[i][j][k][2]
# else:
# res += arr[i][j][0][2]
# print(f'#{tc} {res}')
# solve test2
from collections import deque
for tc in range(1, int(input()) + 1):
N, M, K = map(int, input().split())
# [r, c, m, s, d]
items = [list(map(int, input().split())) for _ in range(M)]
arr = [[0] * N for _ in range(N)]
q = deque()
for item in items:
q.append(item)
for _ in range(K):
while q:
| [
"hyunsukr1013@gmail.com"
] | hyunsukr1013@gmail.com |
2690bfb16e5b6214300846a92dcba2e377262ff9 | c55333c0b3b8c199c0489101e077e8bfd5869948 | /pcdet/ops/pointnet2/pointnet2_stack/pointnet2_modules.py | 659af35d9b3e8b458f2b8ca57910b36bb9d0d2e7 | [
"Apache-2.0"
] | permissive | kathy-lee/astyx-pcdet | 21419b82beec87194fdf942e5a19f668a98866fe | 355bfd88c37e1b717482f651778c1d4cb2f647d2 | refs/heads/dev | 2023-04-01T08:42:47.995985 | 2021-04-11T08:54:38 | 2021-04-11T08:54:38 | 299,564,432 | 4 | 1 | Apache-2.0 | 2021-04-11T08:54:38 | 2020-09-29T09:14:30 | Python | UTF-8 | Python | false | false | 5,425 | py | from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
class StackSAModuleMSG(nn.Module):
    """Multi-scale-grouping set-abstraction layer for stacked (batch-flattened)
    point clouds: each scale ball-queries neighbours around the new points,
    runs a shared MLP over the grouped features and pools across the
    neighbourhood; the per-scale outputs are concatenated channel-wise.
    """

    def __init__(self, *, radii: List[float], nsamples: List[int], mlps: List[List[int]],
                 use_xyz: bool = True, pool_method='max_pool'):
        """
        Args:
            radii: list of float, list of radii to group with
            nsamples: list of int, number of samples in each ball query
            mlps: list of list of int, spec of the pointnet before the global pooling for each scale
            use_xyz: if True, 3 relative-xyz channels are appended to the grouped
                features, and each mlps[i][0] is bumped by 3 accordingly
            pool_method: max_pool / avg_pool
        """
        super().__init__()
        # One grouper and one shared MLP per scale; the lists must align.
        assert len(radii) == len(nsamples) == len(mlps)
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz))
            mlp_spec = mlps[i]
            if use_xyz:
                # NOTE(review): this mutates the caller's `mlps` lists in place;
                # downstream code may rely on the updated value — confirm
                # before changing to a copy.
                mlp_spec[0] += 3
            shared_mlps = []
            for k in range(len(mlp_spec) - 1):
                # kernel_size=1 Conv2d acts as a per-point fully connected layer.
                shared_mlps.extend([
                    nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
                    nn.BatchNorm2d(mlp_spec[k + 1]),
                    nn.ReLU()
                ])
            self.mlps.append(nn.Sequential(*shared_mlps))

        self.pool_method = pool_method
        self.init_weights()

    def init_weights(self):
        """Kaiming-initialise conv weights; BatchNorm starts at w=1, b=0."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0)

    def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True):
        """
        :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
        :param new_xyz: (M1 + M2 ..., 3)
        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
        :param empty_voxel_set_zeros: accepted for API compatibility; not used here
        :return:
            new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
            new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []
        for k in range(len(self.groupers)):
            new_features, ball_idxs = self.groupers[k](
                xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
            )  # (M1 + M2, C, nsample)
            # Insert a fake batch dimension so Conv2d/BN2d can serve as the
            # shared MLP over all stacked points at once.
            new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0)  # (1, C, M1 + M2 ..., nsample)
            new_features = self.mlps[k](new_features)  # (1, C, M1 + M2 ..., nsample)

            # Pool over the nsample neighbourhood dimension.
            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            else:
                raise NotImplementedError

            new_features = new_features.squeeze(dim=0).permute(1, 0)  # (M1 + M2 ..., C)
            new_features_list.append(new_features)

        # Concatenate the per-scale descriptors channel-wise.
        new_features = torch.cat(new_features_list, dim=1)  # (M1 + M2 ..., C)
        return new_xyz, new_features
class StackPointnetFPModule(nn.Module):
    """Feature-propagation layer for stacked point clouds: features on the
    sparse `known` set are interpolated onto the dense `unknown` set via
    inverse-distance-weighted 3-NN, optionally concatenated with the unknown
    points' own features, then refined by a shared MLP.
    """

    def __init__(self, *, mlp: List[int]):
        """
        Args:
            mlp: list of int, channel sizes of the shared MLP applied after
                interpolation (mlp[0] must equal C2, or C2 + C1 when
                unknown_feats is supplied in forward())
        """
        super().__init__()
        shared_mlps = []
        for k in range(len(mlp) - 1):
            # kernel_size=1 Conv2d acts as a per-point fully connected layer.
            shared_mlps.extend([
                nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
                nn.BatchNorm2d(mlp[k + 1]),
                nn.ReLU()
            ])
        self.mlp = nn.Sequential(*shared_mlps)

    def forward(self, unknown, unknown_batch_cnt, known, known_batch_cnt, unknown_feats=None, known_feats=None):
        """
        Args:
            unknown: (N1 + N2 ..., 3)
            known: (M1 + M2 ..., 3)
            unknow_feats: (N1 + N2 ..., C1)
            known_feats: (M1 + M2 ..., C2)

        Returns:
            new_features: (N1 + N2 ..., C_out)
        """
        # Three nearest known neighbours for every unknown point.
        dist, idx = pointnet2_utils.three_nn(unknown, unknown_batch_cnt, known, known_batch_cnt)
        # Inverse-distance weights normalised to sum to 1 (eps avoids /0).
        dist_recip = 1.0 / (dist + 1e-8)
        norm = torch.sum(dist_recip, dim=-1, keepdim=True)
        weight = dist_recip / norm

        interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)

        if unknown_feats is not None:
            new_features = torch.cat([interpolated_feats, unknown_feats], dim=1)  # (N1 + N2 ..., C2 + C1)
        else:
            new_features = interpolated_feats

        # Fake batch + width dimensions so Conv2d can act as the shared MLP.
        new_features = new_features.permute(1, 0)[None, :, :, None]  # (1, C, N1 + N2 ..., 1)
        new_features = self.mlp(new_features)

        new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0)  # (N1 + N2 ..., C)
        return new_features
| [
"kangle.leehd@gmail.com"
] | kangle.leehd@gmail.com |
f68fdc5e64491bc82fb7acc885b1ec07cf1f8e1e | 8a716e77b457ab4262965f421681ce6028d5da4e | /04_Code/02_Slicer/parameters/wcs_args.py | ee1a816024316e5188e65dff86befa1bd17141e0 | [] | no_license | wenqian157/eggShell2.0 | 3d1adb0b8ebc04fd57d7f913176877bb889b5b35 | 80ab6c9e469e2bd46b52f113581545d190afe063 | refs/heads/master | 2020-06-27T01:16:07.008979 | 2019-09-13T06:32:25 | 2019-09-13T06:32:25 | 199,807,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py |
wcsargs = {
'line_definition': 12,
'normal_speed': 48,
'fast_speed': 150,
}
| [
"abarney@student.ethz.ch"
] | abarney@student.ethz.ch |
15d89bb2c2dec4533238724966437d8e1504df66 | dc20b5c8c5a3d8657bf40a6117bc438383b20f94 | /0x11-python-network_1/10-my_github.py | b8c3f5f27155a11e0d8f6326668497ecc535411e | [] | no_license | steffanynaranjov/holbertonschool-higher_level_programming | f7b0fca2b76e3942f960ca2e048373acbd0c9b41 | 67dcce3040c67c7a18e6f04a761224f1a754deac | refs/heads/master | 2023-03-02T23:18:05.640174 | 2021-02-15T16:08:42 | 2021-02-15T16:08:42 | 291,816,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | #!/usr/bin/python3
"""Write a Python script that takes your Github credentials (username and
password) and uses the Github API to display your id"""
if __name__ == '__main__':
import requests
import sys
authorize = requests.auth.HTTPBasicAuth(sys.argv[1], sys.argv[2])
response = requests.get('https://api.github.com/user', auth=authorize)
try:
print(response.json().get('id'))
except ValueError:
print("Not a valid JSON")
| [
"steffany.naranjo@gmail.com"
] | steffany.naranjo@gmail.com |
96949ce95b73388743e1f2c1e83eef7a4cd24919 | 04e3e11a65c7eeb9e4616f15e3458187c62633c7 | /server/huddle/workspace.py | 36cdd7435b359b121ae1606a8f4f640f66749b44 | [] | no_license | Zhixinb/Huddle | f804a329442c705c5949cfdb7c054593f639f2ec | 47e51ff300fd9e2cb040697804294d41d844a42f | refs/heads/main | 2023-03-27T08:07:38.061667 | 2021-03-27T01:27:24 | 2021-03-27T01:27:24 | 307,011,268 | 0 | 0 | null | 2021-02-18T03:54:21 | 2020-10-25T02:41:27 | Vue | UTF-8 | Python | false | false | 3,448 | py | """Object for tracking workspace status"""
from datetime import datetime
import time
import random
import math
import string
import os
from enum import IntEnum
from huddle import users
class Permission(IntEnum):
    """Workspace access levels, ordered from most to least privileged."""
    OWNER = 0
    EDITOR = 1
    VIEWER = 2
    PERM_DENIED = 3


class Workspace(object):
    """Object for tracking a workspace's users, permissions and lifetime."""

    # Registry of every workspace ID issued so far; consulted (and now also
    # updated) by generate_workspace_id() to guarantee uniqueness.
    workspace_ids = set()
    # pylint: disable=too-many-instance-attributes

    def __init__(self, host, permission=Permission.OWNER):
        """Create a workspace owned by `host`.

        `permission` is kept for backward compatibility with existing
        callers; the host is always registered as OWNER.
        """
        self.date_created = datetime.now()
        self.date_modified = self.date_created
        self.workspace_id = self.generate_workspace_id()
        self.users = users.Users()
        self.user_perms = {}
        # initialize workspace: owner permission + default share state
        self.generate_workspace(host)

    def to_json(self):
        """Serialize object to a JSON-compatible dict."""
        return {
            "workspace_id": self.workspace_id,
            "users": self.users.as_dict(),
            "date_created": str(self.date_created),
            "date_modified": str(self.date_modified),
            "user_perms": self.user_perms,
            "global_share_state": self.global_share_state
        }

    def generate_workspace(self, host):
        """Register `host` as OWNER and deny access to everyone else."""
        self.user_perms[host] = Permission.OWNER
        self.global_share_state = Permission.PERM_DENIED

    def add_user(self, sid, param):
        """Add a user (by session id) to the user collection."""
        self.users.add(sid, param)

    def remove_user(self, sid):
        """Remove a user (by session id) from the user collection."""
        self.users.remove(sid)

    def has_access(self, sid):
        """Return True if `sid` has an explicit permission entry."""
        return sid in self.user_perms

    def get_user_perms(self):
        """Return all explicit permissions as a list of {uid, perm} dicts."""
        return [{"uid": uid, "perm": perm} for uid, perm in self.user_perms.items()]

    def get_user_perm(self, uid):
        """Return `uid`'s permission, falling back to the global share state."""
        return self.user_perms.get(uid, self.global_share_state)

    def remove_user_perm(self, uid):
        """Drop `uid`'s explicit permission; the OWNER cannot be removed.

        Returns True when an entry was removed, False otherwise.
        """
        # Bug fix: compare enum values with != rather than `is not`; identity
        # checks silently break if a plain int ever ends up in user_perms.
        if uid in self.user_perms and self.user_perms[uid] != Permission.OWNER:
            self.user_perms.pop(uid, None)
            return True
        return False

    def get_role(self, uid):
        """Return the name of `uid`'s permission level (e.g. 'OWNER')."""
        return Permission(self.get_user_perm(uid)).name

    def get_can_share(self, uid):
        """Only owners and editors may share the workspace."""
        perm = self.get_user_perm(uid)
        # Bug fix: equality instead of identity (see remove_user_perm).
        return perm == Permission.OWNER or perm == Permission.EDITOR

    @classmethod
    def generate_workspace_id(cls):
        """Generate and register a random, unused 5-letter workspace ID.

        Bug fix: the original recursed through an unqualified
        `generate_workspace_id()` call (a NameError inside a classmethod)
        and never added issued IDs to `workspace_ids`, so the collision
        check could never take effect.
        """
        id_length = 5
        rng = random.SystemRandom()
        while True:
            candidate_id = ''.join(
                rng.choice(string.ascii_uppercase) for _ in range(id_length))
            if candidate_id not in cls.workspace_ids:
                cls.workspace_ids.add(candidate_id)
                return candidate_id

    @classmethod
    def getPermissionDict(cls):
        """Map permission names to their integer values."""
        return {permission.name: permission.value for permission in Permission}

    def regenerate_id(self):
        """Replace this workspace's ID with a fresh unique one."""
        # Free the old ID so it may be handed out again later.
        Workspace.workspace_ids.discard(self.workspace_id)
        self.workspace_id = self.generate_workspace_id()

    def uptime(self):
        """Return minutes between creation and last modification (2 d.p.)."""
        d1_ts = time.mktime(self.date_created.timetuple())
        d2_ts = time.mktime(self.date_modified.timetuple())
        return round(float(d2_ts - d1_ts) / 60, 2)
"r914721222@gmail.com"
] | r914721222@gmail.com |
09ea73874644048de8c9b8f9853ef8b89a19b1d7 | 9551d4d19898b3acbe0c7ebab89ce3a55e9aab89 | /app/models.py | 6641f863552b0afe64fcd3192c1b7f8de7aaef5e | [] | no_license | erntye/Attendy | 4d8eb11aee52c0f3e85e17b6da76c36cea072e65 | e32889d0e17b5187845b0d3ac784e4e299866cb6 | refs/heads/master | 2020-05-01T02:04:47.408215 | 2019-03-22T21:18:56 | 2019-03-22T21:18:56 | 177,209,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | from django.contrib.auth.models import User
from django.db import models
class Classroom(models.Model):
    """A class/course owned by an instructor (`user`), identified by `code`."""
    name = models.CharField(max_length=200)
    code = models.CharField(max_length=10)
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        # e.g. "Algorithms (CS101)"
        return str(self.name) + " (" + self.code + ")"
class Student(models.Model):
    """A student, identified by email and an external student ID."""
    email = models.CharField(max_length=200)
    studentID = models.CharField(max_length=100)

    def __str__(self):
        return str(self.studentID)
class ClassList(models.Model):
    """Join table: the enrolment of a Student in a Classroom."""
    student = models.ForeignKey(Student, on_delete=models.CASCADE, null=True)
    classroom = models.ForeignKey(Classroom, on_delete=models.CASCADE, null=True)
class ClassTime(models.Model):
    """A scheduled meeting slot of a Classroom within a given term."""
    classroom = models.ForeignKey(Classroom, on_delete=models.CASCADE)
    day = models.CharField(max_length=100)
    semester = models.CharField(max_length=100)
    year = models.CharField(max_length=100)
    start_time = models.DateTimeField(null=True)
    end_time = models.DateTimeField(null=True)
class Attendance(models.Model):
    """One attendance record: a Student checked in to a Classroom session.

    NOTE(review): the fingerprint fields (confidence_score, offset*,
    song_name, match_time, dejavu_log) appear to capture output of the
    dejavu audio matcher used to verify presence — confirm against the
    check-in view before relying on their semantics.
    """
    student = models.ForeignKey(Student, on_delete=models.CASCADE)
    classroom = models.ForeignKey(Classroom, on_delete=models.CASCADE)
    session = models.IntegerField(default=0)
    confidence_score = models.IntegerField(default=0)
    status = models.CharField(max_length=10, default=None)
    ip = models.CharField(max_length=100, default=None)  # client IP at check-in
    offset = models.CharField(max_length=100, default=None)
    song_name = models.CharField(max_length=100, default=None)
    offset_seconds = models.CharField(max_length=100, default=None)
    match_time = models.CharField(max_length=100, default=None)
    time_in = models.DateTimeField(null=True)
    dejavu_log = models.TextField(default=None)
class StudentInbound(models.Model):
    # NOTE(review): looks like a raw student check-in event recorded before
    # it is validated into an Attendance row — confirm against the views.
    studentID = models.CharField(max_length=100)
    classroom = models.ForeignKey(Classroom, on_delete=models.CASCADE)
    session = models.IntegerField(default=0)
    time_in = models.DateTimeField(null=True)
class InstructorInbound(models.Model):
    # NOTE(review): mirrors StudentInbound but keyed to the instructor's
    # User account — presumably marks when the instructor opened a session;
    # confirm against the caller.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    classroom = models.ForeignKey(Classroom, on_delete=models.CASCADE)
    session = models.IntegerField(default=0)
time_in = models.DateTimeField(null=True) | [
"erntye@hotmail.com"
] | erntye@hotmail.com |
2d5743604e3896bbdc53c29f382237794ab98650 | 409e6adc7c177e7a51bcd7622b4c96ad144a61ea | /working_waterfronts/working_waterfronts_api/tests/views/entry/test_list_hazards.py | a003441a8695570701aa22de5785d5b4d623b4aa | [
"Apache-2.0"
] | permissive | osu-cass/working-waterfronts-api | c540192b0630529af2e6da845a1892c4c7f1fa9b | f278361ee8a1433834be03be4464fc613c8c8661 | refs/heads/develop | 2021-01-17T09:29:00.611175 | 2021-01-05T20:25:07 | 2021-01-05T20:25:07 | 24,165,030 | 0 | 0 | null | 2017-02-27T18:03:24 | 2014-09-17T22:40:02 | Python | UTF-8 | Python | false | false | 2,391 | py | from django.test import TestCase
from django.core.urlresolvers import reverse
from working_waterfronts.working_waterfronts_api.models import Hazard
from django.contrib.auth.models import User
class ListHazardTestCase(TestCase):
    """Tests for the /entry/hazards listing view (login + pagination)."""

    # Fixture providing 33 Hazard rows — enough for three 15-item pages.
    fixtures = ['thirtythree']

    def setUp(self):
        # Create and log in a throwaway user; the view requires auth.
        user = User.objects.create_user(
            'temporary', 'temporary@gmail.com', 'temporary')
        user.save()
        response = self.client.login(
            username='temporary', password='temporary')
        self.assertEqual(response, True)

    def test_url_endpoint(self):
        """The named route must resolve to the expected URL."""
        url = reverse('entry-list-hazards')
        self.assertEqual(url, '/entry/hazards')

    def test_not_logged_in(self):
        """Anonymous users are redirected to the login page."""
        self.client.logout()
        response = self.client.get(
            reverse('entry-list-hazards'))
        self.assertRedirects(response, '/login?next=/entry/hazards')

    def test_list_items(self):
        """
        Tests to see if the list of hazards contains the proper
        hazards and proper hazard data
        """
        page_1 = self.client.get(reverse('entry-list-hazards')).context
        page_2 = self.client.get(
            '{}?page=2'.format(reverse('entry-list-hazards'))).context
        page_3 = self.client.get(
            '{}?page=3'.format(reverse('entry-list-hazards'))).context
        page_4 = self.client.get(
            '{}?page=4'.format(reverse('entry-list-hazards'))).context
        page_nan = self.client.get(
            '{}?page=NaN'.format(reverse('entry-list-hazards'))).context

        # Pages are ordered by name, 15 items each.
        self.assertEqual(
            list(page_1['item_list']),
            list(Hazard.objects.order_by('name')[:15]))
        self.assertEqual(
            list(page_2['item_list']),
            list(Hazard.objects.order_by('name')[15:30]))
        self.assertEqual(
            list(page_3['item_list']),
            list(Hazard.objects.order_by('name')[30:33]))

        # Page 4 should be identical to Page 3, as these fixtures
        # have enough content for three pages (15 items per page, 33 items)
        self.assertEqual(
            list(page_3['item_list']),
            list(page_4['item_list']))

        # Page NaN should be identical to Page 1, as Django paginator returns
        # the first page if the page is not an int
        self.assertEqual(
            list(page_1['item_list']),
            list(page_nan['item_list']))
| [
"tschuy@osuosl.org"
] | tschuy@osuosl.org |
96fc43212ea08796956075faccf1bdcdd550efe5 | ce8e96781573bef4f4a0ccfb968ee83588f7d173 | /list/list6/list6-4.py | bf985a2d617053d0f917ed9f1307987e6090b501 | [] | no_license | lelufig/program | 29645f734ec3e03e32b1e2e32d720414f879733b | 4da50505bafadc6d73e8164ee8cfeb8b6bd7e626 | refs/heads/master | 2020-04-05T07:28:20.168867 | 2018-11-09T06:35:54 | 2018-11-09T06:35:54 | 156,676,575 | 0 | 0 | null | 2018-11-09T06:35:55 | 2018-11-08T08:45:30 | Python | UTF-8 | Python | false | false | 274 | py | def max(a,b):
    # Keep whichever of the two arguments is larger; ties fall through to
    # the else branch, so equal inputs yield b (the same value either way).
    if a > b:
        max2 = a
    else:
        max2 = b
    return max2
# Read four integers and report the largest by chaining the pairwise max
# defined above (prompts and output message are intentionally Japanese).
a = int(input("整数1:"))
b = int(input("整数2:"))
c = int(input("整数3:"))
d = int(input("整数4:"))
print(f"大きい方の値は{max(max(max(a,b),c),d)}です")
"lelu-wangel.294@ezweb.ne.jp"
] | lelu-wangel.294@ezweb.ne.jp |
01c10d64dc9285a7bb24c2396ec53147de3fdef6 | 62bb15638c56adaca9a9ac389c4dbcdb1ebf0caa | /send-file-example.py | 4df9174fa931a6b8533bb95700df098fac9edd72 | [
"MIT"
] | permissive | eddyvdaker/Flask-Send-File-Example | 65c08cd2f4e1d14e5b30bd5b82c40d8783e11382 | 86e33803c81c0aa62050416b163eee61d223d9c3 | refs/heads/master | 2020-03-12T16:09:55.340170 | 2019-04-23T13:48:12 | 2019-04-23T13:48:12 | 130,709,285 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from flask import Flask, send_file
app = Flask(__name__)


@app.route('/')
def file_downloads():
    """Landing page: a single Download button that fetches /file."""
    return '''
<html>
<a href="http://127.0.0.1:5000/">Home</a><br>
<a href="/file"><button>Download</button></a>
</html>
'''


@app.route('/file')
def return_files():
    """Send test.csv (relative to the working directory) as an attachment."""
    # NOTE(review): `attachment_filename` was renamed to `download_name` in
    # Flask 2.0 and removed in 2.2 — this example targets older Flask.
    return send_file(
        'test.csv',
        mimetype='text/csv',
        attachment_filename='test.csv',
        as_attachment=True
    )


if __name__ == '__main__':
    app.run()
"noreply@github.com"
] | eddyvdaker.noreply@github.com |
b9e261bb4e999ccf8607482482e17a1b0b9f245f | 15ecea90563dd33796077142de400aaf9fd227fa | /Introduction to Python/Strings/String formatting/string_formatting.py | adce202b8e2af040f39ab4e48b3494f9b0a77913 | [] | no_license | isokolov/Python-EduTools | 4593f93c4388c6020fc03089a295f0bb50a0d824 | 8d75cccdc7b3948df668b4755566dd504b32cf33 | refs/heads/main | 2022-12-23T19:04:05.247358 | 2020-10-02T09:04:26 | 2020-10-02T09:04:26 | 300,346,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | name = "John"
print("Hello, PyCharm! My name is %s!" % name) # Note: %s is inside the string, % is after the string
years = 38
print("I'm %d years old" % years)
| [
"illya.sokolov82@gmail.com"
] | illya.sokolov82@gmail.com |
16f4f84a799fbad2d4951affd28a3893ee356839 | a667b52cb8d2ec857c55d33f04fc0e81d36dc681 | /options/data/mc/pipipi0_DecProdCut_PHSP_2012_MC_2012_Beam4000GeV-2012-MagUp-Nu2.5-Pythia8_Sim08e_Digi13_Trig0x409f0045_Reco14a_Stripping20r0p2NoPrescalingFlagged_27163403_ALLSTREAMS.DST.py | befddb5e34f37022361b1b2ddd67efe8ea3fa6bd | [] | no_license | wenyanyin/CP_violation_simulation | 639d73333a3795654275cb43cc7dad7c742d1be1 | 7b93b2fe1050fb30d0b809b758cd5a3b2824b875 | refs/heads/master | 2022-04-29T14:19:23.744004 | 2022-04-01T13:05:18 | 2022-04-01T13:05:18 | 168,570,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,745 | py | # lb-run LHCbDirac/prod dirac-bookkeeping-get-files -B /MC/2012/Beam4000GeV-2012-MagUp-Nu2.5-Pythia8/Sim08e/Digi13/Trig0x409f0045/Reco14a/Stripping20r0p2NoPrescalingFlagged/27163403/ALLSTREAMS.DST
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(
['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000001_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000003_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000004_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000005_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000006_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000008_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000009_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000011_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000012_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000013_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000014_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000015_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000016_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000017_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000018_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000019_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000020_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000021_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000022_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000024_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000025_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000027_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000028_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000029_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000030_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000033_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000034_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000036_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000037_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000039_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000040_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000042_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000044_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000046_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000048_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000049_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000050_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000052_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000053_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000054_2.AllStreams.dst'],
clear=True)
| [
"Michael.Alexander@glasgow.ac.uk"
] | Michael.Alexander@glasgow.ac.uk |
2f2fa3d5866e1bad552c4381ec35367e15445102 | 1b0cce7af80b0f8029411c1d0ddf9ffa532b462a | /ReportParser/DocumentParser/WriteConfigure.py | 35a8b6521897121b11927747fe3bcc108e07d53e | [] | no_license | jirachikai/ReportParserV0.4 | ba67be5cf97acdd3944463160ff5dea710ee5ebe | 1154139b7ee119a80c5e67ad3c15c1e9b2263eeb | refs/heads/master | 2021-01-19T05:36:26.561324 | 2013-11-01T02:15:18 | 2013-11-01T02:15:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,779 | py | import ConfigParser
import os
config = ConfigParser.ConfigParser()
config.add_section('Functionality')
config.set('Functionality','FT1','myRIO_Functionality No SPI and I2C Test-Suite Report')
config.set('Functionality','FT2','myRIO_Functionality Only SPI and I2C Test-Suite Report')
config.set('Functionality','FT3','myRIO_Module Integration Testing Test-Suite Report')
config.add_section('Performance')
config.set('Performance','PT1','myRIO fixedPersonality Performance No SPI_I2C Test-Suite Report')
config.set('Performance','PT2','myRIO ID instrumentDrivers Performance Test-Suite Report')
config.set('Performance','PT3','myRIO expressVI Performance Test-Suite Report')
config.set('Performance','PT4','myRIO ID I2CSPI Performance Test-Suite Report')
config.add_section('AI\Test_FP AI read all channels.vi')
config.set('AI\Test_FP AI read all channels.vi','Avg Read (ms)','0.02615')
config.add_section('AI\Test_FP AI read and open all channels.vi')
config.set('AI\Test_FP AI read and open all channels.vi','Avg Read (ms)','271.6')
config.add_section('AI\Test_FP AI read single channel.vi')
config.set('AI\Test_FP AI read single channel.vi','Avg Read (ms)','0.00097')
config.add_section('AO\Test_FP AO write all channels.vi')
config.set('AO\Test_FP AO write all channels.vi','Avg Read (ms)','0.00287')
config.add_section('AO\Test_FP AO write and open all channels.vi')
config.set('AO\Test_FP AO write and open all channels.vi','Avg Read (ms)','269.8')
config.add_section('AO\Test_FP AO write single channel.vi')
config.set('AO\Test_FP AO write single channel.vi','Avg Read (ms)','0.00077')
config.add_section('Control Loop\AI+AO\Test_FP Control Loop AI+AO.vi')
config.set('Control Loop\AI+AO\Test_FP Control Loop AI+AO.vi','Avg Read (ms)','0.04282')
config.add_section('DIO\Test_FP DIO read all channels.vi')
config.set('DIO\Test_FP DIO read all channels.vi','Avg Read (ms)','0.00328')
config.add_section('DIO\Test_FP DIO read_write all channels.vi')
config.set('DIO\Test_FP DIO read_write all channels.vi','Avg Read (ms)','0.00468')
config.add_section('DIO\Test_FP DIO write all channels.vi')
config.set('DIO\Test_FP DIO write all channels.vi','Avg Read (ms)','0.00465')
config.add_section('AI\Test_ID AI read all channels.vi')
config.set('AI\Test_ID AI read all channels.vi','Avg Read (ms)','0.01898')
config.add_section('AI\Test_ID AI read and open all channels.vi')
config.set('AI\Test_ID AI read and open all channels.vi','Avg Read (ms)','316.3')
config.add_section('AI\Test_ID AI read and open single channel.vi')
# Fix copy-paste bug: this value belongs to the AI section added on the line
# above. The original wrote it into the unrelated
# 'DIO\Test_FP DIO read_write all channels.vi' section (clobbering the
# 0.00468 value set earlier in this script) and left this AI section empty.
config.set('AI\Test_ID AI read and open single channel.vi','Avg Read (ms)','315.8')
config.add_section('AI\Test_ID AI read single channel.vi')
config.set('AI\Test_ID AI read single channel.vi','Avg Read (ms)','0.00771')
config.add_section('AO\Test_ID AO read all channels.vi')
config.set('AO\Test_ID AO read all channels.vi','Avg Read (ms)','0.02146')
config.add_section('AO\Test_ID AO read and open all channels.vi')
config.set('AO\Test_ID AO read and open all channels.vi','Avg Read (ms)','318.9')
config.add_section('AO\Test_ID AO read and open single channel.vi')
config.set('AO\Test_ID AO read and open single channel.vi','Avg Read (ms)','316')
config.add_section('AO\Test_ID AO read single channel.vi')
config.set('AO\Test_ID AO read single channel.vi','Avg Read (ms)','0.01654')
config.add_section('DI\Test_ID DI read all channels.vi')
config.set('DI\Test_ID DI read all channels.vi','Avg Read (ms)','0.14')
config.add_section('DI\Test_ID DI read and open all channels.vi')
config.set('DI\Test_ID DI read and open all channels.vi','Avg Read (ms)','328.1')
config.add_section('DI\Test_ID DI read and open single channel.vi')
config.set('DI\Test_ID DI read and open single channel.vi','Avg Read (ms)','315.9')
config.add_section('DI\Test_ID DI read single channel.vi')
config.set('DI\Test_ID DI read single channel.vi','Avg Read (ms)','0.09')
config.add_section('DO\Test_ID DO write all channels.vi')
config.set('DO\Test_ID DO write all channels.vi','Avg Read (ms)','0.1')
config.add_section('DO\Test_ID DO write single channel.vi')
config.set('DO\Test_ID DO write single channel.vi','Avg Read (ms)','0.1')
config.add_section('PWM\Test_ID PWM All Parallel Loop Rate.vi')
config.set('PWM\Test_ID PWM All Parallel Loop Rate.vi','Avg Read (ms)','0.01257')
config.add_section('PWM\Test_ID PWM All Single Loop Rate.vi')
config.set('PWM\Test_ID PWM All Single Loop Rate.vi','Avg Read (ms)','0.1214')
config.add_section('PWM\Test_ID PWM Loop Rate.vi')
config.set('PWM\Test_ID PWM Loop Rate.vi','Avg Read (ms)','0.01254')
config.add_section('PWM\Test_ID PWM Open and Read Loop Rate.vi')
config.set('PWM\Test_ID PWM Open and Read Loop Rate.vi','Avg Read (ms)','315.8')
config.add_section('QE\Test_ID QE read and open single channel.vi')
config.set('QE\Test_ID QE read and open single channel.vi','Avg Read (ms)','317.3')
config.add_section('QE\Test_ID QE read single channel.vi')
config.set('QE\Test_ID QE read single channel.vi','Avg Read (ms)','3.35')
config.add_section('QE\Test_ID QE read two channels.vi')
config.set('QE\Test_ID QE read two channels.vi','Avg Read (ms)','3.52')
config.add_section('AI\Test_EVI AI read all channels with preopen.vi')
config.set('AI\Test_EVI AI read all channels with preopen.vi','Avg Read (ms)','0.02011')
config.add_section('AI\Test_EVI AI read all channels.vi')
config.set('AI\Test_EVI AI read all channels.vi','Avg Read (ms)','0.02345')
config.add_section('AI\Test_EVI AI read single channel with preopen.vi')
config.set('AI\Test_EVI AI read single channel with preopen.vi','Avg Read (ms)','0.0091')
config.add_section('AI\Test_EVI AI read single channel.vi')
config.set('AI\Test_EVI AI read single channel.vi','Avg Read (ms)','0.01228')
config.add_section('AO\Test_EVI AO write all channels with preopen.vi')
config.set('AO\Test_EVI AO write all channels with preopen.vi','Avg Read (ms)','0.03042')
config.add_section('AO\Test_EVI AO write all channels.vi')
config.set('AO\Test_EVI AO write all channels.vi','Avg Read (ms)','0.03391')
config.add_section('AO\Test_EVI AO write single channel with preopen.vi')
config.set('AO\Test_EVI AO write single channel with preopen.vi','Avg Read (ms)','0.02454')
config.add_section('AO\Test_EVI AO write single channel.vi')
config.set('AO\Test_EVI AO write single channel.vi','Avg Read (ms)','0.02771')
config.add_section('DIO\Test_EVI DI ReadAllInLoop.vi')
config.set('DIO\Test_EVI DI ReadAllInLoop.vi','Avg (ms)','0.1472')
config.add_section('DIO\Test_EVI DI ReadOneInLoop.vi')
config.set('DIO\Test_EVI DI ReadOneInLoop.vi','Avg (ms)','0.09413')
config.add_section('DIO\Test_EVI DO WriteAllInLoop.vi')
config.set('DIO\Test_EVI DO WriteAllInLoop.vi','Avg (ms)','0.1024')
config.add_section('DIO\Test_EVI DO WriteOneInLoop.vi')
config.set('DIO\Test_EVI DO WriteOneInLoop.vi','Avg (ms)','0.0786')
config.add_section('Test_ID SPI read write multiple sample loopback.vi')
config.set('Test_ID SPI read write multiple sample loopback.vi','Avg Read (ms)','26.19')
config.add_section('Test_ID SPI read write single sample loopback.vi')
config.set('Test_ID SPI read write single sample loopback.vi','Avg Read (ms)','16.53')
config.add_section('Test_ID SPI write and open multiple samples.vi')
config.set('Test_ID SPI write and open multiple samples.vi','Avg Read (ms)','4294941')
# Drop the stray leading space in the section name so it matches the naming
# convention of every other SPI section written by this script.
# NOTE(review): if a downstream consumer was matching the space-prefixed name,
# it must be updated in lockstep.
config.add_section('Test_ID SPI write and open single sample.vi')
config.set('Test_ID SPI write and open single sample.vi','Avg Read (ms)','16.86')
config.add_section('Test_ID SPI write multple samples.vi')
config.set('Test_ID SPI write multple samples.vi','Avg Read (ms)','26.19')
config.add_section('Test_ID SPI write single sample.vi')
config.set('Test_ID SPI write single sample.vi','Avg Read (ms)','16.85')
config.write(open('../../Configure.ini','w')) | [
"wenkai.mo.1992@gmail.com"
] | wenkai.mo.1992@gmail.com |
f6aff156beb68f479d76392ed5097e84546ed4e6 | 764a157c1ef369664144a112f390165809c37861 | /apps/app/views.py | 75a6c7124d899b9dae8673fed292fa32dbe61aff | [] | no_license | Maliaotw/gogoweb | b044678b0a34c2748267c8f8ac1f6af91d42bcd0 | aad84f11163e62716a239972436eb92e7cc601d0 | refs/heads/main | 2023-07-07T19:46:37.470811 | 2021-08-31T15:01:58 | 2021-08-31T15:01:58 | 341,254,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from django.shortcuts import render
from apps.app import models
from apps.app import serializers
from rest_framework import viewsets
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.views import APIView, Response
from rest_framework import status
# Create your views here.
class TaskModelViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Task objects with limit/offset pagination.

    ``destroy`` is overridden so a successful delete answers 200 OK
    instead of DRF's default 204 No Content.
    """
    serializer_class = serializers.TaskSerializer
    pagination_class = LimitOffsetPagination
    queryset = models.Task.objects.all()
    def destroy(self, request, *args, **kwargs):
        """Delete the addressed Task and reply with an explicit 200 status."""
        task = self.get_object()
        self.perform_destroy(task)
        return Response(status=status.HTTP_200_OK)
| [
"MaliaoTW@gmail.com"
] | MaliaoTW@gmail.com |
4c656d8f565ec8089bfd85f6ec20e2b4da3fa207 | eba642e52e36e28ac4f8b7cd567e785e2306cca3 | /python-tutorial/15. Indexing and Slicing with Strings/tut.py | 80fffc115e530f31ce7b1e938bf2f3c41d8b4d91 | [] | no_license | Merodor/FirstTask | 5451b4f12fa08e5309ca96e35c495665d865f9aa | 5b160eab0e92e58b2bfb44a65f96ce9caa3b8b21 | refs/heads/master | 2022-12-10T10:17:10.360865 | 2019-08-25T12:45:27 | 2019-08-25T12:45:27 | 204,288,180 | 0 | 0 | null | 2022-12-04T08:53:07 | 2019-08-25T12:00:29 | Python | UTF-8 | Python | false | false | 186 | py | mystring="abcdefghij"
# Slicing experiments kept for reference (uncomment to try each form):
# print(mystring[2:])   # from index 2 to the end
# print(mystring[:3])   # first three characters
# print(mystring[2:6])  # indices 2..5
# print(mystring[::])   # full copy
# print(mystring[::3])  # every third character
print(mystring[1:5:1])  # indices 1..4 with step 1 -> "bcde"
print(mystring[::-1][2:7:2])  # reverse first, then indices 2,4,6 of the reversal -> "hfd"
| [
"arustamyan@ro.ru"
] | arustamyan@ro.ru |
bef3d9cafc56220be75cafeebf7b7eeebb26d97c | 579c32b089071a2d2dd4b545bf739248fb636199 | /Week2/tpot_titanic_pipeline1.py | 2e0eea476dca401fa46b132b0a9b796824e25604 | [] | no_license | Paul-Hwang/Recommend-System | 98e7e320956a3280203cd65ac4b72a42f4de32a4 | 7a78895f730f19f20b1c10fcb705032df386a6a2 | refs/heads/master | 2020-12-06T04:41:14.585305 | 2020-05-31T16:25:19 | 2020-05-31T16:25:19 | 232,344,644 | 0 | 0 | null | 2020-02-05T16:27:23 | 2020-01-07T14:41:07 | Jupyter Notebook | UTF-8 | Python | false | false | 1,512 | py | import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
# Data loading.
train = pd.read_csv('./train.csv')
test = pd.read_csv('./test.csv')
# Impute missing ages with each set's own mean age.
train['Age'].fillna(train['Age'].mean(), inplace=True)
test['Age'].fillna(test['Age'].mean(), inplace=True)
# Impute the missing fares with the mean fare.
test['Fare'].fillna(test['Fare'].mean(), inplace=True)
# Impute missing embarkation ports with the most frequent port. The test set
# is deliberately filled from the *training* distribution so both sets share
# the same imputation value. `idxmax()` on the value counts replaces the
# fragile `reset_index()['index'][0]` chain, which raises KeyError on
# pandas >= 2.0 (reset_index no longer produces an 'index' column there).
most_common_port = train['Embarked'].value_counts().idxmax()
train['Embarked'].fillna(most_common_port, inplace=True)
test['Embarked'].fillna(most_common_port, inplace=True)
# Feature selection.
features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
train_features = train[features]
test_features = test[features]
train_label = train['Survived']
# One-hot encode the categorical columns. 'records' is the documented orient
# spelling; the abbreviation 'record' was deprecated and removed in pandas 2.0.
dvec = DictVectorizer(sparse=False)
train_features = dvec.fit_transform(train_features.to_dict(orient='records'))
test_features = dvec.transform(test_features.to_dict(orient='records'))
# Average CV score on the training set was: 0.8462620048961144
exported_pipeline = GradientBoostingClassifier(learning_rate=0.1, max_depth=5, max_features=0.55, min_samples_leaf=5, min_samples_split=3, n_estimators=100, subsample=0.7000000000000001)
exported_pipeline.fit(train_features, train_label)
results = exported_pipeline.predict(test_features)
"hjf358900088@gmail.com"
] | hjf358900088@gmail.com |
0aba478ac216ffa1c19ae158ded7879e4f70ccb7 | 6bd4081120ef7c99dfa01b18bfdc7a59fa704c85 | /ex4.py | e26995c77a007ddbe93cc977e94d8333580ec835 | [] | no_license | Uchicago-Stat-Comp-37810/assignment-2-KumamonYJ | 2cad833e6ac660bb093d849b9c7860280865b9d6 | 7e039f84a60726d361b0c6602618394b6cb0b8dd | refs/heads/master | 2020-04-01T16:23:25.505323 | 2018-10-22T19:08:16 | 2018-10-22T19:08:16 | 153,379,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | #37810 Assignment 2_Yi Jin
# Carpool capacity exercise: given a fleet, drivers, and passengers, work out
# how many cars sit idle and how full each driven car must be.
cars = 100
space_in_a_car = 4.0  # seats per car; a float on purpose (see study drill below)
drivers = 30
passengers = 90
# Derived quantities.
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
# Report the scenario.
print("There are", cars, "cars available.")
print("There are only", drivers, "drivers available.")
print("There will be", cars_not_driven, "empty cars today.")
print("We can transport", carpool_capacity, "people today.")
print("We have", passengers, "to carpool today.")
print("We need to put about", average_passengers_per_car,"in each car.")
print("Study Drills")
print("There is an error because we haven't defined the variable, i.e. we haven't assigned values to car_pool_capacity.")
print("(1) It is not nessary. If it is 4, the type of variables space_in_a_car and carpool_capacity will be floating point.")
| [
"43788544+KumamonYJ@users.noreply.github.com"
] | 43788544+KumamonYJ@users.noreply.github.com |
d0ce197ca4aca3324bc0593090c9645d61d404ab | 3aed0cf735c64aaf4dbe0badfa274c496fca29d6 | /simple_test.py | cf0e72ce532f84e36bbe3fbd5fd92d4cfaa4fa8a | [] | no_license | KamilBabayev/pytest | 4d2316b715f2988d6f82c66b9f5a0dc9100fac66 | da13bbc130e6057366ad21c98b70a59bc94d146b | refs/heads/main | 2023-04-18T02:26:36.559276 | 2021-04-24T08:26:33 | 2021-04-24T08:26:33 | 359,103,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py |
def test_this_passes():
    """Trivial always-true assertion: demonstrates a passing pytest case."""
    assert 1 == 1
def test_this_fails():
    """Deliberately false assertion: demonstrates pytest's failure report."""
    assert 1 == 2
| [
"kamil.babayev@percona.com"
] | kamil.babayev@percona.com |
50482a45f14d167f9dd6e9fc7d00d93c3fcaad60 | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/Geron17Hands/B_PartI/H_Chapter8/C_PCA/D_UsingScikitLearn/index.py | f3808ea9cd7e1c90a935491935c7d8dd01be2ef0 | [] | no_license | nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,414 | py | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# Download from finelybook www.finelybook.com
# ing the first d principal components (i.e., the matrix composed of the first d columns
# of VT), as shown in Equation 8-2.
#
# Equation 8-2. Projecting the training set down to d dimensions
# �d‐proj = � · �d
#
# The following Python code projects the training set onto the plane defined by the first
# two principal components:
# W2 = V.T[:, :2]
# X2D = X_centered.dot(W2)
# There you have it! You now know how to reduce the dimensionality of any dataset
# down to any number of dimensions, while preserving as much variance as possible.
#
# Using Scikit-Learn
# Scikit-Learn’s PCA class implements PCA using SVD decomposition just like we did
# before. The following code applies PCA to reduce the dimensionality of the dataset
# down to two dimensions (note that it automatically takes care of centering the data):
# from sklearn.decomposition import PCA
#
# pca = PCA(n_components = 2)
# X2D = pca.fit_transform(X)
#
# After fitting the PCA transformer to the dataset, you can access the principal compo‐
# nents using the components_ variable (note that it contains the PCs as horizontal vec‐
# tors, so, for example, the first principal component is equal to pca.components_.T[:,
# 0]).
#
# Explained Variance Ratio
# Another very useful piece of information is the explained variance ratio of each prin‐
# cipal component, available via the explained_variance_ratio_ variable. It indicates
# the proportion of the dataset’s variance that lies along the axis of each principal com‐
# ponent. For example, let’s look at the explained variance ratios of the first two compo‐
# nents of the 3D dataset represented in Figure 8-2:
# >>> print(pca.explained_variance_ratio_)
# array([ 0.84248607, 0.14631839])
# This tells you that 84.2% of the dataset’s variance lies along the first axis, and 14.6%
# lies along the second axis. This leaves less than 1.2% for the third axis, so it is reason‐
# able to assume that it probably carries little information.
#
#
#
#
# 214 | Chapter 8: Dimensionality Reduction
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Using Scikit-Learn",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
[self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class UsingScikitLearn(HierNode):
def __init__(self):
super().__init__("Using Scikit-Learn")
self.add(Content(), "content")
# eof
| [
"lawrence.mcafee@gmail.com"
] | lawrence.mcafee@gmail.com |
cb50337db2d8006a698aab101b52e25241b61b67 | 292437b85108504a7ca91571f26a639a313501b6 | /venv2/lib/python2.7/site-packages/keystoneclient/auth/identity/generic/token.py | 6a5d15b281e8931b3199251c3a6ea2c8f77eef3e | [] | no_license | heekof/monitoring-agent | c86bebcf77091490df7a6b8c881b85fdb2b9e4eb | b1c079efdf2dabe854f2aa3d96f36d2ec7021070 | refs/heads/master | 2021-01-15T15:39:01.512801 | 2016-08-31T20:53:38 | 2016-08-31T20:53:38 | 58,620,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_config import cfg
from keystoneclient import _discover
from keystoneclient.auth.identity.generic import base
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3
LOG = logging.getLogger(__name__)
def get_options():
    """Return the oslo.config options contributed by this auth plugin.

    The list holds a single secret ``token`` string option.
    """
    token_opt = cfg.StrOpt('token', secret=True, help='Token to authenticate with')
    return [token_opt]
class Token(base.BaseGenericPlugin):
    """Generic token auth plugin.

    Discovers whether the identity endpoint speaks v2 or v3 and hands the
    stored token to the matching version-specific plugin.

    :param string token: Token for authentication.
    """

    def __init__(self, auth_url, token=None, **kwargs):
        super(Token, self).__init__(auth_url, **kwargs)
        self._token = token

    def create_plugin(self, session, version, url, raw_status=None):
        """Return a v2 or v3 token plugin matching *version*, or None."""
        if _discover.version_match((2,), version):
            return v2.Token(url, self._token, **self._v2_params)
        if _discover.version_match((3,), version):
            return v3.Token(url, self._token, **self._v3_params)
        return None

    @classmethod
    def get_options(cls):
        """Combine the base plugin's options with this module's token option."""
        opts = super(Token, cls).get_options()
        opts.extend(get_options())
        return opts
| [
"bendriss-jaafar@live.fr"
] | bendriss-jaafar@live.fr |
867b3f98c1c1451d19180642f61929115b3606d1 | d4f4bff5d4412abbb73ce534fae0c87ea9a62362 | /model/boarding2/unassigned_integration_settings.py | fdc774918694894dc6fb81064c5daafc67d14b5a | [] | no_license | icorso/wn_api | 4f023905bcf83fd19eb7826191a6fcf66345e38f | b7e558b30d57b62ed3333cbfb7a9359bf954e320 | refs/heads/master | 2023-05-25T11:05:02.203211 | 2021-05-22T15:10:57 | 2021-05-22T15:10:57 | 366,672,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,171 | py | # coding: utf-8
from model.serializable import SwaggerSerializable
class UnassignedIntegrationSettings(SwaggerSerializable):
    """Integration settings for a not-yet-assigned account (Swagger model).

    Hand-simplified from generated code: ``__init__`` now assigns every
    field unconditionally. The generated version pre-set each private
    attribute to ``None`` and guarded every assignment with
    ``if value is not None`` -- skipping the assignment left the attribute
    ``None`` anyway, so the guards were dead code and the simplification
    is behaviorally identical.
    """

    # Swagger-declared type of each attribute (consumed by the
    # serialization helpers in SwaggerSerializable).
    swagger_types = {
        'enable_background_validation': 'bool',
        'background_validation_url': 'str',
        'receipt_page_url': 'str',
        'enable_additional_field_xml_response_tag': 'bool',
        'enable_supports_apple_pay_xml_response_tag': 'bool',
        'enable_supports_google_pay_xml_response_tag': 'bool',
        'enable_enable3ds_xml_response_tag': 'bool',
        'enable_supported_cards_xml_response_tag': 'bool'
    }

    # Python attribute name -> JSON field name.
    attribute_map = {
        'enable_background_validation': 'enableBackgroundValidation',
        'background_validation_url': 'backgroundValidationUrl',
        'receipt_page_url': 'receiptPageUrl',
        'enable_additional_field_xml_response_tag': 'enableAdditionalFieldXmlResponseTag',
        'enable_supports_apple_pay_xml_response_tag': 'enableSupportsApplePayXmlResponseTag',
        'enable_supports_google_pay_xml_response_tag': 'enableSupportsGooglePayXmlResponseTag',
        'enable_enable3ds_xml_response_tag': 'enableEnable3dsXmlResponseTag',
        'enable_supported_cards_xml_response_tag': 'enableSupportedCardsXmlResponseTag'
    }

    def __init__(self, enable_background_validation=False,
                 background_validation_url=None, receipt_page_url=None,
                 enable_additional_field_xml_response_tag=True,
                 enable_supports_apple_pay_xml_response_tag=True,
                 enable_supports_google_pay_xml_response_tag=True,
                 enable_enable3ds_xml_response_tag=True,
                 enable_supported_cards_xml_response_tag=True):  # noqa: E501
        """UnassignedIntegrationSettings - a model defined in Swagger"""  # noqa: E501
        self._enable_background_validation = enable_background_validation
        self._background_validation_url = background_validation_url
        self._receipt_page_url = receipt_page_url
        self._enable_additional_field_xml_response_tag = enable_additional_field_xml_response_tag
        self._enable_supports_apple_pay_xml_response_tag = enable_supports_apple_pay_xml_response_tag
        self._enable_supports_google_pay_xml_response_tag = enable_supports_google_pay_xml_response_tag
        self._enable_enable3ds_xml_response_tag = enable_enable3ds_xml_response_tag
        self._enable_supported_cards_xml_response_tag = enable_supported_cards_xml_response_tag
        # No polymorphic subtypes for this model.
        self.discriminator = None

    @property
    def enable_background_validation(self):
        """bool: the enableBackgroundValidation flag of this model."""
        return self._enable_background_validation

    @enable_background_validation.setter
    def enable_background_validation(self, enable_background_validation):
        self._enable_background_validation = enable_background_validation

    @property
    def background_validation_url(self):
        """str: the backgroundValidationUrl value of this model."""
        return self._background_validation_url

    @background_validation_url.setter
    def background_validation_url(self, background_validation_url):
        self._background_validation_url = background_validation_url

    @property
    def receipt_page_url(self):
        """str: the receiptPageUrl value of this model."""
        return self._receipt_page_url

    @receipt_page_url.setter
    def receipt_page_url(self, receipt_page_url):
        self._receipt_page_url = receipt_page_url

    @property
    def enable_additional_field_xml_response_tag(self):
        """bool: the enableAdditionalFieldXmlResponseTag flag of this model."""
        return self._enable_additional_field_xml_response_tag

    @enable_additional_field_xml_response_tag.setter
    def enable_additional_field_xml_response_tag(self, enable_additional_field_xml_response_tag):
        self._enable_additional_field_xml_response_tag = enable_additional_field_xml_response_tag

    @property
    def enable_supports_apple_pay_xml_response_tag(self):
        """bool: the enableSupportsApplePayXmlResponseTag flag of this model."""
        return self._enable_supports_apple_pay_xml_response_tag

    @enable_supports_apple_pay_xml_response_tag.setter
    def enable_supports_apple_pay_xml_response_tag(self, enable_supports_apple_pay_xml_response_tag):
        self._enable_supports_apple_pay_xml_response_tag = enable_supports_apple_pay_xml_response_tag

    @property
    def enable_supports_google_pay_xml_response_tag(self):
        """bool: the enableSupportsGooglePayXmlResponseTag flag of this model."""
        return self._enable_supports_google_pay_xml_response_tag

    @enable_supports_google_pay_xml_response_tag.setter
    def enable_supports_google_pay_xml_response_tag(self, enable_supports_google_pay_xml_response_tag):
        self._enable_supports_google_pay_xml_response_tag = enable_supports_google_pay_xml_response_tag

    @property
    def enable_enable3ds_xml_response_tag(self):
        """bool: the enableEnable3dsXmlResponseTag flag of this model."""
        return self._enable_enable3ds_xml_response_tag

    @enable_enable3ds_xml_response_tag.setter
    def enable_enable3ds_xml_response_tag(self, enable_enable3ds_xml_response_tag):
        self._enable_enable3ds_xml_response_tag = enable_enable3ds_xml_response_tag

    @property
    def enable_supported_cards_xml_response_tag(self):
        """bool: the enableSupportedCardsXmlResponseTag flag of this model."""
        return self._enable_supported_cards_xml_response_tag

    @enable_supported_cards_xml_response_tag.setter
    def enable_supported_cards_xml_response_tag(self, enable_supported_cards_xml_response_tag):
        self._enable_supported_cards_xml_response_tag = enable_supported_cards_xml_response_tag
"icorso@yandex.ru"
] | icorso@yandex.ru |
3e7ac719e8e3a708da6343ce589edb7478e7f64d | 68d1d3f61de1341d8b873235a4742d9092c7fb9a | /04_python/while.py | c5da681200ef178c6eab2a36bd3e5b4b4e34f981 | [] | no_license | wonseokjung/Deeplearning_coding | a0156a73d263efe67265e978dd3b130c5757aac1 | 5f395843ae7ac1f4147426124ae466d0ccc38e0b | refs/heads/master | 2021-07-04T08:34:38.730212 | 2017-09-28T00:09:10 | 2017-09-28T00:09:10 | 103,654,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | number = 23
running=True
while running:
guess= int(input("Enter an interger:"))
if guess==23:
print("Congra")
running= False # this makes whilte loop to stop
elif guess>number:
print("number is smaller")
else :
print("number is bigger")
else:
print("done") | [
"noreply@github.com"
] | wonseokjung.noreply@github.com |
a94cce2fcc9f305be51aa53edd18b54745182aec | 4da5cf06e6dd68988f9196f2a9781703f46a8dc9 | /hw2.py | 63daaccae4bcdb059febb62e2e3772f3d0018552 | [] | no_license | dsmilo/DATA602 | 64aa2a8d1976152c2c20ac4a48dad54a7421ab44 | cbe27628bb297029da9c33854381ed4c23d6ea2e | refs/heads/master | 2020-07-27T09:24:08.056439 | 2017-01-14T17:56:19 | 2017-01-14T17:56:19 | 67,137,801 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,532 | py | # Dan Smilowitz DATA 602 hw2
#1. fill in this class
# it will need to provide for what happens below in the
# main, so you will at least need a constructor that takes the values as (Brand, Price, Safety Rating),
# a function called showEvaluation, and an attribute carCount
class CarEvaluation:
'A simple class that represents a car evaluation'
#all your logic here
carCount = 0
def __init__(self, brand = '', price = '', safety = 0):
self.brand = brand
self.price = price
self.safety = safety
CarEvaluation.carCount += 1
def showEvaluation(self):
#The Ford has a High price and it's safety is rated a 2
print("The %s has a %s price and its safety is rated a %d" %(self.brand, self.price, self.safety))
#2. fill in this function
# it takes a list of CarEvaluation objects for input and either "asc" or "des"
# if it gets "asc" return a list of car names order by ascending price
# otherwise by descending price
def sortbyprice(car_list, order = ""):
sorted_cars = []
is_desc = True
if order.lower() == "asc": is_desc = False
price_num = {'High': 3, 'Med': 2, 'Low': 1}
car_list.sort(key= lambda x: price_num[x.price], reverse = is_desc)
for i in range(len(car_list)):
sorted_cars.append(car_list[i].brand)
return sorted_cars
#3. fill in this function
# it takes a list for input of CarEvaluation objects and a value to search for
# it returns true if the value is in the safety attribute of an entry on the list,
# otherwise false
def searchforsafety(car_list, car_rating):
found = False
for item in car_list:
if item.safety == car_rating:
found = True
return found
# This is the main of the program. Expected outputs are in comments after the function calls.
if __name__ == "__main__":
eval1 = CarEvaluation("Ford", "High", 2)
eval2 = CarEvaluation("GMC", "Med", 4)
eval3 = CarEvaluation("Toyota", "Low", 3)
print "Car Count = %d" % CarEvaluation.carCount # Car Count = 3
eval1.showEvaluation() #The Ford has a High price and its safety is rated a 2
eval2.showEvaluation() #The GMC has a Med price and its safety is rated a 4
eval3.showEvaluation() #The Toyota has a Low price and its safety is rated a 3
L = [eval1, eval2, eval3]
print sortbyprice(L, "asc"); #[Toyota, GMC, Ford]
print sortbyprice(L, "des"); #[Ford, GMC, Toyota]
print searchforsafety(L, 2); #true
print searchforsafety(L, 1); #false
| [
"dan.smilowitz@gmail.com"
] | dan.smilowitz@gmail.com |
4ba244991deb82858f176ccdded7c3cd02dae5d1 | 49126b6fe54a49a1302a90cc4739ccc86c009fc4 | /p6.py | f25aad0700a0565a7951c1abb59b004b54c421dd | [] | no_license | ericksepulveda/tarea2moreira | effae49c4a06d69848393f81eb825aa7439fa3ac | 567f7e0d41d654b030185ecbf3c1f3175b11b276 | refs/heads/master | 2021-01-10T12:37:16.643098 | 2015-12-10T15:15:16 | 2015-12-10T15:15:16 | 47,720,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from igraph import *
from p6_helpers import *
DATADIR = "redes/"
def main():
g = Graph.Read_GML(DATADIR + "roget.gml")
nodeCount = len(g.vs)
edgeCount = len(g.es)
# Average degree
degrees = g.degree()
degreeSum = reduce(lambda x,y: x+y, degrees)
averageDegree = degreeSum/len(degrees)
# Getting data from original graph
p6magic(g, 'Red Original')
# Original rewired
g.rewire(edgeCount*2)
p6magic(g, 'Red Recableada')
# Erdös-Renyi
g = Graph.Erdos_Renyi(nodeCount, m=edgeCount)
p6magic(g, 'Erdös-Renyi')
# Barabási-Albert
g = Graph.Barabasi(nodeCount, m=averageDegree/2)
p6magic(g, 'Barabási-Albert')
def p6magic(graph, name):
cores = getCores(graph)
modularity = getModularity(graph)
assortativity = graph.assortativity_degree()
printCores(cores, name)
print "Modularidad\t" + str(modularity)
print "Asortatividad\t" + str(assortativity)
if __name__ == "__main__":
main() | [
"erick.sepulveda@alumnos.usm.cl"
] | erick.sepulveda@alumnos.usm.cl |
111c359123ea7ec429f96cd86e1a580960ed6a12 | 91228ff192d381d4f02a35ac2f0a64dfb8e8e0cf | /AStar.py | 982bafb3631ecf35684e2dbc9aec020ada2cbc7f | [] | no_license | gwhealan/422-proj2 | 48cd7cf5b32e3b2e9574c91c779417b590c9e46e | 1e9b51963f4011a169e7f3d893afbc4091b87696 | refs/heads/main | 2023-01-18T23:58:15.912260 | 2020-11-23T06:26:51 | 2020-11-23T06:26:51 | 309,611,663 | 0 | 0 | null | 2020-11-03T07:49:34 | 2020-11-03T07:49:33 | null | UTF-8 | Python | false | false | 2,494 | py | import util
import math
import random
import WordFinder
import Grid
import time
from guppy import hpy
def nullHeuristic(gridState):
#return random.random()
return 1
def aStarSearch(grid, dictionary, heuristic=nullHeuristic, choiceMult=1, choiceMin=1):
visited = [] # Swapped to dict for faster accessing
sorter = util.PriorityQueueWithFunction(heuristic)
sorter.push((grid, 0))
iterations = 0
times_visited = 0
while not sorter.isEmpty():
gridState, depth = sorter.pop()
iterations += 1
#if iterations > 2:
# break
# print(f"iteration {iterations}: testing state at depth {depth} with grid:")
# print(str(gridState))
if gridState in visited:
times_visited += 1
continue
visited.append(gridState)
#print(f"Scanning {gridState}", end='\t')
if gridState.isComplete() and gridState.isValid(dictionary): # fine to test it this way since the python and statement will only test the second case if the first one is true
#print("DONE")
#h = hpy()
#print(h.heap())
#print(f'Found same state {times_visited} times.')
return gridState, iterations
successors = gridState.getNextGridStates(dictionary, int(choiceMult * (depth + 1)) + choiceMin)
random.shuffle(successors)
#print(f"Expanding from {gridState}")
for newState in successors:
#print(newState)
if newState.isValid(dictionary):
sorter.push((newState, depth + 1))
else:
del(newState)
#del(gridState)
#print(f'Found same state {times_visited} times.')
return None, iterations
def main(dictionary):
grid = Grid.Grid(5,5)
#print('------------------------------------')
completedGrid, iterations = aStarSearch(grid, dictionary, choiceMult=1)
print(f'completed grid after {iterations} iterations.\nResult:\n{str(completedGrid)}')
if __name__ == '__main__':
dictionary = WordFinder.WordFinder()
startTime = time.time()
dictionary.importFromFile('5000-words.txt')
endTime = time.time()
print(f'Dictionary loaded in {endTime - startTime} sec.')
#dictionary.importFromList(['is', 'it', 'to', 'an', 'on', 'no'])
print('Beginning crossword maker...')
startTime = time.time()
main(dictionary)
endTime = time.time()
print(f'Crossword created in {endTime - startTime} sec.') | [
"gwhealan@gmail.com"
] | gwhealan@gmail.com |
56ecf246a43940a3d934f1959f20b61dd6024f4c | 580f52ccb533311b1d783dc141e19951c625c7d2 | /scripts/intra_measurements.py | b37ebfd30c129fc56a2e9701a96d43b0459264bd | [] | no_license | luqmaansalie/NISInternetTopology | 17bc81c45f1179367a528d1c31cb5690216a79f1 | 1cb0623fb5580c6b5aba0d8faa6f1e21316425f5 | refs/heads/master | 2020-07-18T07:57:02.470382 | 2019-09-09T22:01:04 | 2019-09-09T22:01:04 | 206,209,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,257 | py | from datetime import datetime
from ripe.atlas.cousteau import (
Ping,
Traceroute,
AtlasSource,
AtlasCreateRequest,
Probe,
Measurement,
AtlasResultsRequest
)
ATLAS_API_KEY = "f9ccf6c6-f942-436a-8f93-ffef8b676236"
def initRequest(country, source, p1, p2, t1, t2):
atlas_request = AtlasCreateRequest(
start_time=datetime.utcnow(),
key=ATLAS_API_KEY,
measurements=[p1, t1, p2, t2],
sources=[source],
is_oneoff=True
)
(is_success, response) = atlas_request.create()
print(str(country) + ": " + str(response))
# ============================================================================
country = "SA"
source = AtlasSource(type="probes", value="33159, 18114, 31523", requested=3)
ping1 = Ping(af=4, target="www.uct.ac.za", description="intra ping1 1 - " + country)
ping2 = Ping(af=4, target="www.uwc.ac.za", description="intra ping2 2 - " + country)
traceroute1 = Traceroute(af=4, target="www.uct.ac.za", description="intra traceroute1 1 - " + country, protocol="ICMP")
traceroute2 = Traceroute(af=4, target="www.uwc.ac.za", description="intra traceroute2 2 - " + country, protocol="ICMP")
initRequest(country, source, ping1, ping2, traceroute1, traceroute2)
# ============================================================================
country = "Egypt"
source = AtlasSource(type="probes", value="35074, 34151, 32206", requested=3)
ping1 = Ping(af=4, target="www.aucegypt.edu", description="intra ping1 3 - " + country)
ping2 = Ping(af=4, target="www.cu.edu.eg", description="intra ping2 4 - " + country)
traceroute1 = Traceroute(af=4, target="www.aucegypt.edu", description="intra traceroute1 3 - " + country, protocol="ICMP")
traceroute2 = Traceroute(af=4, target="www.cu.edu.eg", description="intra traceroute2 4 - " + country, protocol="ICMP")
initRequest(country, source, ping1, ping2, traceroute1, traceroute2)
# ============================================================================
country = "Nigeria"
source = AtlasSource(type="probes", value="51730, 33267, 30090", requested=3)
ping1 = Ping(af=4, target="www.abu.edu.ng", description="intra ping1 5 - " + country)
ping2 = Ping(af=4, target="www.unilag.edu.ng", description="intra ping2 6 - " + country)
traceroute1 = Traceroute(af=4, target="www.abu.edu.ng", description="intra traceroute1 5 - " + country, protocol="ICMP")
traceroute2 = Traceroute(af=4, target="www.unilag.edu.ng", description="intra traceroute2 6 - " + country, protocol="ICMP")
initRequest(country, source, ping1, ping2, traceroute1, traceroute2)
# ============================================================================
country = "Kenya"
source = AtlasSource(type="probes", value="33965, 33752, 33751", requested=3)
ping1 = Ping(af=4, target="www.uonbi.ac.ke", description="intra ping1 7 - " + country)
ping2 = Ping(af=4, target="www.ku.ac.ke", description="intra ping2 8 - " + country)
traceroute1 = Traceroute(af=4, target="www.uonbi.ac.ke", description="intra traceroute1 7 - " + country, protocol="ICMP")
traceroute2 = Traceroute(af=4, target="www.ku.ac.ke", description="intra traceroute2 8 - " + country, protocol="ICMP")
initRequest(country, source, ping1, ping2, traceroute1, traceroute2)
# ============================================================================
country = "Senegal"
source = AtlasSource(type="probes", value="22522, 32258, 22588", requested=3)
ping1 = Ping(af=4, target="www.ucad.sn", description="intra ping1 9 - " + country)
ping2 = Ping(af=4, target="www.ugb.sn", description="intra ping2 10 - " + country)
traceroute1 = Traceroute(af=4, target="www.ucad.sn", description="intra traceroute1 9 - " + country, protocol="ICMP")
traceroute2 = Traceroute(af=4, target="www.ugb.sn", description="intra traceroute2 10 - " + country, protocol="ICMP")
initRequest(country, source, ping1, ping2, traceroute1, traceroute2)
# ============================================================================
country = "Morocco"
source = AtlasSource(type="probes", value="35067, 32925, 30145", requested=3)
ping1 = Ping(af=4, target="www.uaq.ma", description="intra ping1 11 - " + country)
ping2 = Ping(af=4, target="www.uca.ma", description="intra ping2 12 - " + country)
traceroute1 = Traceroute(af=4, target="www.uaq.ma", description="intra traceroute1 11 - " + country, protocol="ICMP")
traceroute2 = Traceroute(af=4, target="www.uca.ma", description="intra traceroute2 12 - " + country, protocol="ICMP")
initRequest(country, source, ping1, ping2, traceroute1, traceroute2)
# ============================================================================
country = "Algeria"
source = AtlasSource(type="probes", value="51462, 15328, 16604", requested=3)
ping1 = Ping(af=4, target="www.univ-tlemcen.dz", description="intra ping1 13 - " + country)
ping2 = Ping(af=4, target="www.univ-bejaia.dz", description="intra ping2 14 - " + country)
traceroute1 = Traceroute(af=4, target="www.univ-tlemcen.dz", description="intra traceroute1 13 - " + country, protocol="ICMP")
traceroute2 = Traceroute(af=4, target="www.univ-bejaia.dz", description="intra traceroute2 14 - " + country, protocol="ICMP")
initRequest(country, source, ping1, ping2, traceroute1, traceroute2)
# ============================================================================
country = "Ghana"
source = AtlasSource(type="probes", value="33031, 14945, 6380", requested=3)
ping1 = Ping(af=4, target="www.ug.edu.gh", description="intra ping1 15 - " + country)
ping2 = Ping(af=4, target="www.knust.edu.gh", description="intra ping2 16 - " + country)
traceroute1 = Traceroute(af=4, target="www.ug.edu.gh", description="intra traceroute1 15 - " + country, protocol="ICMP")
traceroute2 = Traceroute(af=4, target="www.knust.edu.gh", description="intra traceroute2 16 - " + country, protocol="ICMP")
initRequest(country, source, ping1, ping2, traceroute1, traceroute2)
# ============================================================================
country = "Mozambique"
source = AtlasSource(type="probes", value="19574, 14968, 13799", requested=3)
ping1 = Ping(af=4, target="www.uem.mz", description="intra ping1 17 - " + country)
ping2 = Ping(af=4, target="www.up.ac.mz", description="intra ping2 18 - " + country)
traceroute1 = Traceroute(af=4, target="www.uem.mz", description="intra traceroute1 17 - " + country, protocol="ICMP")
traceroute2 = Traceroute(af=4, target="www.up.ac.mz", description="intra traceroute2 18 - " + country, protocol="ICMP")
initRequest(country, source, ping1, ping2, traceroute1, traceroute2)
# ============================================================================
country = "Tanzania"
source = AtlasSource(type="probes", value="50463, 33284, 27442", requested=3)
ping1 = Ping(af=4, target="www.udsm.ac.tz", description="intra ping1 19 - " + country)
ping2 = Ping(af=4, target="www.sua.ac.tz", description="intra ping2 20 - " + country)
traceroute1 = Traceroute(af=4, target="www.udsm.ac.tz", description="intra traceroute1 19 - " + country, protocol="ICMP")
traceroute2 = Traceroute(af=4, target="www.sua.ac.tz", description="intra traceroute2 20 - " + country, protocol="ICMP")
initRequest(country, source, ping1, ping2, traceroute1, traceroute2)
# ============================================================================ | [
"Luqmaan.Salie@bytes.co.za"
] | Luqmaan.Salie@bytes.co.za |
74c6894fa214ce51cd27b0ee0cd70ed9910e3823 | c2cccc43fb8264e9b03a791d12f122067f6e3614 | /likes/templatetags/likes_tags.py | 0979246b16ad35bd869122427389a06224a0ae9c | [] | no_license | cwwjyh/mysite | 1990a8ce9c04b5e9f302fc4071f26d2eb41da2f3 | 36650b9cf4bf918d26eac946ca62796ce8538e14 | refs/heads/master | 2022-06-13T06:10:59.888696 | 2019-04-09T12:07:55 | 2019-04-09T12:07:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | from django import template
from django.contrib.contenttypes.models import ContentType
from ..models import LikeCount, LikeRecord
register = template.Library()
@register.simple_tag
def get_like_count(obj):#获取点赞的数量
content_type = ContentType.objects.get_for_model(obj)
like_count, created = LikeCount.objects.get_or_create(content_type=content_type, object_id=obj.pk)
return like_count.liked_num
@register.simple_tag(takes_context=True)
def get_like_status(context, obj):
content_type = ContentType.objects.get_for_model(obj)
user = context['user']
if not user.is_authenticated: #如果没有登录
return ''
if LikeRecord.objects.filter(content_type=content_type, object_id=obj.pk, user=user).exists():
return 'active'
else:
return ''
@register.simple_tag
def get_content_type(obj):
content_type = ContentType.objects.get_for_model(obj)
return content_type.model | [
"1528842520@qq.com"
] | 1528842520@qq.com |
ca597ff9b8f4bf051adf07819bebf75eec4aabed | 37143a0573d66b054a260cc7be9ce92db9a039ac | /danjaysci/institutions/models.py | a479c8cc0db9cc22219ec0c31fd919b5b8e7cf5d | [] | no_license | danielsenhwong/danjaysci | e39d943022b789d824e483cb6c5e8f1a66fd2bfa | 5e18f53480feda00fb85538ce46da3dc11f74011 | refs/heads/master | 2021-01-01T05:50:11.585836 | 2017-06-28T14:03:42 | 2017-06-28T14:03:42 | 94,346,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,939 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Institution(models.Model):
# Relationships
parent = models.ForeignKey(
'self',
on_delete = models.PROTECT,
blank = True,
null = True,
)
# Attributes
name = models.CharField(max_length = 64)
short_name = models.CharField(max_length = 16)
# Manager
# Functions
def __str__(self):
if self.parent is None:
ustr = '%s' % (self.name)
else:
ustr = '%s, %s' % (self.name, self.parent)
return ustr
# Meta
class Meta:
order_with_respect_to = 'parent'
class Department(models.Model):
institution = models.ForeignKey(
Institution,
on_delete = models.PROTECT
)
chair = models.ForeignKey(
User,
on_delete = models.PROTECT,
)
name = models.CharField(max_length=128)
abbreviation = models.CharField(max_length=8)
def __str__(self):
return '%s (%s)' % (self.name, self.institution.short_name)
class Meta:
order_with_respect_to = 'institution'
class Program(models.Model):
institution = models.ForeignKey(
Institution,
on_delete = models.PROTECT,
)
chair = models.ForeignKey(
User,
on_delete = models.PROTECT,
)
name = models.CharField(max_length=128)
abbreviation = models.CharField(max_length=8)
def __str__(self):
return '%s (%s)' % (self.name, self.institution.short_name)
class Meta:
order_with_respect_to = 'institution'
class Group(models.Model): # Labs, etc.
# Define lists of choices
PRINCIPAL_INVESTIGATOR = 1
CHAIR = 2
DIRECTOR = 4
MANAGER = 3
PRINCIPAL = 5
LEAD_TITLE_CHOICES = (
(DIRECTOR, 'Director'),
(MANAGER, 'Manager'),
(CHAIR, 'Chair'),
(PRINCIPAL_INVESTIGATOR, 'Principal Investigator'),
(PRINCIPAL, 'Principal'),
)
# What department is this group associated with?
department = models.ForeignKey(
Department,
related_name = "group",
on_delete = models.PROTECT,
)
# What program is this group associated with?
program = models.ManyToManyField(Program)
# Who leads the group? E.g. principal investigator, manager, director
lead = models.ForeignKey(
User,
related_name = "group",
on_delete = models.PROTECT,
)
# What is the lead's title? Default = PI
lead_title = models.IntegerField(
choices = LEAD_TITLE_CHOICES,
default = PRINCIPAL_INVESTIGATOR,
)
# What is this group called?
name = models.CharField(max_length = 128)
# Where is this group located? E.g. building and room
location = models.CharField(
max_length = 128,
)
# Build the display string for this group from the group name and the instution(s) it is affiliated with. Every group is required to be associated with a department, but zero or multiple programs.
def __str__(self):
# Start with the group name and the department institution
group_str = '%s, %s' % (self.name, self.department.institution.short_name)
# Now add the program institution(s) if the group is associated with one, but only unique ones via distinct() call. No need to repeat. Add the short_name to the string.
if self.program:
for inst in self.program.all().values('institution').distinct():
group_str += '/%s' % (Institution.objects.get(pk=inst['institution']).short_name)
return group_str
class Meta:
ordering = ['name']
class Funding(models.Model):
# Define lists of choices; potentially split this off later
# List from "Types of Awards" described by Johns Hopkins Office of Research Administration http://www.hopkinsmedicine.org/research/synergy/ora/handbook/handbook_II.html
CONTRACT = 0
COOPERATIVE = 1
DONATION = 2
GRANT = 3
FELLOWSHIP = 4
FUNDING_TYPE_CHOICES = (
(CONTRACT, 'Contract'),
(COOPERATIVE, 'Cooperative Agreement'),
(DONATION, 'Donation/Gift'),
(GRANT, 'Grant'),
(FELLOWSHIP, 'Fellowship'),
)
# Who was this awarded to?
awarded_to = models.ForeignKey(
User,
on_delete = models.PROTECT,
)
# What type of funding is this? Default = grant
funding_type = models.IntegerField(
choices = FUNDING_TYPE_CHOICES,
default = GRANT,
)
# Where did this funding come from? E.g. agency (NIH, NSF, HHS, USDA, DoD, individual, etc.)
funding_source = models.CharField(max_length=64)
# What is the name of this award?
name = models.CharField(
verbose_name = "Award name",
max_length = 128,
)
short_name = models.CharField(max_length=32)
# What is the award number? N/A for none
number = models.CharField(
verbose_name = "Award number",
max_length = 64,
)
# What is the internal grant code for this award? Tufts uses a DeptID-GrantCode format.
dept_id = models.CharField(
verbose_name = 'Dept ID',
max_length = 7
)
grant_code = models.CharField(
max_length = 6,
blank = True
)
# What are the active dates of this funding?
start_date = models.DateField()
end_date = models.DateField()
# What is the abstract or description of this award?
abstract = models.TextField(
blank = True,
)
def __str__(self):
return '%s-%s (%s %s, %s)' % (self.dept_id, self.grant_code, self.short_name, Funding.FUNDING_TYPE_CHOICES[self.funding_type][1], self.awarded_to)
def deptid_grantcode(self):
return '%s-%s' & (self.dept_id, self.grant_code)
class Meta:
verbose_name_plural = "Funding"
ordering = ['-end_date']
| [
"danielsenhwong@gmail.com"
] | danielsenhwong@gmail.com |
64a694d6c95f4ea237880b1e4abbce5a36e03343 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-integrations/connectors/destination-weaviate/destination_weaviate/client.py | 3ba83b2a4a53a92af1f8413bd85c69ca41b056c9 | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 6,094 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
import logging
import time
import uuid
from dataclasses import dataclass
from typing import Any, List, Mapping, MutableMapping
import weaviate
from .utils import generate_id, parse_id_schema, parse_vectors, stream_to_class_name
@dataclass
class BufferedObject:
id: str
properties: Mapping[str, Any]
vector: List[Any]
class_name: str
class WeaviatePartialBatchError(Exception):
pass
class Client:
def __init__(self, config: Mapping[str, Any], schema: Mapping[str, str]):
self.client = self.get_weaviate_client(config)
self.config = config
self.batch_size = int(config.get("batch_size", 100))
self.schema = schema
self.vectors = parse_vectors(config.get("vectors"))
self.id_schema = parse_id_schema(config.get("id_schema"))
self.buffered_objects: MutableMapping[str, BufferedObject] = {}
self.objects_with_error: MutableMapping[str, BufferedObject] = {}
def buffered_write_operation(self, stream_name: str, record: MutableMapping):
if self.id_schema.get(stream_name, "") in record:
id_field_name = self.id_schema.get(stream_name, "")
record_id = generate_id(record.get(id_field_name))
del record[id_field_name]
else:
if "id" in record:
record_id = generate_id(record.get("id"))
del record["id"]
# Weaviate will throw an error if you try to store a field with name _id
elif "_id" in record:
record_id = generate_id(record.get("_id"))
del record["_id"]
else:
record_id = uuid.uuid4()
record_id = str(record_id)
# TODO support nested objects instead of converting to json string when weaviate supports this
for k, v in record.items():
if self.schema[stream_name].get(k, "") == "jsonify":
record[k] = json.dumps(v)
# Handling of empty list that's not part of defined schema otherwise Weaviate throws invalid string property
if isinstance(v, list) and len(v) == 0 and k not in self.schema[stream_name]:
record[k] = ""
missing_properties = set(self.schema[stream_name].keys()).difference(record.keys()).discard("id")
for prop in missing_properties or []:
record[prop] = None
additional_props = set(record.keys()).difference(self.schema[stream_name].keys())
for prop in additional_props or []:
if isinstance(record[prop], dict):
record[prop] = json.dumps(record[prop])
if isinstance(record[prop], list) and len(record[prop]) > 0 and isinstance(record[prop][0], dict):
record[prop] = json.dumps(record[prop])
# Property names in Weaviate have to start with lowercase letter
record = {k[0].lower() + k[1:]: v for k, v in record.items()}
vector = None
if stream_name in self.vectors:
vector_column_name = self.vectors.get(stream_name)
vector = record.get(vector_column_name)
del record[vector_column_name]
class_name = stream_to_class_name(stream_name)
self.client.batch.add_data_object(record, class_name, record_id, vector=vector)
self.buffered_objects[record_id] = BufferedObject(record_id, record, vector, class_name)
if self.client.batch.num_objects() >= self.batch_size:
self.flush()
def flush(self, retries: int = 3):
if len(self.objects_with_error) > 0 and retries == 0:
error_msg = f"Objects had errors and retries failed as well. Object IDs: {self.objects_with_error.keys()}"
raise WeaviatePartialBatchError(error_msg)
results = self.client.batch.create_objects()
self.objects_with_error.clear()
for result in results:
errors = result.get("result", {}).get("errors", [])
if errors:
obj_id = result.get("id")
self.objects_with_error[obj_id] = self.buffered_objects.get(obj_id)
logging.info(f"Object {obj_id} had errors: {errors}. Going to retry.")
for buffered_object in self.objects_with_error.values():
self.client.batch.add_data_object(
buffered_object.properties, buffered_object.class_name, buffered_object.id, buffered_object.vector
)
if len(self.objects_with_error) > 0 and retries > 0:
logging.info("sleeping 2 seconds before retrying batch again")
time.sleep(2)
self.flush(retries - 1)
self.buffered_objects.clear()
def delete_stream_entries(self, stream_name: str):
class_name = stream_to_class_name(stream_name)
try:
original_schema = self.client.schema.get(class_name=class_name)
self.client.schema.delete_class(class_name=class_name)
logging.info(f"Deleted class {class_name}")
self.client.schema.create_class(original_schema)
logging.info(f"Recreated class {class_name}")
except weaviate.exceptions.UnexpectedStatusCodeException as e:
if e.message.startswith("Get schema! Unexpected status code: 404"):
logging.info(f"Class {class_name} did not exist.")
else:
raise e
@staticmethod
def get_weaviate_client(config: Mapping[str, Any]) -> weaviate.Client:
url, username, password = config.get("url"), config.get("username"), config.get("password")
if username and not password:
raise Exception("Password is required when username is set")
if password and not username:
raise Exception("Username is required when password is set")
if username and password:
credentials = weaviate.auth.AuthClientPassword(username, password)
return weaviate.Client(url=url, auth_client_secret=credentials)
return weaviate.Client(url=url, timeout_config=(2, 2))
| [
"noreply@github.com"
] | thomas-vl.noreply@github.com |
2f5083100381596f3e9acb9a0aa0486109c23644 | 4df60458287c16a8353c3771f380168c7058fa7c | /build/turtlebot_simulator/turtlebot_gazebo/catkin_generated/pkg.develspace.context.pc.py | 0f0af2934db83deb447a4e4611d731e73fa6e233 | [] | no_license | Aslanfmh65/slam_mapping_ros_package | ed19b27981748eda41a0fa6a5c7dacf1127a9128 | ed020eeacd100585758c47c203e4af18d2089f93 | refs/heads/master | 2020-05-18T16:23:28.906883 | 2019-07-12T05:58:24 | 2019-07-12T05:58:24 | 184,523,327 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_gazebo"
PROJECT_SPACE_DIR = "/home/robond/Desktop/myrobot/catkin_ws/devel"
PROJECT_VERSION = "2.2.3"
| [
"aslanfmh@gmail.com"
] | aslanfmh@gmail.com |
3e95f067fba14f5bd1ebdb04147f9f4ed532c262 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/60d6f99657e4479ab9beda33d53f774e.py | 3a8da1104c746329450661a00dc2b7bf64a87b09 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 227 | py | #test
def hey(string):
if string.isupper():
return 'Whoa, chill out!'
elif len(string) > 0 and string[-1] == '?':
return 'Sure.'
elif len(string.strip()) == 0:
return 'Fine. Be that way!'
else:
return 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
50a412c2dd20b7268687cbf8965aa391bc61789a | 354d5deebef630d56d9fe86189fbdc0774a09de8 | /lib/python3.6/site-packages/django/db/backends/base/base.py | 2487ac6f6933051a4d096e8f780309ee223f8199 | [] | no_license | VKhayretdinov/accountant | f0564aa13f091334c0be3c83e10dd047d40943e0 | 681d98d5cd0c7a7048f4802b60935f30859fa0d9 | refs/heads/master | 2020-04-19T22:10:20.650068 | 2019-02-01T02:55:33 | 2019-02-01T02:55:33 | 168,462,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,323 | py | import copy
import time
import warnings
from collections import deque
from contextlib import contextmanager
import _thread
import pytz
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import utils
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.backends.signals import connection_created
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, DatabaseErrorWrapper
from django.utils import timezone
from django.utils.functional import cached_property
NO_DB_ALIAS = '__no_db__'
class BaseDatabaseWrapper:
"""Represent a database connection."""
# Mapping of Field objects to their column types.
data_types = {}
# Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
data_types_suffix = {}
# Mapping of Field objects to their SQL for CHECK constraints.
data_type_check_constraints = {}
ops = None
vendor = 'unknown'
display_name = 'unknown'
SchemaEditorClass = None
# Classes instantiated in __init__().
client_class = None
creation_class = None
features_class = None
introspection_class = None
ops_class = None
validation_class = BaseDatabaseValidation
queries_limit = 9000
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# Connection related attributes.
# The underlying database connection.
self.connection = None
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.settings_dict = settings_dict
self.alias = alias
# Query logging in debug mode or when explicitly enabled.
self.queries_log = deque(maxlen=self.queries_limit)
self.force_debug_cursor = False
# Transaction related attributes.
# Tracks if the connection is in autocommit mode. Per PEP 249, by
# default, it isn't.
self.autocommit = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# Increment to generate unique savepoint ids.
self.savepoint_state = 0
# List of savepoints created by 'atomic'.
self.savepoint_ids = []
# Tracks if the outermost 'atomic' block should commit on exit,
# ie. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# Connection termination related attributes.
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
# Thread-safety related attributes.
self.allow_thread_sharing = allow_thread_sharing
self._thread_ident = _thread.get_ident()
# A list of no-argument functions to run when the transaction commits.
# Each entry is an (sids, func) tuple, where sids is a set of the
# active savepoint IDs when this function was registered.
self.run_on_commit = []
# Should we run the on-commit hooks the next time set_autocommit(True)
# is called?
self.run_commit_hooks_on_set_autocommit_on = False
# A stack of wrappers to be invoked around execute()/executemany()
# calls. Each entry is a function taking five arguments: execute, sql,
# params, many, and context. It's the function's responsibility to
# call execute(sql, params, many, context).
self.execute_wrappers = []
self.client = self.client_class(self)
self.creation = self.creation_class(self)
self.features = self.features_class(self)
self.introspection = self.introspection_class(self)
self.ops = self.ops_class(self)
self.validation = self.validation_class(self)
def ensure_timezone(self):
"""
Ensure the connection's timezone is set to `self.timezone_name` and
return whether it changed or not.
"""
return False
@cached_property
def timezone(self):
"""
Time zone for datetimes stored as naive values in the database.
Return a tzinfo object or None.
This is only needed when time zone support is enabled and the database
doesn't support time zones. (When the database supports time zones,
the adapter handles aware datetimes so Django doesn't need to.)
"""
if not settings.USE_TZ:
return None
elif self.features.supports_timezones:
return None
elif self.settings_dict['TIME_ZONE'] is None:
return timezone.utc
else:
return pytz.timezone(self.settings_dict['TIME_ZONE'])
@cached_property
def timezone_name(self):
"""
Name of the time zone of the database connection.
"""
if not settings.USE_TZ:
return settings.TIME_ZONE
elif self.settings_dict['TIME_ZONE'] is None:
return 'UTC'
else:
return self.settings_dict['TIME_ZONE']
@property
def queries_logged(self):
return self.force_debug_cursor or settings.DEBUG
@property
def queries(self):
if len(self.queries_log) == self.queries_log.maxlen:
warnings.warn(
"Limit for query logging exceeded, only the last {} queries "
"will be returned.".format(self.queries_log.maxlen))
return list(self.queries_log)
# ##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Return a dict of parameters suitable for get_new_connection."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method')
def get_new_connection(self, conn_params):
"""Open a connection to the database."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method')
def init_connection_state(self):
"""Initialize the database connection settings."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method')
def create_cursor(self, name=None):
"""Create a cursor. Assume that a connection is established."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')
# ##### Backend-specific methods for creating connections #####
def connect(self):
"""Connect to the database. Assume that the connection is closed."""
# Check for invalid configurations.
self.check_settings()
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.needs_rollback = False
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.time() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.set_autocommit(self.settings_dict['AUTOCOMMIT'])
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
self.run_on_commit = []
def check_settings(self):
if self.settings_dict['TIME_ZONE'] is not None:
if not settings.USE_TZ:
raise ImproperlyConfigured(
"Connection '%s' cannot set TIME_ZONE because USE_TZ is "
"False." % self.alias)
elif self.features.supports_timezones:
raise ImproperlyConfigured(
"Connection '%s' cannot set TIME_ZONE because its engine "
"handles time zones conversions natively." % self.alias)
def ensure_connection(self):
"""Guarantee that a connection to the database is established."""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
# ##### Backend-specific wrappers for PEP-249 connection methods #####
def _prepare_cursor(self, cursor):
"""
Validate the connection is usable and perform database cursor wrapping.
"""
self.validate_thread_sharing()
if self.queries_logged:
wrapped_cursor = self.make_debug_cursor(cursor)
else:
wrapped_cursor = self.make_cursor(cursor)
return wrapped_cursor
def _cursor(self, name=None):
self.ensure_connection()
with self.wrap_database_errors:
return self._prepare_cursor(self.create_cursor(name))
def _commit(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
# ##### Generic wrappers for PEP-249 connection methods #####
def cursor(self):
"""Create a cursor, opening a connection if necessary."""
return self._cursor()
def commit(self):
"""Commit a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
# A successful commit means that the database connection works.
self.errors_occurred = False
self.run_commit_hooks_on_set_autocommit_on = True
def rollback(self):
"""Roll back a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
# A successful rollback means that the database connection works.
self.errors_occurred = False
self.needs_rollback = False
self.run_on_commit = []
def close(self):
"""Close the connection to the database."""
self.validate_thread_sharing()
self.run_on_commit = []
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
if self.closed_in_transaction or self.connection is None:
return
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
# ##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
# ##### Generic savepoint management methods #####
def savepoint(self):
"""
Create a savepoint inside the current transaction. Return an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = _thread.get_ident()
tid = str(thread_ident).replace('-', '')
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Roll back to a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
# Remove any callbacks registered while this savepoint was active.
self.run_on_commit = [
(sids, func) for (sids, func) in self.run_on_commit if sid not in sids
]
def savepoint_commit(self, sid):
"""
Release a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
def clean_savepoints(self):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
# ##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method')
# ##### Generic transaction management methods #####
def get_autocommit(self):
"""Get the autocommit state."""
self.ensure_connection()
return self.autocommit
def set_autocommit(self, autocommit, force_begin_transaction_with_broken_autocommit=False):
"""
Enable or disable autocommit.
The usual way to start a transaction is to turn autocommit off.
SQLite does not properly start a transaction when disabling
autocommit. To avoid this buggy behavior and to actually enter a new
transaction, an explcit BEGIN is required. Using
force_begin_transaction_with_broken_autocommit=True will issue an
explicit BEGIN with SQLite. This option will be ignored for other
backends.
"""
self.validate_no_atomic_block()
self.ensure_connection()
start_transaction_under_autocommit = (
force_begin_transaction_with_broken_autocommit and not autocommit and
self.features.autocommits_when_autocommit_is_off
)
if start_transaction_under_autocommit:
self._start_transaction_under_autocommit()
else:
self._set_autocommit(autocommit)
self.autocommit = autocommit
if autocommit and self.run_commit_hooks_on_set_autocommit_on:
self.run_and_clear_commit_hooks()
self.run_commit_hooks_on_set_autocommit_on = False
def get_rollback(self):
"""Get the "needs rollback" flag -- for *advanced use* only."""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""Raise an error if an atomic block is active."""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active.")
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block.")
# ##### Foreign key constraints checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Disable foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
constraint checking. Should return True if the constraints were
disabled and will need to be reenabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
# ##### Connection termination handling #####
def is_usable(self):
"""
Test if the database connection is usable.
This method may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require an is_usable() method")
def close_if_unusable_or_obsolete(self):
"""
Close the current connection if unrecoverable errors have occurred
or if it outlived its maximum age.
"""
if self.connection is not None:
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
self.close()
return
# If an exception other than DataError or IntegrityError occurred
# since the last commit / rollback, check if the connection works.
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
else:
self.close()
return
if self.close_at is not None and time.time() >= self.close_at:
self.close()
return
# ##### Thread safety handling #####
def validate_thread_sharing(self):
"""
Validate that the connection isn't accessed by another thread than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raise an exception if the validation fails.
"""
if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
raise DatabaseError(
"DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, _thread.get_ident())
)
# ##### Miscellaneous #####
def prepare_database(self):
"""
Hook to do any database check or preparation, generally called before
migrating a project or an app.
"""
pass
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def chunked_cursor(self):
"""
Return a cursor that tries to avoid caching in the database (if
supported by the database), otherwise return a regular cursor.
"""
return self.cursor()
def make_debug_cursor(self, cursor):
"""Create a cursor that logs all queries in self.queries_log."""
return utils.CursorDebugWrapper(cursor, self)
def make_cursor(self, cursor):
"""Create a cursor without debug logging."""
return utils.CursorWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provide a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
try:
with self.cursor() as cursor:
yield cursor
finally:
if must_close:
self.close()
@property
def _nodb_connection(self):
"""
Return an alternative connection to be used when there is no need to
access the blog database, specifically for test db creation/deletion.
This also prevents the production database from being exposed to
potential child threads while (or after) the test database is destroyed.
Refs #10868, #17786, #16969.
"""
return self.__class__(
{**self.settings_dict, 'NAME': None},
alias=NO_DB_ALIAS,
allow_thread_sharing=False,
)
def _start_transaction_under_autocommit(self):
"""
Only required when autocommits_when_autocommit_is_off = True.
"""
raise NotImplementedError(
'subclasses of BaseDatabaseWrapper may require a '
'_start_transaction_under_autocommit() method'
)
def schema_editor(self, *args, **kwargs):
"""
Return a new instance of this backend's SchemaEditor.
"""
if self.SchemaEditorClass is None:
raise NotImplementedError(
'The SchemaEditorClass attribute of this database wrapper is still None')
return self.SchemaEditorClass(self, *args, **kwargs)
def on_commit(self, func):
if self.in_atomic_block:
# Transaction in progress; save for execution on commit.
self.run_on_commit.append((set(self.savepoint_ids), func))
elif not self.get_autocommit():
raise TransactionManagementError('on_commit() cannot be used in manual transaction management')
else:
# No transaction in progress and in autocommit mode; execute
# immediately.
func()
def run_and_clear_commit_hooks(self):
self.validate_no_atomic_block()
current_run_on_commit = self.run_on_commit
self.run_on_commit = []
while current_run_on_commit:
sids, func = current_run_on_commit.pop(0)
func()
@contextmanager
def execute_wrapper(self, wrapper):
"""
Return a context manager under which the wrapper is applied to suitable
database query executions.
"""
self.execute_wrappers.append(wrapper)
try:
yield
finally:
self.execute_wrappers.pop()
def copy(self, alias=None, allow_thread_sharing=None):
"""
Return a copy of this connection.
For tests that require two connections to the same database.
"""
settings_dict = copy.deepcopy(self.settings_dict)
if alias is None:
alias = self.alias
if allow_thread_sharing is None:
allow_thread_sharing = self.allow_thread_sharing
return type(self)(settings_dict, alias, allow_thread_sharing)
| [
"flloppie@gmail.com"
] | flloppie@gmail.com |
80f8c431d1b474ac08e8b673e9393bc4a84c4003 | fed18966525169edf96e6178e36c6fb5ab5bfe5c | /easyTools/print_zh.py | d75657157b6f737a0b168fdf91a3fe9c5d45d5e0 | [] | no_license | huashuolee/borqs_stress | cef50c37b0dc0abdfcecd4f5de90925a61e53e88 | e3375b1032ec5a0dc625dc04c4924192ffc90f26 | refs/heads/master | 2021-01-19T01:37:11.626225 | 2019-07-12T13:21:25 | 2019-07-12T13:21:25 | 11,167,198 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | import os
import sys
print sys.argv[1]
os.mkdir(sys.argv[1])
| [
"huashuolee@gmail.com"
] | huashuolee@gmail.com |
bf64c9862aa6fd295ce0cc911835562fde0bac8f | 55fc41d645e2f2cb4e94eaeb01c21a8f36b522e3 | /data_processing/split_dataset.py | b6b410c63593ed0c4727101c19b45e3069e4d2bb | [] | no_license | andreiqv/pytorch_scale_classifier | 6c4515127ee9ad182242cc429326ed99984c2398 | 9448690ab0a2c5e9ec4c235ff85360be22572949 | refs/heads/master | 2020-04-04T17:34:27.169290 | 2018-11-08T09:24:35 | 2018-11-08T09:24:35 | 156,126,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,137 | py | import os
import sys
import random
if os.path.exists('.local'):
src_dir = '/w/WORK/ineru/06_scales/_dataset/copy/'
dst_dir = '/w/WORK/ineru/06_scales/_dataset/splited/'
else:
src_dir = '/home/andrei/Data/Datasets/Scales/classifier_dataset_181018/'
dst_dir = '/home/andrei/Data/Datasets/Scales/splited/'
parts = ['train', 'valid', 'test']
def copy_files_to_subdirs(src_dir, dst_dir, parts, ratio=[1,1,1]):
src_dir = src_dir.rstrip('/')
dst_dir = dst_dir.rstrip('/')
os.system('mkdir -p {}'.format(dst_dir))
for p in parts:
os.system('mkdir -p {}'.format(dst_dir + '/' + p))
subdirs = os.listdir(src_dir)
for class_name in subdirs:
subdir = src_dir + '/' + class_name
if not os.path.isdir(subdir): continue
file_names = os.listdir(subdir)
if len(file_names) == 0:
print('{0} - empty subdir'.format(class_name))
continue
# calculate train, valid and test sizes
num_files = len(file_names)
num_valid = num_files * ratio[1] // sum(ratio)
num_test = num_files * ratio[2] // sum(ratio)
num_train = num_files - num_valid - num_test
min_num_train = 0 # if 0, then do nothing
if min_num_train > 0:
if num_train < min_num_train:
(num_train, num_valid, num_test) = (num_files, 0, 0)
# SHUFFLE OR SORT
random.shuffle(file_names)
#file_names.sort()
files = dict()
files['train'] = file_names[ : num_train]
files['valid'] = file_names[num_train : num_train + num_valid]
files['test'] = file_names[num_train + num_valid : ]
print('[{}] - {} - {}:{}:{}'.\
format(class_name, num_files, num_train, num_valid, num_test))
#print('train:valid:test = ', len(files['train']),\
# len(files['valid']), len(files['test']))
for part in parts:
cmd = 'mkdir -p {}'.format(dst_dir + '/' + part + '/' + class_name)
os.system(cmd)
#print(cmd)
for file_name in files[part]:
src_path = subdir + '/' + file_name
dst_path = dst_dir + '/' + part + '/' + class_name + '/' + file_name
cmd = 'cp {} {}'.format(src_path, dst_path)
os.system(cmd)
#print(cmd)
if __name__ == '__main__':
copy_files_to_subdirs(src_dir, dst_dir, parts, ratio=[16,3,1])
| [
"phxv@mail.ru"
] | phxv@mail.ru |
dc5fa1c981ea4cd14cfd074ce4f21cb5cddcf069 | 4cf64e5d029119fc8b43c1d98c9aa2de9fafaff2 | /train/server_agent_train/server.py | f0197a21ef4711db2b97f315c2faba750964e329 | [] | no_license | HengJayWang/distributed_learning | 68d8202d8616f9e3a3bab5e25ec5833c476a1bec | fb12b4acff68bf1e51326a8cc490f2ce1a238f63 | refs/heads/master | 2020-07-13T18:45:47.750511 | 2019-08-29T05:40:41 | 2019-08-29T05:40:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,214 | py | import torch
from torch.autograd import Variable
import argparse
# Model Imports
from model.LeNet import *
from model.AlexNet import *
from model.MLP import *
from model.VGGNet import *
# Socket Imports
from socket_.socket_ import *
from logger import *
class Server(Logger):
    """Server half of a split-learning training loop.

    Each agent runs the lower layers of the model locally and streams the
    cut-layer activations to this server; the server runs the upper layers,
    computes the loss, and streams the gradient at the cut back to the
    agent.  Agents take turns in a fixed ring order and hand the snapshot
    of the lower sub-model to their successor between turns.
    """

    def __init__(self, train_args):
        """Set up socket bookkeeping, the server sub-model and its optimizer.

        :param train_args: parsed argparse namespace; must provide at least
            model, agent_nums, lr, momentum, seed, no_cuda, batch_size,
            epochs and log_interval.
        """
        # server socket setting
        self.server_port_begin = 8080
        self.server_socks = []

        # agent host port list for testing
        self.agent_port_begin = 2048
        self.agents_attrs = []

        # per-agent sample counts reported by the agents, plus running
        # totals (generalized from the original hard-coded 4-agent lists)
        self.train_data_nums = [0] * train_args.agent_nums
        self.all_train_data_nums = 0
        self.test_data_nums = [0] * train_args.agent_nums
        self.all_test_data_nums = 0

        # training setting
        self.is_first_training = True
        self.train_args = train_args
        # NOTE: compare strings with ==; the original used `is`, which
        # tests identity and only worked by accident of string interning.
        if self.train_args.model == 'LeNet':
            self.model = Server_LeNet()
        elif self.train_args.model == 'AlexNet':
            self.model = Server_AlexNet()
        elif self.train_args.model == 'MLP':
            self.model = Server_MLP()
        elif self.train_args.model == 'VGGNet':
            self.model = VGGNet()
        else:
            # fail fast instead of crashing later with AttributeError
            raise ValueError('unknown model: %s' % self.train_args.model)

        # training setup
        self.train_args.cuda = not self.train_args.no_cuda and torch.cuda.is_available()
        # seed the CPU RNG so that results are deterministic
        torch.manual_seed(self.train_args.seed)
        if train_args.cuda:
            torch.cuda.manual_seed(self.train_args.seed)  # set a random seed for the current GPU
            self.model.cuda()  # move all model parameters to the GPU
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.train_args.lr,
                                   momentum=self.train_args.momentum)

    def conn_to_agents(self):
        """Open one listening socket per agent, accept each connection, and
        record every agent's (host, port) used for agent-to-agent hand-off."""
        for i in range(self.train_args.agent_nums):
            server_sock = Socket(('localhost', self.server_port_begin + i), True)
            self.server_socks.append(server_sock)

        for i in range(self.train_args.agent_nums):
            self.server_socks[i].accept()
            agents_attr = {
                'name': 'agent_' + str(i + 1),
                'host_port': (self.server_socks[i].addr[0], self.agent_port_begin + i)
            }
            self.agents_attrs.append(agents_attr)

    def get_prev_next_agent(self, agent_idx):
        """Return the attribute dicts of the ring neighbours of ``agent_idx``.

        Indices wrap around, so agent 0's predecessor is the last agent and
        the last agent's successor is agent 0.
        """
        prev_agent_idx = agent_idx - 1
        next_agent_idx = agent_idx + 1
        if prev_agent_idx == -1:
            prev_agent_idx = self.train_args.agent_nums - 1
        if next_agent_idx == self.train_args.agent_nums:
            next_agent_idx = 0

        prev_agent = self.agents_attrs[prev_agent_idx]
        next_agent = self.agents_attrs[next_agent_idx]

        return prev_agent, next_agent

    def get_data_nums(self):
        """Receive every agent's train/test sample counts and accumulate
        the overall totals.

        (The original declared ``global`` names that were never defined at
        module level and never read -- removed; only ``self`` state is
        updated here.)
        """
        for i in range(self.train_args.agent_nums):
            self.train_data_nums[i] = self.server_socks[i].recv('data_nums')
            self.all_train_data_nums += self.train_data_nums[i]

            self.test_data_nums[i] = self.server_socks[i].recv('data_nums')
            self.all_test_data_nums += self.test_data_nums[i]

    def send_train_args(self):
        """Broadcast the training hyper-parameters and each agent's own
        (host, port) for the agent-to-agent snapshot channel."""
        for i in range(self.train_args.agent_nums):
            # send train args to agent
            self.server_socks[i].send(self.train_args, 'train_args')
            # send agent IP and distributed port
            self.server_socks[i].send(self.agents_attrs[i]['host_port'], 'cur_host_port')

    def send_prev_next_agent_attrs(self, agent_idx):
        """Tell agent ``agent_idx`` who its ring neighbours are, then wait
        for the model snapshot hand-off to finish.

        On the very first turn there is no predecessor snapshot, so the
        previous-agent attributes are replaced with ``None`` and no wait
        is performed.
        """
        # get previous and next agent attributes
        prev_agent_attrs, next_agent_attrs = self.get_prev_next_agent(agent_idx)

        # send prev_agent_attrs, next_agent_attrs to agent
        if self.is_first_training:
            prev_agent_attrs = None
        self.server_socks[agent_idx].send((prev_agent_attrs, next_agent_attrs), 'prev_next_agent_attrs')

        # VERY IMPORTANT !!! the server blocks here until the previous
        # agent has sent its model snapshot to the current agent
        if not self.is_first_training:
            self.server_socks[agent_idx].sleep()
        self.is_first_training = False

    def train_with_cur_agent(self, agent_idx, epoch, trained_data_num):
        """Run the split forward/backward loop over one agent's train set.

        For every batch: receive the agent's cut-layer activations and the
        labels, finish the forward pass on the server model, backpropagate,
        and send the gradient of the activations back to the agent.

        :param agent_idx: index of the agent currently training
        :param epoch: current epoch number (for logging only)
        :param trained_data_num: samples already seen this epoch
        :return: updated ``trained_data_num``
        """
        data_nums = self.train_data_nums[agent_idx]
        batches = (data_nums - 1) // self.train_args.batch_size + 1
        for batch_idx in range(1, batches + 1):
            self.optimizer.zero_grad()

            # get agent_output and target from agent
            agent_output = self.server_socks[agent_idx].recv('agent_output')
            target = self.server_socks[agent_idx].recv('target')

            # clone so that the gradient w.r.t. the cut-layer activations
            # is captured locally and can be shipped back to the agent
            agent_output_clone = Variable(agent_output).float()
            if self.train_args.cuda:
                agent_output_clone = agent_output_clone.cuda()
                target = target.cuda()
            agent_output_clone.requires_grad_()

            # server forward
            server_output = self.model(agent_output_clone)
            loss = F.cross_entropy(server_output, target)

            # server backward
            loss.backward()
            self.optimizer.step()

            # send gradient to agent
            self.server_socks[agent_idx].send(agent_output_clone.grad.data, 'agent_output_clone_grad')

            trained_data_num += len(target)
            if batch_idx % self.train_args.log_interval == 0:
                print('Train Epoch: {} at {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, self.agents_attrs[agent_idx], trained_data_num, self.all_train_data_nums,
                    100. * batch_idx / batches, loss.item()))

        return trained_data_num

    def train_epoch(self, epoch):
        """Train one epoch, visiting every agent once in ring order."""
        self.model.train()
        trained_data_num = 0
        for i in range(self.train_args.agent_nums):
            self.send_prev_next_agent_attrs(i)
            print('starting training with' + str(self.agents_attrs[i]))
            trained_data_num = self.train_with_cur_agent(i, epoch, trained_data_num)

    def test_with_cur_agent(self, agent_idx, test_loss, correct):
        """Evaluate the split model on one agent's test set.

        :param agent_idx: index of the agent currently evaluating
        :param test_loss: accumulated loss so far
        :param correct: accumulated number of correct predictions so far
        :return: (test_loss, correct) updated with this agent's batches
        """
        data_nums = self.test_data_nums[agent_idx]
        batches = (data_nums - 1) // self.train_args.batch_size + 1
        # no_grad: evaluation only -- avoids recording autograd graphs for
        # every batch (the accumulated `test_loss` tensor would otherwise
        # keep them all alive)
        with torch.no_grad():
            for batch_idx in range(1, batches + 1):
                # get agent_output and target from agent
                agent_output = self.server_socks[agent_idx].recv('agent_output')
                target = self.server_socks[agent_idx].recv('target')

                agent_output_clone = Variable(agent_output).float()
                if self.train_args.cuda:
                    agent_output_clone = agent_output_clone.cuda()
                    target = target.cuda()

                # server forward
                server_output = self.model(agent_output_clone)
                loss = F.cross_entropy(server_output, target)

                test_loss += loss
                pred = server_output.data.max(1)[1]
                correct += pred.eq(target.data).cpu().sum()

        return test_loss, correct

    def test_epoch(self, epoch):
        """Run one evaluation pass over every agent's test set and print
        the aggregate loss/accuracy.  On the last epoch, notify each agent
        that training is done so it can shut down."""
        self.model.eval()
        test_loss = 0
        correct = 0
        batches = 0
        for i in range(self.train_args.agent_nums):
            self.send_prev_next_agent_attrs(i)
            test_loss, correct = self.test_with_cur_agent(i, test_loss, correct)
            batches += (self.test_data_nums[i] - 1) // self.train_args.batch_size + 1
            # == (not `is`): identity comparison on ints is unreliable
            if epoch == self.train_args.epochs - 1:
                self.server_socks[i].send(True, 'is_training_done')
            else:
                self.server_socks[i].send(False, 'is_training_done')

        test_loss /= batches
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, self.all_test_data_nums,
            100. * correct / self.all_test_data_nums))

    def start_training(self):
        """Full driver: connect to agents, exchange setup data, then run
        the train/test loop for the configured number of epochs."""
        self.conn_to_agents()
        self.send_train_args()
        self.get_data_nums()

        for epoch in range(self.train_args.epochs):
            # start training and testing
            self.train_epoch(epoch=epoch)
            self.test_epoch(epoch=epoch)
| [
"z8663z@gmail.com"
] | z8663z@gmail.com |
58ee209fce6973eed2c5ca143bf8703fe8c9cf14 | 26fc5daa6f48b3faad242d1825f395c550dc99c9 | /code_numpy/Test.py | fb7969374cb54b2656fa10317e2c0232078ce35b | [] | no_license | zuswil/AI_Learn | 1ebd8ad22ac118bdeccdd7af54565a0680673602 | 8b00723e89a5c329f17a36068aaff894f47b63f8 | refs/heads/master | 2022-01-10T01:03:02.457742 | 2019-07-26T07:07:42 | 2019-07-26T07:07:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | print("I just want to text")
print("how to do") | [
"42296359+ExplorAction@users.noreply.github.com"
] | 42296359+ExplorAction@users.noreply.github.com |
70f815ceb96e519aeee611764fda1afe1aec5ec4 | bbc9351d567ad21846d547ffe19ce1489dd1a8ae | /ppocr/data/imaug/textdragon_process.py | 4df842fa296b2ec2855175e8d24c216540610928 | [
"Apache-2.0"
] | permissive | Bohrhh/paddleOCR_dragon | 45a0d86beb93979b5421c46abae163feb0b3fcb8 | e4ccf608a228a4f45c7b2a5d103f3fae47d1fc7b | refs/heads/master | 2023-07-25T19:05:56.560047 | 2021-09-05T16:36:10 | 2021-09-05T16:36:10 | 403,359,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,707 | py | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import cv2
import numpy as np
__all__ = ['TextDragonProcessTrain']
class TextDragonProcessTrain(object):
def __init__(self,
img_size=640,
min_crop_size=24,
min_text_size=4,
max_text_size=512,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
downscale=4,
tcl_ratio=0.3,
shrink_ratio_of_width=0.15,
**kwargs):
self.img_size=img_size
self.min_crop_size = min_crop_size
self.min_text_size = min_text_size
self.max_text_size = max_text_size
self.mean = mean * 3 if len(mean)==1 else mean
self.std = std * 3 if len(std)==1 else std
self.downscale = downscale
self.tcl_ratio = tcl_ratio
self.shrink_ratio_of_width = shrink_ratio_of_width
def quad_area(self, poly):
"""
compute area of a polygon
:param poly:
:return:
"""
edge = [(poly[1][0] - poly[0][0]) * (poly[1][1] + poly[0][1]),
(poly[2][0] - poly[1][0]) * (poly[2][1] + poly[1][1]),
(poly[3][0] - poly[2][0]) * (poly[3][1] + poly[2][1]),
(poly[0][0] - poly[3][0]) * (poly[0][1] + poly[3][1])]
return np.sum(edge) / 2.
def gen_quad_from_poly(self, poly):
"""
Generate min area quad from poly.
"""
point_num = poly.shape[0]
min_area_quad = np.zeros((4, 2), dtype=np.float32)
rect = cv2.minAreaRect(poly.astype(
np.int32)) # (center (x,y), (width, height), angle of rotation)
box = np.array(cv2.boxPoints(rect))
first_point_idx = 0
min_dist = 1e4
for i in range(4):
dist = np.linalg.norm(box[(i + 0) % 4] - poly[0]) + \
np.linalg.norm(box[(i + 1) % 4] - poly[point_num // 2 - 1]) + \
np.linalg.norm(box[(i + 2) % 4] - poly[point_num // 2]) + \
np.linalg.norm(box[(i + 3) % 4] - poly[-1])
if dist < min_dist:
min_dist = dist
first_point_idx = i
for i in range(4):
min_area_quad[i] = box[(first_point_idx + i) % 4]
return min_area_quad
def check_and_validate_polys(self, polys, tags, im_size):
"""
check so that the text poly is in the same direction,
and also filter some invalid polygons
:param polys:
:param tags:
:return:
"""
(h, w) = im_size
if polys.shape[0] == 0:
return polys, np.array([]), np.array([])
polys[:, :, 0] = np.clip(polys[:, :, 0], 0, w - 1)
polys[:, :, 1] = np.clip(polys[:, :, 1], 0, h - 1)
validated_polys = []
validated_tags = []
hv_tags = []
for poly, tag in zip(polys, tags):
quad = self.gen_quad_from_poly(poly)
p_area = self.quad_area(quad)
if abs(p_area) < 1:
print('invalid poly')
continue
if p_area > 0:
if tag == False:
print('poly in wrong direction')
tag = True # reversed cases should be ignore
poly = poly[(0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2,
1), :]
quad = quad[(0, 3, 2, 1), :]
len_w = np.linalg.norm(quad[0] - quad[1]) + np.linalg.norm(quad[3] -
quad[2])
len_h = np.linalg.norm(quad[0] - quad[3]) + np.linalg.norm(quad[1] -
quad[2])
hv_tag = 1
if len_w * 2.0 < len_h:
hv_tag = 0
validated_polys.append(poly)
validated_tags.append(tag)
hv_tags.append(hv_tag)
return np.array(validated_polys), np.array(validated_tags), np.array(
hv_tags)
def crop_area(self,
im,
polys,
tags,
hv_tags,
txts,
crop_background=False,
max_tries=25):
"""
make random crop from the input image
:param im:
:param polys: [b,4,2]
:param tags:
:param crop_background:
:param max_tries: 50 -> 25
:return:
"""
h, w, _ = im.shape
pad_h = h // 10
pad_w = w // 10
h_array = np.zeros((h + pad_h * 2), dtype=np.int32)
w_array = np.zeros((w + pad_w * 2), dtype=np.int32)
for poly in polys:
poly = np.round(poly, decimals=0).astype(np.int32)
minx = np.min(poly[:, 0])
maxx = np.max(poly[:, 0])
w_array[minx + pad_w:maxx + pad_w] = 1
miny = np.min(poly[:, 1])
maxy = np.max(poly[:, 1])
h_array[miny + pad_h:maxy + pad_h] = 1
# ensure the cropped area not across a text
h_axis = np.where(h_array == 0)[0]
w_axis = np.where(w_array == 0)[0]
if len(h_axis) == 0 or len(w_axis) == 0:
return im, polys, tags, hv_tags, txts
for i in range(max_tries):
xx = np.random.choice(w_axis, size=2)
xmin = np.min(xx) - pad_w
xmax = np.max(xx) - pad_w
xmin = np.clip(xmin, 0, w - 1)
xmax = np.clip(xmax, 0, w - 1)
yy = np.random.choice(h_axis, size=2)
ymin = np.min(yy) - pad_h
ymax = np.max(yy) - pad_h
ymin = np.clip(ymin, 0, h - 1)
ymax = np.clip(ymax, 0, h - 1)
if xmax - xmin < self.min_crop_size or \
ymax - ymin < self.min_crop_size:
continue
if polys.shape[0] != 0:
poly_axis_in_area = (polys[:, :, 0] >= xmin) & (polys[:, :, 0] <= xmax) \
& (polys[:, :, 1] >= ymin) & (polys[:, :, 1] <= ymax)
selected_polys = np.where(
np.sum(poly_axis_in_area, axis=1) == 4)[0]
else:
selected_polys = []
if len(selected_polys) == 0:
# no text in this area
if crop_background:
txts_tmp = []
for selected_poly in selected_polys:
txts_tmp.append(txts[selected_poly])
txts = txts_tmp
return im[ymin: ymax + 1, xmin: xmax + 1, :], \
polys[selected_polys], tags[selected_polys], hv_tags[selected_polys], txts
else:
continue
im = im[ymin:ymax + 1, xmin:xmax + 1, :]
polys = polys[selected_polys]
tags = tags[selected_polys]
hv_tags = hv_tags[selected_polys]
txts_tmp = []
for selected_poly in selected_polys:
txts_tmp.append(txts[selected_poly])
txts = txts_tmp
polys[:, :, 0] -= xmin
polys[:, :, 1] -= ymin
return im, polys, tags, hv_tags, txts
return im, polys, tags, hv_tags, txts
def generate_direction_map(self, poly_quads, n_char, direction_map):
"""
"""
width_list = []
height_list = []
for quad in poly_quads:
quad_w = (np.linalg.norm(quad[0] - quad[1]) +
np.linalg.norm(quad[2] - quad[3])) / 2.0
quad_h = (np.linalg.norm(quad[0] - quad[3]) +
np.linalg.norm(quad[2] - quad[1])) / 2.0
width_list.append(quad_w)
height_list.append(quad_h)
norm_width = max(sum(width_list) / n_char, 1.0)
average_height = max(sum(height_list) / len(height_list), 1.0)
k = 1
for quad in poly_quads:
direct_vector_full = (
(quad[1] + quad[2]) - (quad[0] + quad[3])) / 2.0
direct_vector = direct_vector_full / (
np.linalg.norm(direct_vector_full) + 1e-6) * norm_width
direction_label = tuple(
map(float,
[direct_vector[0], direct_vector[1], 1.0 / average_height]))
cv2.fillPoly(direction_map,
quad.round().astype(np.int32)[np.newaxis, :, :],
direction_label)
k += 1
return direction_map
def calculate_average_height(self, poly_quads):
"""
"""
height_list = []
for quad in poly_quads:
quad_h = (np.linalg.norm(quad[0] - quad[3]) +
np.linalg.norm(quad[2] - quad[1])) / 2.0
height_list.append(quad_h)
average_height = max(sum(height_list) / len(height_list), 1.0)
return average_height
def generate_tcl_ctc_label(self,
h,
w,
polys,
tags,
text_strs,
ds_ratio,
tcl_ratio=0.3,
shrink_ratio_of_width=0.15):
"""
Generate polygon.
"""
score_map_big = np.zeros(
(
h,
w, ), dtype=np.float32)
h, w = int(h * ds_ratio), int(w * ds_ratio)
polys = polys * ds_ratio
score_map = np.zeros(
(
h,
w, ), dtype=np.float32)
score_label_map = np.zeros(
(
h,
w, ), dtype=np.float32)
tbo_map = np.zeros((h, w, 5), dtype=np.float32)
training_mask = np.ones(
(
h,
w, ), dtype=np.float32)
direction_map = np.ones((h, w, 3)) * np.array([0, 0, 1]).reshape(
[1, 1, 3]).astype(np.float32)
label_idx = 0
score_label_map_text_label_list = []
pos_list, pos_mask, label_list = [], [], []
for poly_idx, poly_tag in enumerate(zip(polys, tags)):
poly = poly_tag[0]
tag = poly_tag[1]
# generate min_area_quad
min_area_quad, center_point = self.gen_min_area_quad_from_poly(poly)
min_area_quad_h = 0.5 * (
np.linalg.norm(min_area_quad[0] - min_area_quad[3]) +
np.linalg.norm(min_area_quad[1] - min_area_quad[2]))
min_area_quad_w = 0.5 * (
np.linalg.norm(min_area_quad[0] - min_area_quad[1]) +
np.linalg.norm(min_area_quad[2] - min_area_quad[3]))
if min(min_area_quad_h, min_area_quad_w) < self.min_text_size * ds_ratio \
or min(min_area_quad_h, min_area_quad_w) > self.max_text_size * ds_ratio:
continue
if tag:
cv2.fillPoly(training_mask,
poly.astype(np.int32)[np.newaxis, :, :], 0.15)
else:
text_label = text_strs[poly_idx]
if len(text_label) < 1:
continue
tcl_poly = self.poly2tcl(poly, tcl_ratio)
tcl_quads = self.poly2quads(tcl_poly)
poly_quads = self.poly2quads(poly)
stcl_quads, quad_index = self.shrink_poly_along_width(
tcl_quads,
shrink_ratio_of_width=shrink_ratio_of_width,
expand_height_ratio=1.0 / tcl_ratio)
cv2.fillPoly(score_map,
np.round(stcl_quads).astype(np.int32), 1.0)
cv2.fillPoly(score_map_big,
np.round(stcl_quads / ds_ratio).astype(np.int32),
1.0)
for idx, quad in enumerate(stcl_quads):
quad_mask = np.zeros((h, w), dtype=np.float32)
quad_mask = cv2.fillPoly(
quad_mask,
np.round(quad[np.newaxis, :, :]).astype(np.int32), 1.0)
tbo_map = self.gen_quad_tbo(poly_quads[quad_index[idx]],
quad_mask, tbo_map)
label_idx += 1
cv2.fillPoly(score_label_map,
np.round(poly_quads).astype(np.int32), label_idx)
# direction info, fix-me
n_char = len(text_label)
direction_map = self.generate_direction_map(poly_quads, n_char,
direction_map)
# use big score_map for smooth tcl lines
score_map_big_resized = cv2.resize(
score_map_big, dsize=None, fx=ds_ratio, fy=ds_ratio)
score_map = np.array(score_map_big_resized > 1e-3, dtype='float32')
return score_map, score_label_map, tbo_map, direction_map, training_mask
    def adjust_point(self, poly):
        """Rotate the point order of a polygon so the long text edges come first.

        For a quad: if the 0-1/2-3 edges are much shorter than the 1-2/3-0
        edges, shift the start point by one.  For longer polygons: if the first
        corner turns sharply (> 70 degrees), rotate the whole point list by one.
        Returns the (possibly re-indexed) polygon.
        """
        point_num = poly.shape[0]
        if point_num == 4:
            len_1 = np.linalg.norm(poly[0] - poly[1])
            len_2 = np.linalg.norm(poly[1] - poly[2])
            len_3 = np.linalg.norm(poly[2] - poly[3])
            len_4 = np.linalg.norm(poly[3] - poly[0])
            # 1.5x tolerance: only re-order clearly "tall" quads.
            if (len_1 + len_3) * 1.5 < (len_2 + len_4):
                poly = poly[[1, 2, 3, 0], :]
        elif point_num > 4:
            vector_1 = poly[0] - poly[1]
            vector_2 = poly[1] - poly[2]
            # Angle between the first two edges; 1e-6 guards zero-length edges.
            cos_theta = np.dot(vector_1, vector_2) / (
                np.linalg.norm(vector_1) * np.linalg.norm(vector_2) + 1e-6)
            # Rounding keeps cos_theta inside arccos's [-1, 1] domain.
            theta = np.arccos(np.round(cos_theta, decimals=4))
            if abs(theta) > (70 / 180 * math.pi):
                index = list(range(1, point_num)) + [0]
                poly = poly[np.array(index), :]
        return poly
def gen_min_area_quad_from_poly(self, poly):
"""
Generate min area quad from poly.
"""
point_num = poly.shape[0]
min_area_quad = np.zeros((4, 2), dtype=np.float32)
if point_num == 4:
min_area_quad = poly
center_point = np.sum(poly, axis=0) / 4
else:
rect = cv2.minAreaRect(poly.astype(
np.int32)) # (center (x,y), (width, height), angle of rotation)
center_point = rect[0]
box = np.array(cv2.boxPoints(rect))
first_point_idx = 0
min_dist = 1e4
for i in range(4):
dist = np.linalg.norm(box[(i + 0) % 4] - poly[0]) + \
np.linalg.norm(box[(i + 1) % 4] - poly[point_num // 2 - 1]) + \
np.linalg.norm(box[(i + 2) % 4] - poly[point_num // 2]) + \
np.linalg.norm(box[(i + 3) % 4] - poly[-1])
if dist < min_dist:
min_dist = dist
first_point_idx = i
for i in range(4):
min_area_quad[i] = box[(first_point_idx + i) % 4]
return min_area_quad, center_point
def shrink_quad_along_width(self,
quad,
begin_width_ratio=0.,
end_width_ratio=1.):
"""
Generate shrink_quad_along_width.
"""
ratio_pair = np.array(
[[begin_width_ratio], [end_width_ratio]], dtype=np.float32)
p0_1 = quad[0] + (quad[1] - quad[0]) * ratio_pair
p3_2 = quad[3] + (quad[2] - quad[3]) * ratio_pair
return np.array([p0_1[0], p0_1[1], p3_2[1], p3_2[0]])
def shrink_poly_along_width(self,
quads,
shrink_ratio_of_width,
expand_height_ratio=1.0):
"""
shrink poly with given length.
"""
upper_edge_list = []
def get_cut_info(edge_len_list, cut_len):
for idx, edge_len in enumerate(edge_len_list):
cut_len -= edge_len
if cut_len <= 0.000001:
ratio = (cut_len + edge_len_list[idx]) / edge_len_list[idx]
return idx, ratio
for quad in quads:
upper_edge_len = np.linalg.norm(quad[0] - quad[1])
upper_edge_list.append(upper_edge_len)
# length of left edge and right edge.
left_length = np.linalg.norm(quads[0][0] - quads[0][
3]) * expand_height_ratio
right_length = np.linalg.norm(quads[-1][1] - quads[-1][
2]) * expand_height_ratio
shrink_length = min(left_length, right_length,
sum(upper_edge_list)) * shrink_ratio_of_width
# shrinking length
upper_len_left = shrink_length
upper_len_right = sum(upper_edge_list) - shrink_length
left_idx, left_ratio = get_cut_info(upper_edge_list, upper_len_left)
left_quad = self.shrink_quad_along_width(
quads[left_idx], begin_width_ratio=left_ratio, end_width_ratio=1)
right_idx, right_ratio = get_cut_info(upper_edge_list, upper_len_right)
right_quad = self.shrink_quad_along_width(
quads[right_idx], begin_width_ratio=0, end_width_ratio=right_ratio)
out_quad_list = []
if left_idx == right_idx:
out_quad_list.append(
[left_quad[0], right_quad[1], right_quad[2], left_quad[3]])
else:
out_quad_list.append(left_quad)
for idx in range(left_idx + 1, right_idx):
out_quad_list.append(quads[idx])
out_quad_list.append(right_quad)
return np.array(out_quad_list), list(range(left_idx, right_idx + 1))
def prepare_text_label(self, label_str, Lexicon_Table):
"""
Prepare text lablel by given Lexicon_Table.
"""
if len(Lexicon_Table) == 36:
return label_str.lower()
else:
return label_str
def vector_angle(self, A, B):
"""
Calculate the angle between vector AB and x-axis positive direction.
"""
AB = np.array([B[1] - A[1], B[0] - A[0]])
return np.arctan2(*AB)
def theta_line_cross_point(self, theta, point):
"""
Calculate the line through given point and angle in ax + by + c =0 form.
"""
x, y = point
cos = np.cos(theta)
sin = np.sin(theta)
return [sin, -cos, cos * y - sin * x]
def line_cross_two_point(self, A, B):
"""
Calculate the line through given point A and B in ax + by + c =0 form.
"""
angle = self.vector_angle(A, B)
return self.theta_line_cross_point(angle, A)
def average_angle(self, poly):
"""
Calculate the average angle between left and right edge in given poly.
"""
p0, p1, p2, p3 = poly
angle30 = self.vector_angle(p3, p0)
angle21 = self.vector_angle(p2, p1)
return (angle30 + angle21) / 2
def line_cross_point(self, line1, line2):
"""
line1 and line2 in 0=ax+by+c form, compute the cross point of line1 and line2
"""
a1, b1, c1 = line1
a2, b2, c2 = line2
d = a1 * b2 - a2 * b1
if d == 0:
print('Cross point does not exist')
return np.array([0, 0], dtype=np.float32)
else:
x = (b1 * c2 - b2 * c1) / d
y = (a2 * c1 - a1 * c2) / d
return np.array([x, y], dtype=np.float32)
def quad2tcl(self, poly, ratio):
"""
Generate center line by poly clock-wise point. (4, 2)
"""
ratio_pair = np.array(
[[0.5 - ratio / 2], [0.5 + ratio / 2]], dtype=np.float32)
p0_3 = poly[0] + (poly[3] - poly[0]) * ratio_pair
p1_2 = poly[1] + (poly[2] - poly[1]) * ratio_pair
return np.array([p0_3[0], p1_2[0], p1_2[1], p0_3[1]])
def poly2tcl(self, poly, ratio):
"""
Generate center line by poly clock-wise point.
"""
ratio_pair = np.array(
[[0.5 - ratio / 2], [0.5 + ratio / 2]], dtype=np.float32)
tcl_poly = np.zeros_like(poly)
point_num = poly.shape[0]
for idx in range(point_num // 2):
point_pair = poly[idx] + (poly[point_num - 1 - idx] - poly[idx]
) * ratio_pair
tcl_poly[idx] = point_pair[0]
tcl_poly[point_num - 1 - idx] = point_pair[1]
return tcl_poly
def gen_quad_tbo(self, quad, tcl_mask, tbo_map):
"""
Generate tbo_map for give quad.
"""
# upper and lower line function: ax + by + c = 0;
up_line = self.line_cross_two_point(quad[0], quad[1])
lower_line = self.line_cross_two_point(quad[3], quad[2])
quad_h = 0.5 * (np.linalg.norm(quad[0] - quad[3]) +
np.linalg.norm(quad[1] - quad[2]))
quad_w = 0.5 * (np.linalg.norm(quad[0] - quad[1]) +
np.linalg.norm(quad[2] - quad[3]))
# average angle of left and right line.
angle = self.average_angle(quad)
xy_in_poly = np.argwhere(tcl_mask == 1)
for y, x in xy_in_poly:
point = (x, y)
line = self.theta_line_cross_point(angle, point)
cross_point_upper = self.line_cross_point(up_line, line)
cross_point_lower = self.line_cross_point(lower_line, line)
##FIX, offset reverse
upper_offset_x, upper_offset_y = cross_point_upper - point
lower_offset_x, lower_offset_y = cross_point_lower - point
tbo_map[y, x, 0] = upper_offset_y
tbo_map[y, x, 1] = upper_offset_x
tbo_map[y, x, 2] = lower_offset_y
tbo_map[y, x, 3] = lower_offset_x
tbo_map[y, x, 4] = 1.0 / max(min(quad_h, quad_w), 1.0) * 2
return tbo_map
def poly2quads(self, poly):
"""
Split poly into quads.
"""
quad_list = []
point_num = poly.shape[0]
# point pair
point_pair_list = []
for idx in range(point_num // 2):
point_pair = [poly[idx], poly[point_num - 1 - idx]]
point_pair_list.append(point_pair)
quad_num = point_num // 2 - 1
for idx in range(quad_num):
# reshape and adjust to clock-wise
quad_list.append((np.array(point_pair_list)[[idx, idx + 1]]
).reshape(4, 2)[[0, 2, 3, 1]])
return np.array(quad_list)
def rotate_im_poly(self, im, text_polys):
"""
rotate image with 90 / 180 / 270 degre
"""
im_w, im_h = im.shape[1], im.shape[0]
dst_im = im.copy()
dst_polys = []
rand_degree_ratio = np.random.rand()
rand_degree_cnt = 1
if rand_degree_ratio > 0.5:
rand_degree_cnt = 3
for i in range(rand_degree_cnt):
dst_im = np.rot90(dst_im)
rot_degree = -90 * rand_degree_cnt
rot_angle = rot_degree * math.pi / 180.0
n_poly = text_polys.shape[0]
cx, cy = 0.5 * im_w, 0.5 * im_h
ncx, ncy = 0.5 * dst_im.shape[1], 0.5 * dst_im.shape[0]
for i in range(n_poly):
wordBB = text_polys[i]
poly = []
for j in range(4): # 16->4
sx, sy = wordBB[j][0], wordBB[j][1]
dx = math.cos(rot_angle) * (sx - cx) - math.sin(rot_angle) * (
sy - cy) + ncx
dy = math.sin(rot_angle) * (sx - cx) + math.cos(rot_angle) * (
sy - cy) + ncy
poly.append([dx, dy])
dst_polys.append(poly)
return dst_im, np.array(dst_polys, dtype=np.float32)
def __call__(self, data):
input_size = self.img_size
im = data['image']
text_polys = data['polys']
text_tags = data['ignore_tags']
text_strs = data['texts']
h, w, _ = im.shape
text_polys, text_tags, hv_tags = self.check_and_validate_polys(
text_polys, text_tags, (h, w))
if text_polys.shape[0] <= 0:
return None
# set aspect ratio and keep area fix
asp_scales = np.arange(1.0, 1.55, 0.1)
asp_scale = np.random.choice(asp_scales)
if np.random.rand() < 0.5:
asp_scale = 1.0 / asp_scale
asp_scale = math.sqrt(asp_scale)
asp_wx = asp_scale
asp_hy = 1.0 / asp_scale
im = cv2.resize(im, dsize=None, fx=asp_wx, fy=asp_hy)
text_polys[:, :, 0] *= asp_wx
text_polys[:, :, 1] *= asp_hy
h, w, _ = im.shape
if max(h, w) > 2048:
rd_scale = 2048.0 / max(h, w)
im = cv2.resize(im, dsize=None, fx=rd_scale, fy=rd_scale)
text_polys *= rd_scale
h, w, _ = im.shape
if min(h, w) < 16:
return None
# no background
im, text_polys, text_tags, hv_tags, text_strs = self.crop_area(
im,
text_polys,
text_tags,
hv_tags,
text_strs,
crop_background=False)
if text_polys.shape[0] == 0:
return None
# # continue for all ignore case
if np.sum((text_tags * 1.0)) >= text_tags.size:
return None
new_h, new_w, _ = im.shape
if (new_h is None) or (new_w is None):
return None
# resize image
std_ratio = float(input_size) / max(new_w, new_h)
rand_scales = np.array(
[0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.0, 1.0, 1.0, 1.0])
rz_scale = std_ratio * np.random.choice(rand_scales)
im = cv2.resize(im, dsize=None, fx=rz_scale, fy=rz_scale)
text_polys[:, :, 0] *= rz_scale
text_polys[:, :, 1] *= rz_scale
# add gaussian blur
if np.random.rand() < 0.1 * 0.5:
ks = np.random.permutation(5)[0] + 1
ks = int(ks / 2) * 2 + 1
im = cv2.GaussianBlur(im, ksize=(ks, ks), sigmaX=0, sigmaY=0)
# add brighter
if np.random.rand() < 0.1 * 0.5:
im = im * (1.0 + np.random.rand() * 0.5)
im = np.clip(im, 0.0, 255.0)
# add darker
if np.random.rand() < 0.1 * 0.5:
im = im * (1.0 - np.random.rand() * 0.5)
im = np.clip(im, 0.0, 255.0)
# Padding the im to [input_size, input_size]
new_h, new_w, _ = im.shape
if min(new_w, new_h) < input_size * 0.5:
return None
im_padded = np.ones((input_size, input_size, 3), dtype=np.float32)
im_padded[:, :, 2] = self.mean[0] * 255
im_padded[:, :, 1] = self.mean[1] * 255
im_padded[:, :, 0] = self.mean[2] * 255
# Random the start position
del_h = input_size - new_h
del_w = input_size - new_w
sh, sw = 0, 0
if del_h > 1:
sh = int(np.random.rand() * del_h)
if del_w > 1:
sw = int(np.random.rand() * del_w)
# Padding
im_padded[sh:sh + new_h, sw:sw + new_w, :] = im.copy()
text_polys[:, :, 0] += sw
text_polys[:, :, 1] += sh
score_map, score_label_map, border_map, direction_map, training_mask = self.generate_tcl_ctc_label(input_size,
input_size,
text_polys,
text_tags,
text_strs,
1/self.downscale,
self.tcl_ratio,
self.shrink_ratio_of_width)
im_padded[:, :, 2] -= self.mean[0] * 255
im_padded[:, :, 1] -= self.mean[1] * 255
im_padded[:, :, 0] -= self.mean[2] * 255
im_padded[:, :, 2] /= (255.0 * self.std[0])
im_padded[:, :, 1] /= (255.0 * self.std[1])
im_padded[:, :, 0] /= (255.0 * self.std[2])
im_padded = im_padded.transpose((2, 0, 1))
images = im_padded[::-1, :, :]
tcl_maps = score_map[np.newaxis, :, :]
tcl_label_maps = score_label_map[np.newaxis, :, :]
border_maps = border_map.transpose((2, 0, 1))
direction_maps = direction_map.transpose((2, 0, 1))
training_masks = training_mask[np.newaxis, :, :]
data['images'] = images
data['tcl_maps'] = tcl_maps
data['tcl_label_maps'] = tcl_label_maps
data['border_maps'] = border_maps
data['direction_maps'] = direction_maps
data['training_masks'] = training_masks
return data
| [
"21636037@zju.edu.cn"
] | 21636037@zju.edu.cn |
722a7b4700eb424a6ebfe9e64198eb024b63fb23 | 21827bdbcd9151a1359adc47bf1c2c52a0c596b5 | /notification/migrations/0003_auto_20210517_1645.py | aa1446daf1f9fb768f5ac1f5a7f8fcf26b0c88d7 | [] | no_license | Ariesgal2017/versevsverse | 9b4bd890bfadf3079465e571bd8626c44effc951 | 65ba22fcb5827af007c2de6e86af901d87497630 | refs/heads/main | 2023-05-02T07:28:29.295025 | 2021-05-19T10:28:32 | 2021-05-19T10:28:32 | 366,995,948 | 0 | 0 | null | 2021-05-19T09:54:31 | 2021-05-13T09:16:15 | CSS | UTF-8 | Python | false | false | 556 | py | # Generated by Django 3.1.7 on 2021-05-17 16:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('verser', '__first__'),
('notification', '0002_notification_user_notify'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='user_notify',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='usernotify', to='verser.customuser'),
),
]
| [
"ariesgal2017@gmail.com"
] | ariesgal2017@gmail.com |
92466ed129c2a1e1e5cc2f2b49236d481ad3ab0b | e28db61e4b3372ba4ca0147c37382e58f670eace | /numbers_dates_times/accurate_decimal_calcs.py | 8a2680cff82a601e525290e88aa990a92a5e1267 | [] | no_license | stepholdcorn/python-cookbook | 26e9586fc5c7cabc7d79742781256649656fb978 | 2a7133f07c3bca5db4357e9bedaf25447b695b03 | refs/heads/master | 2020-05-18T17:46:53.036933 | 2015-05-11T11:06:03 | 2015-05-11T11:06:03 | 35,214,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from decimal import Decimal
a = Decimal('4.2')
b = Decimal('2.1')
print(a + b)
print((a + b) == Decimal('6.3')) | [
"stephieoldcorn@gmail.com"
] | stephieoldcorn@gmail.com |
455d14cf9f53cdf563bf65094e78b103076f2743 | 7922714a4fd81acd2dac3875d2dd75a2bf24ef5e | /handlers/inlines/search.py | 57caa66a47de26c22dcbb842b488ae9e5bcde09f | [
"MIT"
] | permissive | hexatester/ut-telegram-bot | 32bf9a20ffaf82a5b6f1420d6bb041249ff93d6c | 20f6f063726913cb6d21e42538103e3498b929a7 | refs/heads/master | 2023-01-20T06:50:30.941786 | 2020-11-18T08:31:03 | 2020-11-18T08:31:03 | 290,542,370 | 0 | 0 | MIT | 2020-09-16T03:09:47 | 2020-08-26T16:02:02 | Python | UTF-8 | Python | false | false | 1,141 | py | from telegram import Update, InlineQuery, InlineQueryResult
from telegram.ext import CallbackContext
from typing import List
from core.utils.inline_query import article
from libs.rss.rss import Rss
from libs.search.search import Search
RSS = Rss()
SEARCH = Search()
EMPTY = article(
title="❌ Tidak ada hasil",
description="",
message_text="Pm @UniversitasTerbukaBot untuk mengakses layanan UT. 😁",
)
def search(update: Update, context: CallbackContext):
    """Answer an inline query with combined search + RSS results.

    Falls back to the cached RSS items, or a "no results" stub, when the
    query produces nothing.  Always offers the "Bantuan" PM button.
    (Fix: the original duplicated the answer/return tail in both branches.)
    """
    inline_query: InlineQuery = update.inline_query
    query = inline_query.query
    results_list: List[InlineQueryResult] = []
    if query:
        results_list.extend(SEARCH(query))
        results_list.extend(RSS(query))
    if not results_list:
        if RSS.inline_results:
            results_list.extend(RSS.inline_results)
        else:
            results_list.append(EMPTY)
    inline_query.answer(
        results_list, switch_pm_text="Bantuan", switch_pm_parameter="inline-help"
    )
    return -1
| [
"revolusi147id@gmail.com"
] | revolusi147id@gmail.com |
ee38a6a9e8d4e87aa59aa4e3e3b67cbb2e977008 | 09e8945f2e7c10274dfaeca60f11b9926b8117c3 | /build/turtlebot3_msgs/cmake/turtlebot3_msgs-genmsg-context.py | 2a22ce530c76904a895924a42133f99b8d91c016 | [] | no_license | HimanshuDubara/nav_sys | 84fb5fc5a10cd94664ae6c3b2e5ba03643623d97 | 95aab907953cd5292ca958ba4f859b0847e1a783 | refs/heads/master | 2023-03-28T16:50:10.793537 | 2021-03-06T07:02:08 | 2021-03-06T07:02:08 | 322,593,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/himanshu/nav_sys/src/turtlebot3_msgs/msg/SensorState.msg;/home/himanshu/nav_sys/src/turtlebot3_msgs/msg/VersionInfo.msg;/home/himanshu/nav_sys/src/turtlebot3_msgs/msg/Sound.msg"
services_str = ""
pkg_name = "turtlebot3_msgs"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "turtlebot3_msgs;/home/himanshu/nav_sys/src/turtlebot3_msgs/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"himanshudubara2000@gmial.com"
] | himanshudubara2000@gmial.com |
8f8c577a98fec3fb5d6a1d25c2d0f8350c64abb4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2311/60829/306892.py | 9a645be64e8410c702a8a8169df2d59fed3ed6d4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | def dl(x):
res=""
for i in range(len(x)):
if not x[len(x)-1-i]==" ":
break
res=x[0:i+1]
return res
a=[]
b=[int(x) for x in dl(input()).split(" ")]
c=[int(x) for x in dl(input()).split(" ")]
a.append(b)
a.append(c)
aa=[[[10], [8]]]
bb=["0 4 0 20 0 12 0 "]
for i in range(len(aa)):
if aa[i]==a:
a=bb[i]
print(a) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
93bf1e1ff40b2e798962265248af5810c27565a5 | da9ce1d1686b451b582152b69648bf53ffb38e00 | /src/policy_gradient.py | 71c4f973317d6c974e159a533d26ed77b682a2c5 | [] | no_license | GeorgJohn/KickerSimulation | 6f84f67322d8ff963abeaa88cd9c741001442247 | 931b6046e964972fb0824dfb0add16dae662e49c | refs/heads/master | 2021-04-06T09:34:35.842276 | 2018-06-28T10:26:28 | 2018-06-28T10:26:28 | 124,398,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,828 | py | import tensorflow as tf
import numpy as np
import random
import tqdm
from src import Environment_Controller as Env
slim = tf.contrib.slim
def calculate_naive_returns(rewards):
    """Calculates a list of naive (undiscounted) returns given a
    list of rewards.

    Returns an array where entry t is the sum of rewards from step t to the
    end of the episode.

    Bug fix: the original iterated ``range(len(rewards), 0)`` (an empty
    range) and added the whole ``rewards`` list instead of ``rewards[t]``,
    so it always returned zeros.
    """
    total_returns = np.zeros(len(rewards))
    total_return = 0.0
    for t in range(len(rewards) - 1, -1, -1):  # accumulate from the end
        total_return += rewards[t]
        total_returns[t] = total_return
    return total_returns
def discount_rewards(rewards, gamma=0.98):
    """Per-step discounted returns: G[t] = r[t] + gamma * G[t+1]."""
    n = len(rewards)
    returns = [0] * n
    returns[-1] = rewards[-1]
    for t in reversed(range(n - 1)):
        returns[t] = rewards[t] + gamma * returns[t + 1]
    return returns
def epsilon_greedy_action(action_distribution, epsilon=1e-1):
    """Pick the greedy action, or a uniformly random one with probability epsilon."""
    explore = random.random() < epsilon
    if explore:
        # argmax over uniform noise == a uniformly random action index
        noise = np.random.random(action_distribution.shape)
        return np.argmax(noise)
    return np.argmax(action_distribution)
def epsilon_greedy_action_annealed(action_distribution, percentage, epsilon_start=1.0, epsilon_end=1e-2):
    """Epsilon-greedy selection with epsilon linearly annealed by training progress.

    `percentage` in [0, 1] interpolates epsilon from epsilon_start down to
    epsilon_end.
    """
    epsilon = epsilon_start + (epsilon_end - epsilon_start) * percentage
    if random.random() < epsilon:
        return np.argmax(np.random.random(action_distribution.shape))
    return np.argmax(action_distribution)
class PGAgent(object):
    """Policy-gradient (REINFORCE-style) agent with a 4-layer sigmoid MLP policy.

    The TensorFlow graph is built at construction time; `session` is owned
    by the caller and must outlive the agent.
    """

    def __init__(self, session, state_size, num_actions, hidden_size_1, hidden_size_2, learning_rate=1e-3,
                 explore_exploit_setting='epsilon_greedy_0.05'):
        self.session = session
        self.state_size = state_size
        self.num_actions = num_actions
        self.hidden_size_1 = hidden_size_1
        self.hidden_size_2 = hidden_size_2
        self.learning_rate = learning_rate
        # String key selecting the exploration schedule used in
        # sample_action_from_distribution().
        self.explore_exploit_setting = explore_exploit_setting
        self.build_model()
        self.build_training()
        self.saver = tf.train.Saver()

    def build_model(self):
        """Create the policy network: state -> softmax action distribution."""
        with tf.variable_scope('pg-model'):
            self.state = tf.placeholder(shape=[None, self.state_size], dtype=tf.float32)
            self.h0 = slim.fully_connected(self.state, self.hidden_size_1, activation_fn=tf.nn.sigmoid)
            self.h1 = slim.fully_connected(self.h0, self.hidden_size_2, activation_fn=tf.nn.sigmoid)
            self.h2 = slim.fully_connected(self.h1, self.hidden_size_1, activation_fn=tf.nn.sigmoid)
            self.output = slim.fully_connected(self.h2, self.num_actions, activation_fn=tf.nn.softmax)

    def build_training(self):
        """Create the REINFORCE loss (-E[log pi(a|s) * return]) and Adam train op."""
        self.action_input = tf.placeholder(tf.int32, shape=[None])
        self.reward_input = tf.placeholder(tf.float32, shape=[None])
        # Select the logits related to the action taken
        # (flattened-index gather: row * num_actions + action).
        self.output_index_for_actions = (tf.range(0, tf.shape(self.output)[0]) * tf.shape(self.output)[1]) + \
            self.action_input
        self.logits_for_actions = tf.gather(tf.reshape(self.output, [-1]), self.output_index_for_actions)
        self.loss = - tf.reduce_mean(tf.log(self.logits_for_actions) * self.reward_input)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        self.train_step = self.optimizer.minimize(self.loss)

    def sample_action_from_distribution(self, action_distribution, epsilon_percentage):
        """Map the configured explore/exploit setting to a concrete action index."""
        # Choose an action based on the action probability
        # distribution and an explore vs exploit
        # NOTE(review): 'greedy' still uses epsilon_greedy_action's default
        # epsilon of 0.1, so it is not fully greedy — confirm intent.
        if self.explore_exploit_setting == 'greedy':
            action = epsilon_greedy_action(action_distribution)
        elif self.explore_exploit_setting == 'epsilon_greedy_0.05':
            action = epsilon_greedy_action(action_distribution, 0.05)
        elif self.explore_exploit_setting == 'epsilon_greedy_0.25':
            action = epsilon_greedy_action(action_distribution, 0.25)
        elif self.explore_exploit_setting == 'epsilon_greedy_0.50':
            action = epsilon_greedy_action(action_distribution, 0.50)
        elif self.explore_exploit_setting == 'epsilon_greedy_0.90':
            action = epsilon_greedy_action(action_distribution, 0.90)
        elif self.explore_exploit_setting == 'epsilon_greedy_annealed_1.0->0.001':
            action = epsilon_greedy_action_annealed(action_distribution, epsilon_percentage, 1.0, 0.001)
        elif self.explore_exploit_setting == 'epsilon_greedy_annealed_0.5->0.001':
            action = epsilon_greedy_action_annealed(action_distribution, epsilon_percentage, 0.5, 0.001)
        elif self.explore_exploit_setting == 'epsilon_greedy_annealed_0.25->0.001':
            action = epsilon_greedy_action_annealed(action_distribution, epsilon_percentage, 0.25, 0.001)
        else:
            # Unknown setting: fall back to the environment's no-op action.
            action = Env.Action.NOOP
        return action

    def predict_action(self, state, epsilon_percentage):
        """Run the policy on a single state and sample an action from its output."""
        action_distribution = self.session.run(self.output, feed_dict={self.state: [state]})[0]
        action = self.sample_action_from_distribution(action_distribution, epsilon_percentage)
        return action
class EpisodeHistory(object):
    """Per-episode record of (state, action, reward, next_state) transitions."""

    def __init__(self):
        self.states = []
        self.actions = []
        self.rewards = []
        self.state_primes = []
        # Filled in by the caller once the episode terminates.
        self.discounted_returns = []

    def add_to_history(self, state, action, reward, state_prime):
        """Append a single (s, a, r, s') transition to the episode."""
        for buffer, value in ((self.states, state),
                              (self.actions, action),
                              (self.rewards, reward),
                              (self.state_primes, state_prime)):
            buffer.append(value)
class Memory(object):
    """Flat buffer that aggregates transitions across episodes for training."""

    def __init__(self):
        self.reset_memory()

    def reset_memory(self):
        """Drop all stored transitions."""
        self.states = []
        self.actions = []
        self.rewards = []
        self.state_primes = []
        self.discounted_returns = []

    def add_episode(self, episode):
        """Concatenate one finished episode's trajectory onto the buffer.

        Note: state_primes are intentionally not aggregated; training only
        uses states, actions and discounted returns.
        """
        self.states.extend(episode.states)
        self.actions.extend(episode.actions)
        self.rewards.extend(episode.rewards)
        self.discounted_returns.extend(episode.discounted_returns)
def main():
    """Train the policy-gradient agent on the kicker environment.

    Runs `total_episodes` episodes, accumulating each finished episode's
    discounted returns into a shared Memory buffer and running one Adam
    update per episode (train_frequency == 1).
    """
    # Configure Settings
    total_episodes = 5000
    total_steps_max = 10000  # NOTE(review): unused — confirm whether a global step cap was intended
    epsilon_stop = 3000      # episode index at which annealed epsilon reaches its floor
    train_frequency = 1
    max_episode_length = 600
    render_start = 10        # start rendering once this many episodes have run
    should_render = True
    explore_exploit_setting = 'greedy'
    env = Env.EnvironmentController()
    state_size = 5
    num_actions = 3
    solved = False
    with tf.Session() as session:
        agent = PGAgent(session=session, state_size=state_size, num_actions=num_actions, hidden_size_1=900,
                        hidden_size_2=900, explore_exploit_setting=explore_exploit_setting)
        session.run(tf.global_variables_initializer())
        episode_rewards = []
        batch_losses = []
        global_memory = Memory()
        steps = 0
        for i in tqdm.tqdm(range(total_episodes)):
            state = env.reset()
            # Keep only the last 5 observation values as the agent's state.
            state = state[-5:]
            episode_reward = 0.0
            episode_history = EpisodeHistory()
            # Fraction of the annealing schedule completed, clamped to 1.0.
            epsilon_percentage = float(min(i/float(epsilon_stop), 1.0))
            for j in range(max_episode_length):
                action = agent.predict_action(state, epsilon_percentage)
                state_prime, reward, terminal = env.step(action)
                state_prime = state_prime[-5:]
                if (render_start > 0 and i > render_start and should_render):  # or (solved and should_render):
                    env.render()
                episode_history.add_to_history(state, action, reward, state_prime)
                state = state_prime
                episode_reward += reward
                steps += 1
                if terminal:
                    # Episode finished: label the trajectory with discounted
                    # returns and fold it into the shared training buffer.
                    episode_history.discounted_returns = discount_rewards(episode_history.rewards)
                    global_memory.add_episode(episode_history)
                    if np.mod(i, train_frequency) == 0:
                        feed_dict = {agent.reward_input: np.array(global_memory.discounted_returns),
                                     agent.action_input: np.array(global_memory.actions),
                                     agent.state: np.array(global_memory.states)}
                        _, batch_loss = session.run([agent.train_step, agent.loss], feed_dict=feed_dict)
                        batch_losses.append(batch_loss)
                        global_memory.reset_memory()
                    episode_rewards.append(episode_reward)
                    break
            # NOTE(review): `if i % 10:` is true for 9 of every 10 episodes —
            # this looks like it was meant to be `if i % 10 == 0:`; confirm.
            if i % 10:
                if np.mean(episode_rewards[:-100]) > 1.0:
                    solved = True
                else:
                    solved = False
                print('Solved:', solved, 'Mean Reward', np.mean(episode_rewards[:-100]))
            # Checkpoint the model (saved every episode at this indent level).
            save_path = agent.saver.save(session, "/tmp/model_1.ckpt")
            print("Model saved in path: %s" % save_path)


main()
| [
"gjohn@hm.edu"
] | gjohn@hm.edu |
c2463f705b6ada770b0116ab356204f99231952f | 3dcfd885c12f0d60cce00a076f6ffe4dc0d2e6b8 | /videoseries/accounts/views.py | 1aabeb58bc27f37d1c16dd1423ceb396ba1b1762 | [] | no_license | akapitan/VideoSeries | 85246266c65682278317a9382ee095a7378ab1f6 | e5129ee9e6aa4f03574311af42bc62aa6f8b81ab | refs/heads/master | 2022-12-11T08:54:33.551941 | 2019-02-18T19:55:46 | 2019-02-18T19:55:46 | 170,863,284 | 0 | 0 | null | 2022-12-08T01:36:50 | 2019-02-15T12:42:03 | JavaScript | UTF-8 | Python | false | false | 1,347 | py | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, get_user_model, login, logout
from .forms import UserLoginForm, UserRegisterFrom
from django.contrib.auth.hashers import make_password
def loginView(request):
    """Authenticate a user and redirect to the ``next`` URL (or the site root).

    GET shows the login form; POST validates it, logs the user in on success,
    and re-renders the bound form (with errors) on failure.
    """
    # Renamed from ``next`` to avoid shadowing the builtin; the stray debug
    # print(next) was removed.
    next_url = request.GET.get('next')
    form = UserLoginForm(request.POST or None)
    if form.is_valid():
        username = form.cleaned_data.get('username')
        password = form.cleaned_data.get('password')
        user = authenticate(username=username, password=password)
        # BUG FIX: authenticate() returns None for bad credentials; the
        # original passed None straight to login(), which raises
        # AttributeError. Fall through and re-render the form instead.
        if user is not None:
            login(request, user)
            if next_url:
                return redirect(next_url)
            return redirect('/')
    context = {
        'form': form,
    }
    return render(request, 'accounts/login.html', context)
def registerView(request):
    """Create a new account, store a hashed password, and log the user in."""
    form = UserRegisterFrom(request.POST or None)
    if form.is_valid():
        # Keep the raw password around: authenticate() needs it, while the
        # stored user gets the hashed version.
        raw_password = form.cleaned_data.get("password")
        user = form.save(commit=False)
        user.password = make_password(form.cleaned_data['password'])
        user.save()
        new_user = authenticate(username=user.username, password=raw_password)
        login(request, new_user)
        return redirect('/')
    # GET, or an invalid submission: render the (possibly bound) form.
    return render(request, 'accounts/register.html', {'form': form})
def logoutView(request):
    """Log the current user out and send them to the homepage."""
    logout(request)
    # BUG FIX: the original called redirect('/') without returning it, so the
    # view returned None and Django raised "The view didn't return an
    # HttpResponse object". A view must return the response.
    return redirect('/')
"aleksandarkapitan@gmail.com"
] | aleksandarkapitan@gmail.com |
e58a1949c1e063f280c87919ada6c13561019daa | 93465443f6cb0bfe98c46efa9ad61383fc183470 | /venv/Scripts/pip3.7-script.py | 4818673655ec61a7326e3d3ce767981cb2586aae | [] | no_license | zhangli1229/gy-1906A | 3b1352d82a715d83a8fbc15aeb1ae8fb510739ed | 54aeb5a3788afce9ecb67fcb84faa86a635c74d0 | refs/heads/master | 2020-06-22T20:41:08.829994 | 2019-07-23T09:45:22 | 2019-07-23T09:45:22 | 198,394,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #!D:\softwaredata\pychrm\untitledgy1906A\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
# Auto-generated console-script shim: pkg_resources reads __requires__ to pin
# the distribution whose entry point is loaded below.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the Windows "-script.py"/".exe" suffix so argv[0] shows the
    # canonical program name ("pip3.7").
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # Resolve and invoke the 'pip3.7' console entry point of pip 10.0.1;
        # its return value becomes the process exit code.
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
| [
"1208269415@qq.com"
] | 1208269415@qq.com |
bf8a255302642fa1ffb2500deea40a1c1d64f7a5 | 95c022e4da21a545cc750d241339f07f4a3e8243 | /chexpert.py | ca1814e1ff150dc7fa8e0f3dd9f370fbee8e39bf | [] | no_license | hankyul2/ICCV2021_DeepAUC | 44c0b7dc4ae5e39963d7de753d64cbbc6bc28c5c | 3193440baf29cf1e9ff78fc32544fa9e2a996d40 | refs/heads/main | 2023-07-17T13:22:21.742606 | 2021-09-09T21:52:49 | 2021-09-09T21:52:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,324 | py | import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision.transforms as tfs
import cv2
from PIL import Image
import pandas as pd
class CheXpert(Dataset):
    '''
    PyTorch Dataset for the CheXpert chest X-ray collection.

    Loads the official train/valid CSV, optionally keeps frontal views only,
    optionally upsamples selected disease columns, imputes the -1/NaN labels,
    and serves normalized 3-channel images with either a single-class label
    (class_index in 0..4) or all five labels (class_index == -1).

    Reference:
    @inproceedings{yuan2021robust,
    title={Large-scale Robust Deep AUC Maximization: A New Surrogate Loss and Empirical Studies on Medical Image Classification},
    author={Yuan, Zhuoning and Yan, Yan and Sonka, Milan and Yang, Tianbao},
    booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
    year={2021}
    }
    '''
    def __init__(self,
                 csv_path,
                 image_root_path='',
                 image_size=320,
                 class_index=0,
                 use_frontal=True,
                 use_upsampling=True,
                 flip_label=False,
                 shuffle=True,
                 seed=123,
                 verbose=True,
                 upsampling_cols=['Cardiomegaly', 'Consolidation'],
                 train_cols=['Cardiomegaly', 'Edema', 'Consolidation', 'Atelectasis', 'Pleural Effusion'],
                 mode='train'):
        # load data from csv; strip the dataset-folder prefix from the Path
        # column so image_root_path can be prepended uniformly below.
        self.df = pd.read_csv(csv_path)
        self.df['Path'] = self.df['Path'].str.replace('CheXpert-v1.0-small/', '')
        self.df['Path'] = self.df['Path'].str.replace('CheXpert-v1.0/', '')
        if use_frontal:
            self.df = self.df[self.df['Frontal/Lateral'] == 'Frontal']
        # upsample selected cols: duplicate every positive row of each column
        # once by concatenating the positives back onto the frame.
        if use_upsampling:
            assert isinstance(upsampling_cols, list), 'Input should be list!'
            sampled_df_list = []
            for col in upsampling_cols:
                print ('Upsampling %s...'%col)
                sampled_df_list.append(self.df[self.df[col] == 1])
            self.df = pd.concat([self.df] + sampled_df_list, axis=0)
        # impute missing values; the uncertain label -1 is mapped to 1 for
        # Edema/Atelectasis and to 0 for the other three (LSR-style policy),
        # NaN always becomes 0.
        for col in train_cols:
            if col in ['Edema', 'Atelectasis']:
                self.df[col].replace(-1, 1, inplace=True)
                self.df[col].fillna(0, inplace=True)
            elif col in ['Cardiomegaly','Consolidation', 'Pleural Effusion']:
                self.df[col].replace(-1, 0, inplace=True)
                self.df[col].fillna(0, inplace=True)
            else:
                self.df[col].fillna(0, inplace=True)
        self._num_images = len(self.df)
        # 0 --> -1 (turn negatives into -1 for losses that expect +/-1 labels)
        if flip_label and class_index != -1: # In multi-class mode we disable this option!
            self.df.replace(0, -1, inplace=True)
        # shuffle data deterministically via the given seed
        if shuffle:
            data_index = list(range(self._num_images))
            np.random.seed(seed)
            np.random.shuffle(data_index)
            self.df = self.df.iloc[data_index]
        assert class_index in [-1, 0, 1, 2, 3, 4], 'Out of selection!'
        assert image_root_path != '', 'You need to pass the correct location for the dataset!'
        if class_index == -1: # 5 classes
            print ('Multi-label mode: True, Number of classes: [%d]'%len(train_cols))
            self.select_cols = train_cols
            # per-class {label_value: count} mapping, keyed by class position
            self.value_counts_dict = {}
            for class_key, select_col in enumerate(train_cols):
                class_value_counts_dict = self.df[select_col].value_counts().to_dict()
                self.value_counts_dict[class_key] = class_value_counts_dict
        else: # 1 class
            self.select_cols = [train_cols[class_index]] # this var determines the number of classes
            self.value_counts_dict = self.df[self.select_cols[0]].value_counts().to_dict()
        self.mode = mode
        self.class_index = class_index
        self.image_size = image_size
        self._images_list = [image_root_path+path for path in self.df['Path'].tolist()]
        if class_index != -1:
            self._labels_list = self.df[train_cols].values[:, class_index].tolist()
        else:
            self._labels_list = self.df[train_cols].values.tolist()
        # NOTE(review): self.imratio (read by the imbalance_ratio property) is
        # only assigned inside this verbose branch; constructing with
        # verbose=False leaves it unset — verify before relying on it.
        if verbose:
            if class_index != -1:
                print ('-'*30)
                if flip_label:
                    self.imratio = self.value_counts_dict[1]/(self.value_counts_dict[-1]+self.value_counts_dict[1])
                    print('Found %s images in total, %s positive images, %s negative images'%(self._num_images, self.value_counts_dict[1], self.value_counts_dict[-1] ))
                    print ('%s(C%s): imbalance ratio is %.4f'%(self.select_cols[0], class_index, self.imratio ))
                else:
                    self.imratio = self.value_counts_dict[1]/(self.value_counts_dict[0]+self.value_counts_dict[1])
                    print('Found %s images in total, %s positive images, %s negative images'%(self._num_images, self.value_counts_dict[1], self.value_counts_dict[0] ))
                    print ('%s(C%s): imbalance ratio is %.4f'%(self.select_cols[0], class_index, self.imratio ))
                print ('-'*30)
            else:
                print ('-'*30)
                imratio_list = []
                for class_key, select_col in enumerate(train_cols):
                    imratio = self.value_counts_dict[class_key][1]/(self.value_counts_dict[class_key][0]+self.value_counts_dict[class_key][1])
                    imratio_list.append(imratio)
                    print('Found %s images in total, %s positive images, %s negative images'%(self._num_images, self.value_counts_dict[class_key][1], self.value_counts_dict[class_key][0] ))
                    print ('%s(C%s): imbalance ratio is %.4f'%(select_col, class_key, imratio ))
                    print ()
                self.imratio = np.mean(imratio_list)
                self.imratio_list = imratio_list
                print ('-'*30)

    @property
    def class_counts(self):
        # Raw {label_value: count} mapping(s) computed in __init__.
        return self.value_counts_dict

    @property
    def imbalance_ratio(self):
        # Positive fraction (mean over classes in multi-label mode).
        return self.imratio

    @property
    def num_classes(self):
        return len(self.select_cols)

    @property
    def data_size(self):
        return self._num_images

    def image_augmentation(self, image):
        """Apply a random affine jitter (rotation/shift/scale) to a PIL image."""
        img_aug = tfs.Compose([tfs.RandomAffine(degrees=(-15, 15), translate=(0.05, 0.05), scale=(0.95, 1.05), fill=128)]) # pytorch 3.7: fillcolor --> fill
        image = img_aug(image)
        return image

    def __len__(self):
        return self._num_images

    def __getitem__(self, idx):
        """Return (image, label) with image as float32 CHW normalized to ImageNet stats."""
        # Read as single-channel grayscale (flag 0), augment only for training.
        image = cv2.imread(self._images_list[idx], 0)
        image = Image.fromarray(image)
        if self.mode == 'train':
            image = self.image_augmentation(image)
        image = np.array(image)
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        # resize and normalize; e.g., ToTensor()
        image = cv2.resize(image, dsize=(self.image_size, self.image_size), interpolation=cv2.INTER_LINEAR)
        image = image/255.0
        __mean__ = np.array([[[0.485, 0.456, 0.406]]])
        __std__ = np.array([[[0.229, 0.224, 0.225] ]])
        image = (image-__mean__)/__std__
        image = image.transpose((2, 0, 1)).astype(np.float32)
        # NOTE(review): both branches below are identical; kept as-is to
        # preserve behavior.
        if self.class_index != -1: # multi-class mode
            label = np.array(self._labels_list[idx]).reshape(-1).astype(np.float32)
        else:
            label = np.array(self._labels_list[idx]).reshape(-1).astype(np.float32)
        return image, label
if __name__ == '__main__':
    # Smoke test: build train/valid datasets from a local CheXpert copy and
    # wrap them in DataLoaders (class_index=0 selects Cardiomegaly only).
    root = '../chexpert/dataset/CheXpert-v1.0-small/'
    traindSet = CheXpert(csv_path=root+'train.csv', image_root_path=root, use_upsampling=True, use_frontal=True, image_size=320, mode='train', class_index=0)
    testSet = CheXpert(csv_path=root+'valid.csv', image_root_path=root, use_upsampling=False, use_frontal=True, image_size=320, mode='valid', class_index=0)
    trainloader = torch.utils.data.DataLoader(traindSet, batch_size=32, num_workers=2, drop_last=True, shuffle=True)
    testloader = torch.utils.data.DataLoader(testSet, batch_size=32, num_workers=2, drop_last=False, shuffle=False)
| [
"noreply@github.com"
] | hankyul2.noreply@github.com |
632fbbd068f482d6e31d78b8e68ab835e815c4bd | 167e3688c4cd702d667695d90627ec0b5fb5b824 | /tracker/map/urls.py | 97d3354f5add3a4930ee527d4027775dbed64b76 | [] | no_license | DarenRen/squirrel_tracker | fa893506b4c7bcc441a89d4eeffb122401d06d74 | 8e8584e1c695bfde147851f6569f7d5c76dbcf80 | refs/heads/master | 2022-05-10T05:53:13.656533 | 2020-02-29T03:26:17 | 2020-02-29T03:26:17 | 225,921,700 | 0 | 0 | null | 2022-04-22T22:50:37 | 2019-12-04T17:30:39 | Python | UTF-8 | Python | false | false | 92 | py | from django.urls import path
from . import views
# Route the app root ("") to the map view.
urlpatterns = [
    path('', views.map),
]
| [
"dr3055@columbia.edu"
] | dr3055@columbia.edu |
3673a5e5c3975dfa34ed7ff1eaa45b4c72d61729 | 0b3344d9f509fd3e63ec4632a381972c95e429f4 | /Face Reconstruction/3D Face Reconstruction with Weakly-Supervised Learning/recon_demo.py | 8968cffd2f9afb0d5d41a882baf0f80a989ef93a | [
"MIT"
] | permissive | KKhushhalR2405/Face-X | 4704a1f2144c3f71158f885a4e338a572a49e5ac | 4ffb82d4833b230141c1f8dd5829f9ae6cdb3045 | refs/heads/master | 2023-04-04T15:32:46.909615 | 2021-04-14T13:59:29 | 2021-04-14T13:59:29 | 355,666,537 | 3 | 0 | MIT | 2021-04-07T19:55:13 | 2021-04-07T19:55:12 | null | UTF-8 | Python | false | false | 3,920 | py | import os
import glob
import torch
import numpy as np
from models.resnet_50 import resnet50_use
from load_data import transfer_BFM09, BFM, load_img, Preprocess, save_obj
from reconstruction_mesh import reconstruction, render_img, transform_face_shape, estimate_intrinsic
def recon():
    """Reconstruct a 3D face for every image under ``dataset``.

    For each .png/.jpg (with a matching .txt of 5 facial landmarks) this
    regresses BFM coefficients with a ResNet-50, then writes to ``output``:
    a rendered face image (.png), a canonical-view mesh (_mesh.obj) and a
    rendered UV texture (_texture.png).
    """
    # input and output folder
    image_path = r'dataset'
    save_path = 'output'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    img_list = glob.glob(image_path + '/**/' + '*.png', recursive=True)
    img_list += glob.glob(image_path + '/**/' + '*.jpg', recursive=True)
    # read BFM face model
    # transfer original BFM model to our model
    if not os.path.isfile('BFM/BFM_model_front.mat'):
        transfer_BFM09()
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu:0'
    bfm = BFM(r'BFM/BFM_model_front.mat', device)
    # read standard landmarks for preprocessing images
    lm3D = bfm.load_lm3d()
    # Coefficient-regression network, frozen for inference.
    model = resnet50_use().to(device)
    model.load_state_dict(torch.load(r'models\params.pt'))
    model.eval()
    for param in model.parameters():
        param.requires_grad = False
    for file in img_list:
        # load images and corresponding 5 facial landmarks
        img, lm = load_img(file, file.replace('jpg', 'txt'))
        # preprocess input image
        input_img_org, lm_new, transform_params = Preprocess(img, lm, lm3D)
        input_img = input_img_org.astype(np.float32)
        # NHWC -> NCHW for the network
        input_img = torch.from_numpy(input_img).permute(0, 3, 1, 2)
        # the input_img is BGR
        input_img = input_img.to(device)
        arr_coef = model(input_img)
        coef = torch.cat(arr_coef, 1)
        # reconstruct 3D face with output coefficients and face model
        face_shape, face_texture, face_color, landmarks_2d, z_buffer, angles, translation, gamma = reconstruction(coef, bfm)
        fx, px, fy, py = estimate_intrinsic(landmarks_2d, transform_params, z_buffer, face_shape, bfm, angles, translation)
        # Pose the face, then flip depth so the renderer sees it front-on.
        face_shape_t = transform_face_shape(face_shape, angles, translation)
        face_color = face_color / 255.0
        face_shape_t[:, :, 2] = 10.0 - face_shape_t[:, :, 2]
        images = render_img(face_shape_t, face_color, bfm, 300, fx, fy, px, py)
        images = images.detach().cpu().numpy()
        images = np.squeeze(images)
        # Mirror the input folder structure under the output folder.
        path_str = file.replace(image_path, save_path)
        path = os.path.split(path_str)[0]
        if os.path.exists(path) is False:
            os.makedirs(path)
        from PIL import Image
        images = np.uint8(images[:, :, :3] * 255.0)
        # init_img = np.array(img)
        # init_img[images != 0] = 0
        # images += init_img
        img = Image.fromarray(images)
        img.save(file.replace(image_path, save_path).replace('jpg', 'png'))
        face_shape = face_shape.detach().cpu().numpy()
        face_color = face_color.detach().cpu().numpy()
        face_shape = np.squeeze(face_shape)
        face_color = np.squeeze(face_color)
        save_obj(file.replace(image_path, save_path).replace('.jpg', '_mesh.obj'), face_shape, bfm.tri, np.clip(face_color, 0, 1.0)) # 3D reconstruction face (in canonical view)
        from load_data import transfer_UV
        from utils import process_uv
        # loading UV coordinates and rendering the texture map at 600x600
        uv_pos = transfer_UV()
        tex_coords = process_uv(uv_pos.copy())
        tex_coords = torch.tensor(tex_coords, dtype=torch.float32).unsqueeze(0).to(device)
        face_texture = face_texture / 255.0
        images = render_img(tex_coords, face_texture, bfm, 600, 600.0 - 1.0, 600.0 - 1.0, 0.0, 0.0)
        images = images.detach().cpu().numpy()
        images = np.squeeze(images)
        # from PIL import Image
        images = np.uint8(images[:, :, :3] * 255.0)
        img = Image.fromarray(images)
        img.save(file.replace(image_path, save_path).replace('.jpg', '_texture.png'))
# Script entry point: run the full reconstruction pipeline.
if __name__ == '__main__':
    recon()
| [
"aswingopinathan1871@gmail.com"
] | aswingopinathan1871@gmail.com |
4c237c4261c272ccd358f3f831d901bcebd60b08 | 658c6b7cc740083d27d3b0a1e6d87c941caa1aa6 | /src/controller/controller.py | 83e94d2866b74d06d4bd0f13da1846eb41961cae | [] | no_license | glescki/pronondb | 9d68cb07afb50658489592c485a1d9f9ad44c696 | 2b3d1f468f75b3557b8638b669c46e5d97212096 | refs/heads/master | 2022-11-29T02:58:39.895241 | 2020-08-11T15:42:42 | 2020-08-11T15:42:42 | 251,600,600 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,007 | py | import re
import datetime
import csv
import tkinter as tk
from tkinter import ttk
from tkinter.ttk import Progressbar
from pathlib import Path
from model.model import Model
from view.view import *
class Controller(tk.Tk):
    """Tkinter main window that mediates between the GUI views and the MySQL Model.

    Builds the layout, prompts for DB credentials, and funnels every SQL
    operation through send_query(), logging mutating commands to
    LogPreenchimento.
    """
    def __init__(self, ip, db):
        # ip/db identify the MySQL server; credentials are asked for below.
        self.ip = ip
        self.db = db
        tk.Tk.__init__(self)
        self.title("Pronon DB test")
        self.geometry('800x600')
        style = ttk.Style(self)
        style.theme_use('clam')
        self.make_layout()
        self.check_credentials()
        self.mainloop()

    def check_credentials(self):
        """Ask the user for DB credentials and open/validate the Model connection."""
        credentials_modal = Dialog(self)
        user, pswd = credentials_modal.show()
        if user == '' and pswd == '':
            self.handler(2, 1, "Erro: Usuário e senha não podem ser vazios")
            self.destroy()
            return
        self.model = Model(self.ip, user, pswd, self.db)
        # con_check() returns (error_type, code, message); truthy error_type
        # means the connection failed.
        result, code, message = self.model.con_check()
        if result:
            self.handler(result, code, message)
            self.destroy()
            return
        else:
            tk.messagebox.showinfo("Sucesso!", "Conexão com o banco de dados foi um sucesso!")
        self.user = user
        self.pswd = pswd

    def make_layout(self):
        """Create the container frame, menu, toolbar, navbar and main area."""
        self.container = tk.Frame(self)
        self.container.pack(side='top', fill='both', expand=True)
        self.menu = Menu(self.container, self)
        self.toolbar = Toolbar(self.container, self)
        self.navbar = Navbar(self.container, self)
        self.main = Main(self.container, self)
        # self.statusbar.pack(side='bottom', fill='x')
        self.toolbar.pack(side='top', fill='x')
        self.navbar.pack(side='left', fill='y')
        self.main.pack(side='right', fill='both', expand=True)
        self.main.grid_rowconfigure(0, weight=1)
        self.main.grid_columnconfigure(0, weight=1)
        self.container.focus_set()
        # Alt toggles the menubar, like classic desktop apps.
        self.bind('<KeyRelease-Alt_L>', self.menu.toggle_menubar)

    def show_frame(self, page_name):
        """Replace the main area with the view class named *page_name*.

        NOTE(review): the class is looked up via globals(); page_name must be
        a trusted, internally-generated name.
        """
        self.main.destroy()
        # create the new page and pack it in the container
        cls = globals()[page_name]
        self.main = cls(self.container, self)
        self.main.pack(fill="both", expand=True)

    def send_query(self, command, table, columns, query_values=None, where=None, where_values=None):
        """Build and execute an INSERT/SELECT/UPDATE/DELETE via the Model.

        Returns the Model's (error_type, code, message[, rows...]) tuple, or a
        fallback tuple when *command* is unknown. Non-SELECT commands are
        logged via insert_log().

        NOTE(review): table/column/where values are concatenated straight
        into the SQL string — this is vulnerable to SQL injection if any of
        them can come from user input; prefer fully parameterized queries.
        """
        return_value = 3, '?', 'MySQL command not found'
        if command == 'INSERT':
            message = 'INSERT into `' + table + \
                '` ' + str(columns) + ' VALUES '
            # One %s placeholder per value; quotes are stripped so the tuple
            # renders as (%s, %s, ...) in the statement.
            nr_values = tuple(['%s' for s in query_values])
            message = message + str(nr_values)
            message = message.replace("'", "")
            return_value = self.model.insert(message, query_values)
        if command == 'SELECT':
            where_stat = ''
            if where:
                nr_values = tuple(['%s' for s in where])
                where_stat = ' WHERE ' + str(where) + '=' + where_values
            message = 'SELECT ' + str(columns) + ' FROM ' + table + where_stat
            return_value = self.model.select(message)
        if command == 'UPDATE':
            set_stat = ''
            for column in columns:
                set_stat = set_stat + column+'=%s, '
            # drop the trailing ", "
            set_stat = set_stat[:-2]
            where_stat = ' WHERE ' + str(where) + '=' + where_values
            message = 'UPDATE ' + table + ' SET ' + set_stat + where_stat
            return_value = self.model.update(message, query_values)
        if command == 'DELETE':
            where_stat = ' WHERE ' + str(where) + '=' + where_values
            message = 'DELETE from ' + table + where_stat
            return_value = self.model.delete(message)
        if command != 'SELECT':
            self.insert_log(command, message, query_values, table)
        return return_value

    def insert_log(self, command, query, query_values, table):
        """Record a mutating query (with its values inlined) in LogPreenchimento."""
        # Replace the placeholder section of the query text with the actual
        # values so the log shows the final statement.
        query = query.replace('%s, ', '')
        splitted_query = query.split(' ')
        if command == 'INSERT':
            index = -1
        if command == 'UPDATE' or command == 'DELETE':
            index = -3
        splitted_query[index] = str(query_values).strip('[]')
        final_query = ' '.join(map(str, splitted_query))
        time = datetime.datetime.now()
        # NOTE(review): '%Y-%d-%m' puts day before month — confirm this is
        # the intended column format and not a %Y-%m-%d typo.
        time_str = time.strftime('%Y-%d-%m %H:%M:%S')
        message = 'INSERT into LogPreenchimento (Comando, Query, Tabela, Data, Nome) VALUES (%s, %s, %s, %s, %s)'
        log_values = (command, final_query, table, time_str, self.user)
        return_value = self.model.insert(message, log_values)

    def handler(self, error_type, code, message):
        """Show a MySQL warning (type 1) or error (type 2) message box."""
        if error_type == 1:
            tk.messagebox.showwarning(
                title="Mysql Aviso", message=str(code)+': '+message)
        if error_type == 2:
            tk.messagebox.showerror(
                title="Mysql Erro", message=str(code)+': '+message)

    def get_rows_ids(self, table):
        """Return the list of id<table> values for every row of *table*."""
        table_values = self.send_query('SELECT', table, '*')
        ids = [x['id'+table] for x in table_values[2]]
        return ids

    def get_rows(self, table):
        """Return the raw send_query() result for SELECT * FROM *table*."""
        return self.send_query('SELECT', table, '*')

    def export_data(self, table):
        """Dump *table* (header + rows) to ../tmp/<table>.csv."""
        table_values = self.get_rows(table)
        rows = table_values[2]
        cursor = table_values[3]
        file_path = Path.cwd()
        base_path = file_path.parent
        tmp_path = base_path / 'tmp/'
        csv_filename = table + '.csv'
        csv_file_path = tmp_path / csv_filename
        result = []
        # First row of the CSV is the column names from the cursor metadata.
        column_names = []
        for i in cursor.description:
            column_names.append(i[0])
        result.append(column_names)
        for row in rows:
            result.append(row.values())
        # Write result to file.
        with open(csv_file_path, 'w', newline='') as csvfile:
            csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            for r in result:
                # print(r)
                csvwriter.writerow(r)
| [
"glecki@gmail.com"
] | glecki@gmail.com |
ea83bf3f9920c2218538afdaf6f6dc85c9be3009 | 3e1b96867f2d41296928749af4fa436a198d070f | /Miscellaneous/LRUcache.py | eb3ca551a11fe34d0e16965c6921b731e5a7c9e0 | [] | no_license | maryala9/InterviewPractise | 8c39d6f875d324cb8df9ce2c99559489f1401327 | 1996e6981b7657168573c10341abbed2ada8ed39 | refs/heads/master | 2021-06-09T15:08:56.498502 | 2021-05-25T23:44:07 | 2021-05-25T23:44:07 | 173,798,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | class LRUCache:
def __init__(self, capacity):
from collections import deque
self.capacity = capacity
self.dic = {}
self.deque = deque([])
def get(self, key):
if key not in self.dic: return -1
self.deque.remove(key)
self.deque.append(key)
return self.dic[key]
def put(self, key, value):
if key in self.dic:
self.deque.remove(key)
self.deque.append(key)
self.dic[key] = value
elif len(self.dic) == self.capacity:
val = self.deque.popleft()
self.dic.pop(val)
self.deque.append(key)
self.dic[key] = value
else:
self.deque.append(key)
self.dic[key] = value
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value) | [
"pmaryala@carrentals.com"
] | pmaryala@carrentals.com |
66e5be1ad43ded41a8eafa271daa533769610ace | 5af6a741fcc6e327a4ed5551596e461c96fa9b46 | /progjar2/tugas_udp_2/run_client_duluan/serverBroadcast_alpine4.py | 4c62da7da2b7dc7fb01c125016a304cffb084d31 | [] | no_license | afiffadhlurrahman/Pemrograman_Jaringan_E | 377f1942f0782efacbf9da6bf8c1a262cb234e35 | ad9833017836f79c1add7594c2bc17e3cf8b815f | refs/heads/master | 2023-06-28T04:23:20.674854 | 2021-07-19T17:29:51 | 2021-07-19T17:29:51 | 348,617,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | import socket
SERVER_IP = '192.168.122.101'  # NOTE(review): defined but never used below
SERVER_PORT = 5005
# UDP socket that listens for broadcast datagrams on SERVER_PORT.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Allow several listeners to share the port (run_client_duluan scenario).
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Accept broadcast traffic on this socket.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# "" binds to all local interfaces.
sock.bind(("", SERVER_PORT))
while True:
    # Block until a datagram arrives; print its payload and sender address.
    data, addr = sock.recvfrom(1024)
    # buffer size 1024
    print(addr)
    print("diterima ", data)
    print("dikirim oleh ", addr)
| [
"noreply@github.com"
] | afiffadhlurrahman.noreply@github.com |
c7f523807f996cae2f07692c4918cebcb18a824f | b37fdefb01d7b93a4f56a7c7cc60f9f78549de4c | /DI_Bootcamp/Week_9/Day_1/Exercise_XP/film_project_root/account_app/views.py | 15d82374b6660e2d3071afe8839fff8d9102006d | [] | no_license | AchimGoral/DI_Bootcamp | e7b13d7397ab5c9e5ad8041430c8bfbafec13c88 | 9345731503e2bb298bd3a579ffad590350f13df5 | refs/heads/main | 2023-04-18T20:06:45.631067 | 2021-05-01T08:08:45 | 2021-05-01T08:08:45 | 328,769,128 | 0 | 1 | null | 2021-01-27T14:30:09 | 2021-01-11T19:24:48 | HTML | UTF-8 | Python | false | false | 2,040 | py | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib import messages
from .models import *
from .forms import *
def sign_up(request):
    """Register a new account; on success authenticate and go to the homepage."""
    if request.method != 'POST':
        # Plain page visit: show an unbound registration form.
        return render(request, 'sign_up.html', {'form': RegistrationForm()})
    form = RegistrationForm(request.POST)
    if form.is_valid():
        form.save()
        # Stay logged in after signing up
        user = authenticate(username=form.cleaned_data['username'],
                            password=form.cleaned_data['password1'])
        login(request, user)
        return redirect('homepage')
    # Invalid submission: re-render with the bound form so errors are shown.
    return render(request, 'sign_up.html', {'form': form})
def login_view(request):
    """Show the login form (GET) or attempt authentication (POST)."""
    if request.method == "GET":
        return render(request, 'login.html', {'my_form': LoginForm()})
    if request.method == "POST":
        user = authenticate(
            request,
            username=request.POST['username'],
            password=request.POST['password'],
        )
        if user is None:
            # Bad credentials: flash a message and bounce back to the form.
            messages.error(request, 'Username and/or password incorrect. Please try again')
            return redirect('login')
        login(request, user)
        return redirect('homepage')
def logout_view(request):
    """End the current session and return to the homepage."""
    logout(request)
    return redirect ('homepage')
def profile(request, pk):
    """Render the profile page for the user with primary key *pk*."""
    # NOTE(review): User.objects.get raises DoesNotExist for an unknown pk
    # (surfacing as a 500); get_object_or_404 would give a 404 instead.
    my_profile = User.objects.get(id=pk)
    return render(request, 'profile.html', {'my_profile': my_profile})
def profile_edit(request):
    """Let the logged-in user edit their own account details."""
    if request.method == "GET":
        user_form = UserChange()
        return render(request, 'edit_user.html', {'user_form': user_form})
    if request.method == "POST":
        user_form = UserChange(request.POST, instance=request.user)
        if user_form.is_valid():
            user_form.save()
            return redirect('homepage')
        # BUG FIX: the original replaced the bound form with a fresh empty
        # one on failure, silently discarding all validation errors.
        # Re-render the bound form so the user can see what went wrong.
        return render(request, 'edit_user.html', {'user_form': user_form})
"achimgoral.il@gmail.com"
] | achimgoral.il@gmail.com |
739dcccea57984590744ae2f3ded4c36880ba683 | d1cdb4545edd7e0f1400a41568ed273e49b5b810 | /formulae/email.py | 084c8384342d907a9e9bdb95157ff0dcf67ee876 | [] | no_license | CelCpc/formulae-capitains-nemo | 5de42775c4f09fb78e5bd3080e7d8d4c355bb1e6 | 93069b952ddf213f4c38ef2f1ce8e0db8c1280ad | refs/heads/master | 2020-08-10T16:29:53.961506 | 2019-10-02T12:56:30 | 2019-10-02T12:56:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | from flask import current_app
from flask_mail import Message
from threading import Thread
from . import mail
def send_async_email(app, msg):
    """Deliver *msg* from a worker thread, inside *app*'s application context."""
    # Flask-Mail needs an app context; background threads don't inherit one.
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose an email and send it asynchronously.

    :param subject: Email subject line
    :param sender: Email sender
    :param recipients: List of email recipients
    :param text_body: The plain text body of the email
    :param html_body: The HTML body of the email
    """
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    # Grab the real app object (not the proxy) so the worker thread can
    # push its own application context, then fire and forget.
    app = current_app._get_current_object()
    worker = Thread(target=send_async_email, args=(app, msg))
    worker.start()
| [
"matthew.munson@uni-hamburg.de"
] | matthew.munson@uni-hamburg.de |
1b252b4d048d11b63a36045382cc884df015a010 | a69c3e2c1e9ac07bc33b9200b32a8587816d7ccc | /testing/HdrCompression/execution/pcaps/getSizePcaps.py | 170306e20459d553b008da8a02ec722c88df4ad8 | [
"Apache-2.0"
] | permissive | eniac/Flightplan | c6a517015ae059e1d53053b0d7c2e5b6c8db277f | ce88672ee71f3a47cc4d8c9139002b36cf9048cd | refs/heads/master | 2023-04-09T06:41:20.660288 | 2021-04-14T14:12:43 | 2021-04-14T14:12:43 | 292,552,969 | 23 | 3 | Apache-2.0 | 2021-01-24T15:51:52 | 2020-09-03T11:41:59 | C | UTF-8 | Python | false | false | 172 | py | from scapy.all import *
import sys
# Read at most the first 10 packets of the capture file given on the CLI.
pkts = rdpcap(sys.argv[1], 10)
# Idiom: sum() over a generator replaces the manual accumulator loop.
pcap_size = sum(len(pkt) for pkt in pkts)
print("size of pcap: " + str(pcap_size))
| [
"ncshy@seas.upenn.edu"
] | ncshy@seas.upenn.edu |
e12fb9c79b7f5cda59bb2bc26f2cd7784b829c15 | fbbd1ae50ee25b82fab93d3c111ae04719b44c79 | /PythonNetworking/Lab5/server.py | e0cb6999479b6874bc5ce792e892201e089c8f88 | [] | no_license | DonMacary/IQT | cb06e854f778363b9cc4b0449c6b61d463ff3b7c | c273837b51ba9caaebd531eb800852275bec4b32 | refs/heads/master | 2023-01-28T07:44:57.297424 | 2023-01-18T16:12:31 | 2023-01-18T16:12:31 | 155,270,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | """
Author: ELF
Course: Python
version: Python 2.7
FileName: chatServer.py
Lab5A
Instructions:
Write a TCP Server that will generate a random number from 0 to 100
Then write a TCP Client that will receive an input from the user
(number 0 to 100) and send the guess to the server. The server will
then send back a message prompting the user to guess higher or lower.
If the user guesses the correct number, have the server send back a
success message and when the client receives the success message it
will break the connection (close the socket).
"""
| [
"Dmacaryjr@gmail.com"
] | Dmacaryjr@gmail.com |
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    PERFORMANCE FIX: the original naive double recursion was O(2^n); this
    iterative form is O(n) with identical results for n >= 0. Negative n,
    which previously recursed until RecursionError, now raises ValueError.
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def mainFunc():
    """Return the first five Fibonacci numbers as a list."""
    # Idiom: a list comprehension replaces the manual append loop.
    return [fib(i) for i in range(5)]
mainFunc() | [
"rohan2jos@gmail.com"
] | rohan2jos@gmail.com |
4f445597c5ac30039c0f3c3333dae8b68184c0c5 | 9c862bb7f9ac093a9bcf17d9060389dbbb8b655b | /examples/instrumentation/19_show_window_tree.py | 3f1c33ed09182131494b8863549ee7626b2aad1c | [] | no_license | fabioz/winappdbg | 24917ce29a90a08e890e8cd7d44feaad22daf0c4 | 1603870dc3fa3d2984ef23b6d77e400fb0a21b99 | refs/heads/master | 2023-08-02T07:40:10.496090 | 2020-04-22T15:18:42 | 2020-04-22T15:18:42 | 23,669,656 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,702 | py | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# $Id$
from winappdbg import System, HexDump
def show_window_tree( window, indent = 0 ):
    """Recursively print *window* and its children as an ASCII-art tree.

    Each line shows the window handle (hex) and, if present, its caption.
    Python 2 code (print statements).
    """
    # Show this window's handle and caption.
    # Use some ASCII art to show the layout. :)
    handle = HexDump.integer( window.get_handle() )
    caption = window.get_text()
    line = ""
    if indent > 0:
        print "| " * indent
        line = "| " * (indent - 1) + "|---"
    else:
        print "|"
    if caption is not None:
        line += handle + ": " + caption
    else:
        line += handle
    print line
    # Recursively show the child windows.
    for child in window.get_children():
        show_window_tree( child, indent + 1 )
def main():
    """Dump the whole desktop window hierarchy to stdout."""
    # Create a system snapshot.
    system = System()
    # Get the Desktop window.
    root = system.get_desktop_window()
    # Now show the window tree.
    show_window_tree(root)
    # You can also get the tree as a Python dictionary:
    # tree = root.get_tree()
    # print tree

if __name__ == '__main__':
    main()
| [
"mvilas@gmail.com"
] | mvilas@gmail.com |
74886e64e262f24b4c6ffc9377592e4abb525f7e | a3bca97fcbd5d533cee094eb50d5f62b3bfde85f | /chapter5-booleans/test4.py | b07e54056e6dbe6596589047056ed33e81a9863d | [] | no_license | sineundong/python_minecraft | f0c0b83884e021f98427d0e2271ede2eaaf0b845 | 7aa7c85f32a0f0a1d163087cf1dd7e30e336c9e1 | refs/heads/master | 2020-05-31T23:06:07.705416 | 2019-06-06T07:38:15 | 2019-06-06T07:38:15 | 190,532,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from mcpi.minecraft import Minecraft
import time
from mcpi import block
# Place a gold block a few steps ahead of the player, read it back, and
# report whether the placed and read block types match.
mc = Minecraft.create()
x, y, z = mc.player.getPos()
fromt = 5  # horizontal offset from the player
blocktype = block.GOLD_BLOCK.id
mc.setBlock(x + fromt, y, z, blocktype)
blocktype2 = mc.getBlock(x + fromt, y, z)
mc.postToChat(blocktype == blocktype2)
# BUG FIX: the original used != here, printing "same" when the block types
# DIFFERED and "different" when they matched — the comparison was inverted.
if blocktype == blocktype2:
    print("same")
else:
    print("different")
# a=2 a==2
# == != >= <= | [
"dmsehd1308@naver.com"
] | dmsehd1308@naver.com |
88c6e853c52aba7642a9153ee3421fe9ab91a94d | 78007218df2dacb9a67132d2a88821cc87e0ed99 | /cnn.py | 2d8ce3c412ba140da26cfdf3855cdf8b24413734 | [] | no_license | seama107/tensorflow-image-classification | c5dbfa92550553c2682b431696870ad0c4825b47 | 0ec1f12575798493995fed8ff8ed1cf8372639b2 | refs/heads/master | 2021-05-10T00:15:00.435136 | 2018-01-24T22:18:09 | 2018-01-24T22:18:09 | 118,824,809 | 0 | 0 | null | 2018-01-24T21:32:26 | 2018-01-24T21:32:26 | null | UTF-8 | Python | false | false | 4,375 | py | import tensorflow as tf
from datetime import timedelta
import time
def new_weights(shape):
    """Create trainable weights of *shape*, drawn from a truncated normal (stddev 0.05)."""
    initial = tf.truncated_normal(shape, stddev=0.05)
    return tf.Variable(initial)
def new_biases(length):
    """Create *length* trainable biases, all initialized to 0.05."""
    initial = tf.constant(0.05, shape=[length])
    return tf.Variable(initial)
def new_conv_layer(input,               # The previous layer.
                   num_input_channels,  # Num. channels in prev. layer.
                   filter_size,         # Width and height of each filter.
                   num_filters,         # Number of filters.
                   use_pooling=True):   # Use 2x2 max-pooling.
    """Build a 2-D convolutional layer on top of `input`.

    Returns a tuple (layer, weights): the layer's output tensor and its
    filter weights (returned so the learned filters can be plotted later).
    """
    # Filter shape in the order tf.nn.conv2d expects:
    # [height, width, in_channels, out_channels].
    filter_shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = new_weights(shape=filter_shape)
    biases = new_biases(length=num_filters)

    # Stride 1 in every dimension. The first and last strides must
    # always be 1 because they index the image number and the input
    # channel; e.g. [1, 2, 2, 1] would move the filter 2 pixels at a
    # time across x and y. 'SAME' padding zero-pads the input so the
    # spatial output size equals the input size.
    layer = tf.nn.conv2d(input=input,
                         filter=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')

    # One bias value per filter channel, added to the convolution result.
    layer = layer + biases

    if use_pooling:
        # 2x2 max-pooling with stride 2: take the largest value in each
        # 2x2 window, halving the spatial resolution.
        layer = tf.nn.max_pool(value=layer,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')

    # ReLU non-linearity: max(x, 0) per pixel. Normally applied before
    # pooling, but relu(max_pool(x)) == max_pool(relu(x)) and pooling
    # first saves 75% of the relu operations.
    layer = tf.nn.relu(layer)

    return layer, weights
def flatten_layer(layer):
    """Reshape a 4-D conv layer [num_images, height, width, channels]
    into a 2-D tensor [num_images, num_features] suitable for feeding
    into a fully-connected layer.

    Returns a tuple (layer_flat, num_features).
    """
    # num_features = height * width * channels, computed from the
    # statically known shape of the input layer.
    num_features = layer.get_shape()[1:4].num_elements()

    # -1 lets TensorFlow infer the number of images so the tensor's
    # total size is unchanged by the reshape.
    layer_flat = tf.reshape(layer, [-1, num_features])

    return layer_flat, num_features
def new_fc_layer(input,          # The previous layer.
                 num_inputs,     # Num. inputs from prev. layer.
                 num_outputs,    # Num. outputs.
                 use_relu=True): # Use Rectified Linear Unit (ReLU)?
    """Build a fully-connected layer: input @ weights + biases, with an
    optional ReLU non-linearity. Returns the output tensor."""
    weights = new_weights(shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)

    # Matrix-multiply the input by the weights, then add the biases.
    out = tf.matmul(input, weights) + biases

    return tf.nn.relu(out) if use_relu else out
| [
"micseam@yahoo.com"
] | micseam@yahoo.com |
dfd2e20af52b997ca2c30f3e5abe74095b8ad76b | e5d5fa28999bcc6c642bb42dda93afd38e272b81 | /UVA/531 - Compromise/generate.py | 6dee0d625c29b89113c0412c8f3c2aec4602f471 | [] | no_license | chiahsun/problem_solving | cd3105969983d16d3d5d416d4a0d5797d4b58e91 | 559fafa92dd5516058bdcea82a438eadf5aa1ede | refs/heads/master | 2023-02-05T06:11:27.536617 | 2023-01-26T10:51:23 | 2023-01-26T10:51:23 | 30,732,382 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | import random;
import string;
# Fixed seed so the generated sequences are reproducible across runs.
random.seed(0);
def generate_seq(n_char = 100):
    """Print `n_char` random lowercase letters, each followed by a
    space, then a newline and a '#' terminator line."""
    # One random.choice call per character, in the same order as before,
    # so a seeded RNG produces identical output.
    for letter in (random.choice(string.ascii_lowercase) for _ in range(n_char)):
        print(letter, end=' ')
    print('\n#')
# Emit two independent sequences of the default length (100 letters),
# each terminated by its own '#' marker line.
generate_seq();
generate_seq();
| [
"chiahsun0814@gmail.com"
] | chiahsun0814@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.