blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
545f9c9996ab5474d901de26054f8c5201f37353 | 76388f7547892fba628fa4b7f373f1d55fea7efc | /forschung_ws/build/aruco_detector/catkin_generated/pkg.develspace.context.pc.py | 575f1efff29f1f265ddb7e3ccebc7edf1df33d59 | [] | no_license | HuiwenPan/workspace | 85678486f19fb9e0413d5ffc9fff96ae6e784f71 | 167b55a07894b7f7cd5b14630d4237fcfc4a16d0 | refs/heads/master | 2021-06-18T08:49:33.591236 | 2021-04-19T07:57:30 | 2021-04-19T07:57:30 | 189,837,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: generated catkin package context (pkg.context.pc.in template) for the
# aruco_detector package — normally regenerated by the build, not hand-edited.
CATKIN_PACKAGE_PREFIX = ""
# Empty include-dir list: "".split(';') would yield [''], hence the guard.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "cv_bridge;geometry_msgs;roscpp;std_msgs;tf".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "aruco_detector"
PROJECT_SPACE_DIR = "/home/phw/workspace/forschung_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"ga27keq@mytum.de"
] | ga27keq@mytum.de |
08c4ce8c98389109caa8e6e14783e9085eee914c | 4c8b38b2fc0f5e60c5f020df331107665bd44a72 | /instagram_project/asgi.py | b22edf1e05fee101a47d5dbde8c5e18461e2d673 | [] | no_license | Ehtehsam/Instagram-clone-application | c0d6329d4af167ab902619ee36a10ba7d40d2e1e | a17ef92a93e1cc26119c4602a4ae6128442fbbf6 | refs/heads/main | 2023-05-31T04:58:13.182801 | 2021-06-13T17:24:54 | 2021-06-13T17:24:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
ASGI config for instagram_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'instagram_project.settings')
# Module-level ASGI callable picked up by ASGI servers (daphne, uvicorn, ...).
application = get_asgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
1adac6008f628ed96ab600f1e7318a453a711d96 | 69e0e923a04e0a7cdebbb3c9720f4d501671843a | /lewis_arepo_code/interpolation_test.py | cd5980966dc5fbaebab550157f587171f9994ef4 | [] | no_license | lewisprole/Pop3_Bfield | 132bd804c8e587ebe8f8efdec8bbc46c0cf619ad | 61f8643f5794e793f5aea8ab07a787f3f20ac1d1 | refs/heads/master | 2021-11-20T14:40:00.963868 | 2021-08-11T09:38:45 | 2021-08-11T09:38:45 | 213,593,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,397 | py | import numpy as np
import arepo_input_writer
import velocities
import spherical_spray
import radial_density
import internal_energy
import mass
import code_units
import calculate_radius
import astropy.constants as ap
import matplotlib.pyplot as plt
import plot3D
import field_maker
'''most of this doesn't matter, just trying to get an increasingly resolved random spray of cells'''
#box parameters - Bonnor Ebert sphere in diffuse ISM
T=200
mu=1
#M=1.989e33
r=1
boxsize=4*r
mid=boxsize/2
G=ap.G.cgs.value
# Random magnetic field on a 50^3 grid, normalised so max |B| = 1.
bx,by,bz=field_maker.create_nonscaled_Bfield(50,3/2)#10,3/2)
B=np.sqrt(bx**2+by**2+bz**2)
B=B.max()
bx,by,bz=bx/B,by/B,bz/B
# Same generator (different spectral slope) reused for the velocity field.
vx,vy,vz=field_maker.create_nonscaled_Bfield(50,-2)#10,-2)
V=np.sqrt(vx**2+vy**2+vz**2)
V=V.max()
vx,vy,vz=vx/V,vy/V,vz/V
#positions
Nfield=50**3#10**3
N=int(Nfield) * np.array([4])#np.array([5e-1])#1e-1,1,10])
locs='1e-1','1','10'
for i in range(len(N)):
    print(N[i])
    # Uniform random cell positions, then sample both grids at those points.
    x,y,z=spherical_spray.uniform_spray(int(N[i]),boxsize,boxsize,boxsize)
    Bx,By,Bz=field_maker.interpolate(bx,by,bz,x,y,z,boxsize)
    Vx,Vy,Vz=field_maker.interpolate(vx,vy,vz,x,y,z,boxsize)
    # Equal-mass cells: total volume divided by cell count.
    rho=np.ones_like(x)*boxsize**3/N[i]
    #others
    ids =np.linspace(1,len(x),len(x)).astype(int)
    U=internal_energy.int_en(len(x),T,mu)
    sofar=[]
    # Arepo snapshot header fields — mostly unused defaults for this test.
    npart=(len(x),0,0,0,0,0)
    massarr=(0,0,0,0,0,0)
    time=0
    redshift=0
    flag_sfr=0
    flag_feedback=0
    npartTotal=(len(x),0,0,0,0,0)
    flag_cooling=0
    num_files=1
    cos1=0
    cos2=0
    hubble_param=1
    flag_stellarage=0
    flag_metals=0
    npartHighword=(0,0,0,0,0,0)
    flag_entropy=0
    flag_dp=1
    flag_1pt=0
    scalefactor=1
    #write ICs file
    # NOTE(review): Arepo block tags are 4 characters wide ('POS ', 'ID  ',
    # 'U   ') — confirm the padding in these literals survived reformatting.
    sofar=arepo_input_writer.header(sofar,npart,massarr,time,redshift,flag_sfr,flag_feedback,
                                    npartTotal,flag_cooling,num_files,boxsize,cos1,cos2,
                                    hubble_param,flag_stellarage,flag_metals,npartHighword,
                                    flag_entropy,flag_dp,flag_1pt,scalefactor)
    sofar=arepo_input_writer.tag_block(sofar,(x,y,z),'POS ','d',3)
    sofar=arepo_input_writer.tag_block(sofar,(Vx,Vy,Vz),'VEL ','d',3)
    sofar=arepo_input_writer.tag_block(sofar,ids,'ID ','i',1)
    sofar=arepo_input_writer.tag_block(sofar,rho,'MASS','d',1)
    sofar=arepo_input_writer.tag_block(sofar,U,'U ','d',1)
    sofar=arepo_input_writer.tag_block(sofar,(Bx,By,Bz),'BFLD','d',3)
    #print('/scratch/c.c1521474/interpolation_trial/'+locs[i]+'/arepo_input.dat')
    arepo_input_writer.writer(sofar,'/scratch/c.c1521474/interpolation_trial/4/arepo_input.dat')
| [
"prolel@cardiff.ac.uk"
] | prolel@cardiff.ac.uk |
9ea0190bdb7d47850255170b8be8594d1c1885f1 | 43c628960263f7418f323e79f9b35a256b5b2c2a | /varios/k-means.py | 7180b4e51479418b5df84bc365b17c66a17be992 | [] | no_license | jcsuscriptor/pythonLabs | fe1e166ec54f355ca08a2a1945f7c6c5aa14f1c1 | 42cdd0e49b09133f56dc5e360ea38e70f20487d9 | refs/heads/master | 2021-07-22T20:39:11.384327 | 2017-11-02T05:08:23 | 2017-11-02T05:08:23 | 109,075,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | import numpy as np
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
# Elbow-method demo: two synthetic 10-point 2-D clusters near (1, 1) and
# (4, 4); fit KMeans for k = 1..9 and plot the average distortion.
cluster1 = np.random.uniform(0.5, 1.5, (2, 10))
cluster2 = np.random.uniform(3.5, 4.5, (2, 10))
# hstack gives shape (2, 20); transpose to the (n_samples, n_features) layout.
X = np.hstack((cluster1, cluster2)).T
K = range(1, 10)
meandistortions = []
for k in K:
    kmeans = KMeans(n_clusters=k)
    kmeans.fit(X)
    # Mean distance from each sample to its nearest cluster centre.
    meandistortions.append(sum(np.min(cdist(X, kmeans.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0])
plt.plot(K, meandistortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Average distortion')
plt.title('Selecting k with the Elbow Method')
plt.show()
"jcsuscriptor@gmail.com"
] | jcsuscriptor@gmail.com |
ed96d3426a27ada9979c822bfc9c783cf9d0a39b | 0a5a1c59d4c7ee62dcb9c93e2c814e499e159040 | /parser.py | 1308d992cac3517372c3440e93c1324af646c2cf | [] | no_license | Shvadchak/alerter | 327e6ebc1bb8a10557e7f85479b30b6a3705348f | 82eb659f9d5b4443996cd59286fe98df7c91d909 | refs/heads/master | 2020-04-06T23:01:14.094372 | 2018-11-16T10:37:31 | 2018-11-16T10:37:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,775 | py | #! /usr/bin/python
import requests
import json
import time
# SECURITY(review): the Telegram bot token is hardcoded in this URL and has
# been committed to source control — revoke it and load it from the
# environment instead.
url = "https://api.telegram.org/bot758261552:AAE1zVA2sHNw_WxDtVZolbLivX3-W8Xhd6k/"
# One-off snapshot of the reported hashrate (MH/s) for each of the two miners.
# NOTE(review): response.json() is parsed twice per miner; the history list's
# last entry is assumed to be the most recent sample — confirm API ordering.
response = requests.get("https://api.ethermine.org/miner/:0x6ddd79c6e71d4bfca125f4ae38c578af8c103daa/history/")
data = response.json()["data"][len(response.json()["data"])-1]
mhz = (int(data.get(u'reportedHashrate')))/1000000
response2 = requests.get("https://api.ethermine.org/miner/:0x93665d08f3581c1fa4cb30eaadee0b18ddc7b6cb/history/")
data2 = response2.json()["data"][len(response2.json()["data"])-1]
mhz2 = (int(data2.get(u'reportedHashrate')))/1000000
print ("ReportedHashrate_bh: " + str(mhz) + " MH/s")
print ("ReportedHashrate_bg: " + str(mhz2) + " MH/s")
def get_updates_json(request):
    """Fetch the pending Telegram updates for the bot at base URL *request*."""
    return requests.get(request + 'getUpdates').json()
def last_update(data):
    """Return the most recent update object from a getUpdates response."""
    return data['result'][-1]
def get_chat_id(update):
    """Extract the chat id from a single Telegram update object."""
    return update['message']['chat']['id']
def send_mess(chat, text):
    """POST *text* to Telegram chat *chat*; return the HTTP response."""
    payload = {'chat_id': chat, 'text': text}
    return requests.post(url + 'sendMessage', data=payload)
# Announce startup to the chat that most recently messaged the bot, with the
# hashrates sampled at import time.
chat_id = get_chat_id(last_update(get_updates_json(url)))
send_mess(chat_id, "Monitoring service has been started\n" "ReportedHashrate_bh: " + str(mhz) + " MH/s\n" "ReportedHashrate_bg: " + str(mhz2) + " MH/s")
send_mess(chat_id, u'\U0001F4B0' + u"\U0001F680" + u'\U0001F4B0' + u"\U0001F680" + u'\U0001F4B0' + u"\U0001F680")
def monitoring():
    # Alert the chat if either rig's reported hashrate looks too low.
    # NOTE(review): mhz/mhz2 are fetched once at module import, so every
    # iteration re-checks the same stale values — presumably the ethermine
    # API calls were meant to be repeated inside this function; confirm.
    if mhz > 100 and mhz2 > 100:
        time.sleep(600)
    else:
        send_mess(chat_id, "Mining has been down, needs maintenance" + u"\U0001F4A3" + u"\U0001F4A3" + u"\U0001F4A3")
        time.sleep(2000)
while True:
    monitoring()
| [
"leo160886@gmail.com"
] | leo160886@gmail.com |
d4ed80ef99e75147cf94d38123db90192153fcf0 | 3011e024b5f31d6c747a2bd4a143bb6a0eeb1e1d | /chapter03/template_verbatim_demo/template_verbatim_demo/settings.py | 018ef380d882164bd7cc916c0025ee8d08898a97 | [] | no_license | yingkun1/python-django | a3084460a83682f3e0848d5b40c881f93961ecc2 | 08c9ed3771eb245ee9ff66f67cf28730d2675bbe | refs/heads/master | 2022-12-11T12:33:20.788524 | 2019-06-12T09:30:59 | 2019-06-12T09:30:59 | 189,977,625 | 1 | 0 | null | 2022-11-22T02:57:01 | 2019-06-03T09:43:30 | Python | UTF-8 | Python | false | false | 3,178 | py | """
Django settings for template_verbatim_demo project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'my9sj^v@hs777+5b4$yqf(&qz64v%!^ac^uxq(^r3gk@=*w(0u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'template_verbatim_demo.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'template_verbatim_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"925712087@qq.com"
] | 925712087@qq.com |
b61b136e508f3e81fcbdff3a302d5246eace0d16 | f433f7c884729e697ae110fa5a795dcc04449e42 | /Code_tianyu/AA_ddpg_iLQR - control/main.py | ccb2363ff91cf8fe36879d5e7aafb0a7b8aadb77 | [] | no_license | hzm2016/iLQG | f05fbf98543f2a016cee1ced1e6562b65748a262 | 5b3d5712ece14cbe6aefda00535c65149c27d313 | refs/heads/master | 2020-06-01T17:04:37.135285 | 2019-06-08T07:38:23 | 2019-06-08T07:38:23 | 190,858,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,627 | py | import winsound
import traceback
import matplotlib.pyplot as plt
import time
from iENV import Env_PeginHole
from iDDPG import DDPG
import os
import pickle
import numpy as np
from HMI import multicoreHMI
from iIMAGINATION import imaginationROLLOUTS
from iLQR_controller import iLQR, fd_Cost, fd_Dynamics, myCost
import numdifftools as nd
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow CPU warnings
MAX_TRAIN_EPISODES = 25  # number of training episodes
MAX_EP_STEPS = 100  # maximum steps per episode
MAX_EVA_EPISODES = 10  # number of evaluation episodes
# train or evaluate?
RESTORE_AGENT = True
ON_TRAIN = True  # True False
ENABLE_IR = False  # whether to use imaginationROLLOUTS
NUM_IR = 5  # imagined transitions generated per real step
ENABLE_ALERT = False  # model-based contact-force pre-alert
ENABLE_ILQR = False  # iLQR-guided action selection
PLAY_SOUND = False  # play the per-episode prompt sound
SHOW_TRAIN_RESULT = False
SHOW_EVAL_RESULT = False
TRAIN_ROUND = 5
USE_ACTION_BIAS = False
NUM_DEMON = 10
TUBE_ENABLE = False
SAVE_INTERVAL = 10
path_train_seqs_record = 'myRecord/train_seqs_record.pkl'
path_eval_seqs_record = 'myRecord/eval_seqs_record.pkl'
def get_hmi():
    """Poll the HMI for a command and toggle the episode prompt sound.

    'S' enables the sound, 's' disables it (via the module-level flag).
    """
    ctw_hmi = hmi.get_hmi()
    global play_sound
    if ctw_hmi == 'S':
        play_sound = True
        print('【EPISODE提示音】开启')
    if ctw_hmi == 's':
        play_sound = False
        print('【EPISODE提示音】关闭')
def train(env, agent_ddpg):  # start training
    """Run DDPG training episodes on the peg-in-hole environment.

    Optionally guides actions with iLQR over a locally fitted model and
    augments the replay buffer with imagined rollouts; checkpoints the
    networks and the trajectory record every 5 episodes.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump — diff the control flow against the original file.
    """
    # agent_ddpg = DDPG(a_dim, s_dim)
    global play_sound
    global enable_ir
    global enable_ilqr
    env.seed(200)
    file_log = open("Log_train.txt", 'w')
    train_seqs_record = []
    if RESTORE_AGENT:
        agent_ddpg.restore()  # restore previous training results
        fr = open(path_train_seqs_record, 'rb')
        train_seqs_record = pickle.load(fr)
        fr.close()
    if agent_ddpg.iid > 0:
        fr = open(path_train_seqs_record, 'rb')
        train_seqs_record = pickle.load(fr)
        fr.close()
    action_std = 0.1  # std of exploration noise; actions lie in [-1, 1]
    action_bias_weight = 0.1
    ir = imaginationROLLOUTS()
    for i in range(MAX_TRAIN_EPISODES):
        get_hmi()
        s, _ = env.reset(agent_ddpg)
        ir.reset_localFitting()
        ep_reward = 0
        ep_step = 0
        seq_record = []
        s_terminal = 0
        flag_emergency = False
        if i >= 50:
            enable_ir = False
        if agent_ddpg.flag_train_start:
            enable_ilqr = False
        ilqr_a_init = np.zeros(a_dim)
        for j in range(MAX_EP_STEPS):
            a_raw = agent_ddpg.choose_action(s)
            # iLQR computes the control policy once the local model is ready
            if enable_ilqr and ir.flag_ready:
                def f(x, u):
                    # one-step dynamics predicted by the fitted local model
                    assert len(x) == s_dim, x.shape
                    assert len(u) == a_dim, u.shape
                    x_, _, _, _ = ir.pred_Kalmanfilter(x, u)
                    return x_
                def l(x, u):
                    reward = env.get_running_cost(u, x)
                    return reward
                def l_terminal(x):
                    reward = env.get_reward_terminal(x)
                    return reward
                dynamics = fd_Dynamics(f, s_dim, a_dim)
                cost = fd_Cost(l, l_terminal, s_dim, a_dim)
                # print('x = ', s)
                # print('u = ', a_raw)
                # print('l = ', l(s, a_raw))
                # l_x = cost.l_x(s, a_raw)
                # l_u = cost.l_u(s, a_raw)
                # f_u = dynamics.f_u(s, a_raw)
                #
                # print('l_u = ', l_u)
                # print('l_x = ', l_x)
                # print('f_u = ', f_u)
                N = 1  # Number of time-steps in trajectory.
                x_init = s  # Initial state.
                u_init = np.array([ilqr_a_init])  # Random initial action path.
                ilqr = iLQR(dynamics, cost, N)
                xs, us = ilqr.fit(x_init, u_init)
                a_raw = us[0]
                # keep the insertion-axis component pushing downwards
                a_raw[1] = -abs(a_raw[1])
                a_raw = np.tanh(a_raw)
            # widen exploration while the model reports jamming
            if enable_ilqr and ir.flag_jamming:
                gain_std = 3
            else:
                gain_std = 1
            # add exploration noise
            act_ddpg = agent_ddpg.action_add_noise(a_raw, gain_std * action_std)
            act_safe = agent_ddpg.choose_action_safe(s)
            ilqr_a_init = act_ddpg.copy()
            # [IR] safety pre-alert: resample actions predicted to overload force
            if ENABLE_ALERT and ENABLE_IR and ir.flag_ready and np.min(act_safe) == 1:
                ir_s = s
                ir_a = act_ddpg
                ir_s_, _, _, _ = ir.pred_Kalmanfilter(ir_s, ir_a)
                ir_f_ = ir_s_[0:3]
                if np.any(ir_f_ > 1):
                    print('IR警告:可能出现接触力超限')
                    num_try = 10
                    while np.any(ir_f_ > 1) and num_try > 0:
                        print(' --正在重新选择action')
                        act_ddpg = agent_ddpg.action_add_noise(a_raw, action_std)
                        ir_s = s
                        ir_a = act_ddpg
                        ir_s_, _, _, _ = ir.pred_Kalmanfilter(ir_s, ir_a)
                        ir_f_ = ir_s_[0:3]
                        num_try = num_try - 1
            if flag_emergency:
                flag_emergency = False
            print(' <<<< 动作 = [%6.3f,' % act_ddpg[0], ' %6.3f,' % act_ddpg[1], ' %6.3f],' % act_ddpg[2],
                  ' [%6.3f,' % act_ddpg[3], ' %6.3f,' % act_ddpg[4], ' %6.3f] ' % act_ddpg[5],
                  ' [%6.3f,' % act_safe[0], ' %6.3f,' % act_safe[1], ' %6.3f] ' % act_safe[2])
            try:
                s_, r, s_terminal = env.step(act_ddpg, act_safe, agent_ddpg)
            except Exception as e:
                # on environment failure, keep the state and mark the episode bad
                print('【训练组】运行时出现异常。' + str(e))
                traceback.print_exc()
                s_ = s.copy()
                r = -1
                s_terminal = -40
            print(' >>>> 接触力 = [%6.3f,' % s_[0], ' %6.3f,' % s_[1], ' %6.3f],' % s_[2],
                  ' 进程 = %5.3f,' % s_[12],
                  ' 奖惩 = %6.3f' % r)
            if s_terminal == 0 and j == MAX_EP_STEPS - 1:
                # reached the maximum number of steps for this episode
                s_terminal = -10
            if np.min(act_safe) == 1:  # act_safe not triggered
                agent_ddpg.store_transition(s, act_ddpg, r, s_, s_terminal)
                ir.store_and_fitting(s, act_ddpg, r, s_, s_terminal)
            # [IR] add imagined (model-generated) training transitions
            if enable_ir and ir.flag_ready:
                for nn in range(NUM_IR):
                    ir_s = s
                    ir_a = agent_ddpg.choose_action(ir_s)
                    ir_a = agent_ddpg.action_add_noise(ir_a, action_std)
                    ir_s_, _, _, _ = ir.pred_Kalmanfilter(ir_s, ir_a)
                    ir_r, ir_s_terminal = env.get_reward(ir_a, ir_s_)
                    agent_ddpg.store_transition(ir_s, ir_a, ir_r, ir_s_, ir_s_terminal)
            seq_record.append([s, act_ddpg, r, s_, s_terminal])
            if agent_ddpg.flag_train_start:
                action_std *= .999995  # decay the action randomness
                action_bias_weight *= .996
            # agent_ddpg.train(TRAIN_ROUND)  # training happens inside env.step to reuse wall time
            s = s_
            ep_reward += r
            ep_step += 1
            if s_terminal == -2:
                flag_emergency = True
            if s_terminal == 1 or s_terminal == -1 or s_terminal == -4 or s_terminal == -40:
                break
        if 1 == s_terminal and play_sound:
            winsound.PlaySound('sound/feixin.wav', winsound.SND_ASYNC)
        if s_terminal < 0 and play_sound:
            winsound.PlaySound('sound/YY.wav', winsound.SND_ASYNC)
        rps = float(ep_reward) / float(ep_step)
        stats_list = ['Episode: %i ' % i, ' Reward: %.2f ' % ep_reward, ' Rps: %.3f ' % rps,
                      ' Explore: %.2f ' % action_std,
                      ' processY: %.3f ' % s[12], ' Step: %i ' % ep_step,
                      ' done ' if 1 == s_terminal else '', ' coercion ' if -1 == s_terminal else '']
        stats = ''
        stats = stats.join(stats_list)
        print(stats)
        file_log.write(stats + '\n')
        file_log.flush()
        train_seqs_record.append(seq_record)  # record the episode trajectory
        if i % 5 == 0 and i != 0:
            # checkpoint the networks and the trajectory record
            agent_ddpg.save()
            fw = open(path_train_seqs_record, 'wb')
            pickle.dump(train_seqs_record, fw)
            fw.close()
    _, _ = env.reset()
    file_log.close()
    # final save of the networks and the trajectory record
    agent_ddpg.save()
    fw = open(path_train_seqs_record, 'wb')
    pickle.dump(train_seqs_record, fw)
    fw.close()
    del agent_ddpg
def eval():
    """Run evaluation episodes with the restored agent (no exploration noise).

    NOTE(review): this function shadows the builtin ``eval`` and still
    carries copy-pasted setup from train() (duplicate env.seed/file_log
    lines, an unused train_seqs_record load, and a redundant
    ``global play_sound``) — candidates for cleanup.
    """
    agent_ddpg = DDPG(a_dim, s_dim)
    global play_sound
    global enable_ir
    global enable_ilqr
    env.seed(200)
    file_log = open("Log_train.txt", 'w')
    train_seqs_record = []
    if RESTORE_AGENT:
        agent_ddpg.restore()  # restore previous training results
        fr = open(path_train_seqs_record, 'rb')
        train_seqs_record = pickle.load(fr)
        fr.close()
    global play_sound
    env.seed(200)
    file_log = open("Log_train.txt", 'w')
    eval_seqs_record = []
    agent_ddpg.restore()  # restore previous training results
    for i in range(MAX_TRAIN_EPISODES):
        get_hmi()
        s, _ = env.reset()
        ep_reward = 0
        ep_step = 0
        seq_record = []
        s_terminal = 0
        flag_emergency = False
        for j in range(MAX_EP_STEPS):
            act_ddpg = agent_ddpg.choose_action(s)
            act_safe = agent_ddpg.choose_action_safe(s)
            if flag_emergency:
                flag_emergency = False
            print(' <<<< 动作 = [%6.3f,' % act_ddpg[0], ' %6.3f,' % act_ddpg[1], ' %6.3f],' % act_ddpg[2],
                  ' [%6.3f,' % act_ddpg[3], ' %6.3f,' % act_ddpg[4], ' %6.3f] ' % act_ddpg[5],
                  ' [%6.3f,' % act_safe[0], ' %6.3f,' % act_safe[1], ' %6.3f] ' % act_safe[2])
            s_, r, s_terminal = env.step(act_ddpg, act_safe)
            print(' >>>> 接触力 = [%6.3f,' % s_[0], ' %6.3f,' % s_[1], ' %6.3f],' % s_[2],
                  ' 进程 = %5.3f,' % s_[12],
                  ' 奖惩 = %6.3f' % r)
            seq_record.append([s, act_ddpg, r, s_, s_terminal])
            s = s_
            ep_reward += r
            ep_step += 1
            if s_terminal == -2:
                flag_emergency = True
            if s_terminal == 1 or s_terminal == -1 or s_terminal == -4 or s_terminal == -5:
                break
        if 1 == s_terminal and play_sound:
            winsound.PlaySound('sound/feixin.wav', winsound.SND_ASYNC)
        if s_terminal < 0 and play_sound:
            winsound.PlaySound('sound/YY.wav', winsound.SND_ASYNC)
        rps = float(ep_reward) / float(ep_step)
        stats_list = ['Episode: %i ' % i, ' Reward: %.2f ' % ep_reward, ' Rps: %.3f ' % rps,
                      ' processY: %.3f ' % (s[-1] * 0.5 + 0.5), ' Step: %i ' % ep_step,
                      ' done ' if 1 == s_terminal else '', ' coercion ' if -1 == s_terminal else '']
        stats = ''
        stats = stats.join(stats_list)
        print(stats)
        file_log.write(stats + '\n')
        file_log.flush()
        eval_seqs_record.append(seq_record)  # record the episode trajectory
    _, _ = env.reset()
    file_log.close()
    # save the evaluation data
    fw = open(path_eval_seqs_record, 'wb')
    pickle.dump(eval_seqs_record, fw)
    fw.close()
if __name__ == '__main__':
    # main program
    env = Env_PeginHole()  # initialize the robot environment
    env.connectRobot(False)
    env.robot_go_terminal()
    env.correct_residual_bias()
    s_dim = env.state_space.shape[0]
    a_dim = env.action_space.shape[0]
    if ON_TRAIN:
        print('【训练组】')
        try:
            for k in range(1):
                print(' 第%i组' % k)
                agent_ddpg = DDPG(a_dim, s_dim, iid=4)
                hmi = multicoreHMI()
                # NOTE: 'global' at module scope is a no-op; kept as in the
                # original so train()/eval() share these flags.
                global play_sound
                play_sound = PLAY_SOUND
                global enable_ir
                enable_ir = ENABLE_IR
                global enable_ilqr
                enable_ilqr = ENABLE_ILQR
                train(env, agent_ddpg)
        except Exception as e:
            print('【训练组】运行时出现异常。' + str(e))
            traceback.print_exc()
    else:
        print('【评价组】')
        eval()
    winsound.PlaySound('sound/finish.wav', winsound.SND_ASYNC)
    env.close()  # close the environment (including its subprocesses)
    print('【Main消息】主进程已结束。')
    time.sleep(5)
| [
"houzhimin@houzhimindeMacBook-Pro.local"
] | houzhimin@houzhimindeMacBook-Pro.local |
8206ee6b00b8cc74dc843ca01003dd805e363cb7 | 5e10742aa2bc059916874c2d98fd6c601f203c8b | /conanfile.py | 7450c1993ae8ee3e78a2ff515fb9f2f18a20c01a | [
"MIT"
] | permissive | lukaszlaszko/arrow-conan | 63c9875c69babe136ed51fb8741ed30a2feaa1c3 | 66b7cbdc6217fbd52abf333a3c1c33a5a61ec256 | refs/heads/master | 2022-02-01T18:54:25.398100 | 2019-07-20T10:05:25 | 2019-07-20T10:05:25 | 197,911,920 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | # -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import os
class ArrowConan(ConanFile):
name = "arrow"
version = "0.13.0"
description = "Apache Arrow is a cross-language development platform for in-memory data."
topics = ("conan", "arrow", "memory")
url = "https://github.com/bincrafters/conan-arrow"
homepage = "https://github.com/apache/arrow"
author = "Bincrafters <bincrafters@gmail.com>"
license = "Apache-2.0"
exports = ["LICENSE.md"]
exports_sources = ["CMakeLists.txt"]
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def source(self):
source_url = "https://github.com/apache/arrow"
tools.get("{0}/archive/apache-arrow-{1}.tar.gz".format(source_url, self.version), sha256="380fcc51f0bf98e13148300c87833e734cbcd7b74dddc4bce93829e7f7e4208b")
extracted_dir = "arrow-apache-arrow-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["ARROW_BOOST_USE_SHARED"] = False
cmake.definitions["ARROW_BUILD_BENCHMARKS"] = False
cmake.definitions["ARROW_BUILD_SHARED"] = False
cmake.definitions["ARROW_BUILD_TESTS"] = False
cmake.definitions["ARROW_BUILD_UTILITIES"] = False
cmake.definitions["ARROW_USE_GLOG"] = False
cmake.definitions["ARROW_WITH_BACKTRACE"] = False
cmake.definitions["ARROW_WITH_BROTLI"] = False
cmake.definitions["ARROW_WITH_LZ4"] = False
cmake.definitions["ARROW_WITH_SNAPPY"] = False
cmake.definitions["ARROW_WITH_ZLIB"] = False
cmake.definitions["ARROW_WITH_ZLIB"] = False
cmake.definitions["ARROW_WITH_ZSTD"] = False
cmake.definitions["ARROW_JEMALLOC"] = False
cmake.configure(source_folder=os.path.join(self._source_subfolder, 'cpp'))
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
| [
"lukaszlaszko@gmail.com"
] | lukaszlaszko@gmail.com |
ea066c0cef850eab68680b3ce0ebe52e5bf957b2 | eb07ceab19ece3694084db75c5e60788e74e38f7 | /jsonActivityToSql.py | dcdbe4d67a5129c0117d42b35b50b87e7b4955ce | [] | no_license | stcarley/learningApi | 3be8ba13c64015a82e1af9953cc4a57db53b0b32 | 4c6d6b607bced3b9e5aa5f3f41d9d0048f5c0ca7 | refs/heads/master | 2021-07-13T14:22:11.003749 | 2020-10-21T20:15:32 | 2020-10-21T20:15:32 | 217,378,733 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,085 | py | import json
import sqlite3
from sqlite3 import Error
# Load the exported learning-activity report once at import time; the
# script fails immediately if jsonReportsOutput.json is missing.
with open ("jsonReportsOutput.json", "r") as readFile:
    data = json.load(readFile)
def create_connection(db_file):
    """Open a SQLite connection to *db_file*; print and return None on failure."""
    try:
        return sqlite3.connect(db_file)
    except Error as exc:
        print(exc)
        return None
def add_users(connection, profileUrn, name, email, uniqueUserId):
    """Insert or replace one learner row in the ``users`` table.

    FIX: the original built the SQL with str.format, which broke on any
    value containing a quote (e.g. the name "O'Brien") and was vulnerable
    to SQL injection; this version uses a parameterized query.
    """
    sql_insert_user_data = """ INSERT OR REPLACE INTO users (
                                        profileUrn,
                                        name,
                                        email,
                                        uniqueUserId)
                                    VALUES (?, ?, ?, ?); """
    try:
        c = connection.cursor()
        c.execute(sql_insert_user_data, (profileUrn, name, email, uniqueUserId))
        connection.commit()
        # print("user {} added".format(profileUrn))
    except Error as e:
        print(e)
def add_courses(connection, courseUrn, courseName):
    """Insert or replace one course row in the ``courses`` table.

    FIX: parameterized query instead of the original str.format SQL,
    which broke on quoted titles and allowed SQL injection.
    """
    sql_insert_course_data = """ INSERT OR REPLACE INTO courses (
                                        courseUrn,
                                        courseName
                                        )
                                    VALUES (?, ?); """
    try:
        c = connection.cursor()
        c.execute(sql_insert_course_data, (courseUrn, courseName))
        connection.commit()
        # print("course {} added".format(courseUrn))
    except Error as e:
        print(e)
def add_userCourses(connection, profileUrn, courseUrn, firstEngaged, lastEngaged, secondsViewed, progressPercentage):
    """Insert or replace one learner/course engagement row.

    The primary key ``userCourse`` is "<profileUrn>-<courseUrn>".

    FIX: parameterized query instead of the original str.format SQL
    (injection-prone, broke on quotes, and stored every numeric value as a
    quoted string).
    """
    userCourse = profileUrn + '-' + courseUrn
    # print(userCourse)
    sql_insert_userCourse_data = """ INSERT OR REPLACE INTO userCourses (
                                        userCourse,
                                        courseUrn,
                                        profileUrn,
                                        firstEngaged,
                                        lastEngaged,
                                        secondsViewed,
                                        progressPercentage
                                        )
                                    VALUES (?, ?, ?, ?, ?, ?, ?); """
    try:
        c = connection.cursor()
        c.execute(sql_insert_userCourse_data,
                  (userCourse, courseUrn, profileUrn, firstEngaged,
                   lastEngaged, secondsViewed, progressPercentage))
        connection.commit()
        # print("userCourse {} added".format(userCourse))
    except Error as e:
        print(e)
def get_course_list(connection):
    """Return (courseUrn,) rows for courses whose duration is not yet set."""
    query = """ SELECT courseUrn FROM courses WHERE duration IS NULL; """
    try:
        cursor = connection.cursor()
        cursor.execute(query)
        return cursor.fetchall()
    except Error as exc:
        print(exc)
def process_json(connection, data):
    """Walk the report's elements and upsert users, courses and userCourses."""
    for element in data['elements']:
        learner = element['learnerDetails']
        content = element['contentDetails']
        activities = element['activities']
        if connection is None:
            print("Error! no connection")
            continue
        profile_urn = learner['entity']['profileUrn']
        course_urn = content['contentUrn']
        add_users(connection, profile_urn, learner['name'],
                  learner['email'], learner['uniqueUserId'])
        add_courses(connection, course_urn, content['name'])
        # activities[0] carries seconds viewed / first engagement,
        # activities[1] carries progress percentage / last engagement.
        add_userCourses(connection, profile_urn, course_urn,
                        activities[0]['firstEngagedAt'],
                        activities[1]['lastEngagedAt'],
                        activities[0]['engagementValue'],
                        activities[1]['engagementValue'])
def main():
    """Open data.db and import the parsed JSON report into it."""
    connection = create_connection('data.db')
    if connection is None:
        print("Error! cannot create the database connection.")
    else:
        process_json(connection, data)
# Import the report into data.db when executed as a script.
if __name__ == '__main__':
    main()
| [
"40582340+stcarley@users.noreply.github.com"
] | 40582340+stcarley@users.noreply.github.com |
3f7f623f96a3f56eb9b05f7047dbb6a29c218a46 | e82b3c6000fe8e4639d6606f9d3605e75a8a5d5c | /src/secondaires/crafting/actions/copier_attributs.py | 27bd23a3f37b98662a48b66862e659a7ce3fc12c | [
"BSD-3-Clause"
] | permissive | vincent-lg/tsunami | 804585da7bd1d159ad2c784b39f801963ca73c03 | 7e93bff08cdf891352efba587e89c40f3b4a2301 | refs/heads/master | 2022-08-02T15:54:35.480614 | 2022-07-18T12:06:41 | 2022-07-18T12:06:41 | 25,605,543 | 5 | 2 | BSD-3-Clause | 2019-06-05T15:59:08 | 2014-10-22T21:34:10 | Python | UTF-8 | Python | false | false | 3,190 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action copier_attributs."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):

    """Copie les attributs d'un objet vers un autre."""

    # NOTE: the French docstrings in this class are displayed verbatim as
    # in-game scripting help by the engine, so they are kept unchanged.

    @classmethod
    def init_types(cls):
        # Register the accepted scripting signature: two 'Objet' arguments.
        cls.ajouter_types(cls.copier_attributs, "Objet", "Objet")

    @staticmethod
    def copier_attributs(objet_origine, objet_final):
        """Copie les attributs d'un objet vers un autre.

        Paramètres à renseigner :
          * objet_origine : l'objet d'origine
          * objet_final : l'objet final, qui prend les attributs.

        Exemple de syntaxe :
          # Si 'objet1' et 'objet2' contiennent des objets
          copier_attributs objet1 objet2

        """
        # Start from the attributes defined on the source object's prototype
        # (copied so the prototype's own dict is never mutated)...
        attributs = importeur.crafting.configuration[
                objet_origine.prototype].attributs
        attributs = attributs and attributs.copy() or {}
        # ...then overlay attributes set on the individual source object.
        autres = importeur.crafting.configuration[objet_origine].attributs
        if autres:
            attributs.update(autres)
        if importeur.crafting.configuration[objet_final].attributs is None:
            importeur.crafting.configuration[objet_final].attributs = {}
        importeur.crafting.configuration[objet_final].attributs.update(
                attributs)
        # Substitute "$<attribute>" placeholders in the target object's
        # singular and plural display names with the copied values.
        for attribut, valeur in attributs.items():
            objet_final.nom_singulier = objet_final.nom_singulier.replace(
                    "${}".format(attribut), valeur)
            objet_final.nom_pluriel = objet_final.nom_pluriel.replace(
                    "${}".format(attribut), valeur)
| [
"vincent.legoff.srs@gmail.com"
] | vincent.legoff.srs@gmail.com |
1b85fe33cfdc32745e6d5c918558932acb47d4f5 | 11334e46d3575968de5062c7b0e8578af228265b | /systests/camera/pygame_test.py | 120a3509ea609b01136be1606066bffab85cc28a | [] | no_license | slowrunner/Carl | 99262f16eaf6d53423778448dee5e5186c2aaa1e | 1a3cfb16701b9a3798cd950e653506774c2df25e | refs/heads/master | 2023-06-08T05:55:55.338828 | 2023-06-04T02:39:18 | 2023-06-04T02:39:18 | 145,750,624 | 19 | 2 | null | 2023-06-04T02:39:20 | 2018-08-22T18:59:34 | Roff | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python3
# file: pygame_test.py
from PIL import Image
import numpy as np
from time import sleep
import pygame
pygame.init()
clk = pygame.time.Clock()
# Load the image once with PIL just to learn its pixel dimensions.
im = np.array(Image.open('images/motion_capture.jpg'))
# Window sized to the image: the array shape is (rows, cols), so swap
# the axes to get pygame's (width, height) order.
win = pygame.display.set_mode((im.shape[1],im.shape[0]))
img = pygame.image.load('images/motion_capture.jpg')
# Show the image, then exit after one 5-second display (or on Ctrl-C).
while True:
    try:
        win.blit(img,(0,0))
        pygame.display.flip()
        clk.tick(3)
        sleep(5)
        exit(0)
    except KeyboardInterrupt:
        print("\nExiting")
        break
| [
"slowrunner@users.noreply.github.com"
] | slowrunner@users.noreply.github.com |
29d1a5325e550948e59a0012deb3f333e3cc05ad | f911f121198336c701702f4cb378c00ddf00d866 | /Assignments/ITP115_a10_Bhatia_Shamit/Human.py | dcd5148a533422021ee715e5f3601d6a853b8312 | [] | no_license | Shamitbh/ITP-115 | 505ba444ed13f5236582f0f83b93f6ad69867673 | d39d4f00aa4f63b79658f39041c99c89fa3e5881 | refs/heads/master | 2021-01-25T10:06:13.686277 | 2018-02-28T20:42:58 | 2018-02-28T20:42:58 | 123,338,435 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | from Being import Being
class Human(Being):
    """A Being that additionally carries a blood type.

    Extends Being (which holds the name and quarts of blood) with a
    private blood-type attribute and accessors for it.
    """

    def __init__(self, name, quarts, bloodType):
        """Create a Human with a name, quarts of blood, and a blood type."""
        # call parent constructor
        super().__init__(name, quarts)
        # set up new attribute representing human's blood type
        self.__mBloodType = bloodType

    def getBloodType(self):
        """Return this human's blood type.

        BUG FIX: the attribute is name-mangled to _Human__mBloodType, so the
        original ``return self._mBloodType`` raised AttributeError; it must
        reference ``self.__mBloodType``.
        """
        return self.__mBloodType

    def setBloodType(self, bloodType):
        """Replace this human's blood type."""
        self.__mBloodType = bloodType

    def isAlive(self):
        """Return True while the human still has blood left (quarts > 0)."""
        return self.getQuarts() > 0

    def __str__(self):
        msg = "Human " + super().getName() + " has " + str(super().getQuarts()) + " quarts of type "
        msg += str(self.__mBloodType) + " blood."
        return msg
"Shamitbh@usc.edu"
] | Shamitbh@usc.edu |
118c2af2cd0b138203090e2fc8a2653de97c8043 | 0487f110c61453ffd1df3446ebdbfe525f03079a | /hiseek/gamemap.py | 02051115a6d0186002d3bcdebb09ebeaeccdd1b5 | [
"BSD-2-Clause"
] | permissive | droftware/Medusa | 76024f6076dae8bfcd85ed08cc15681391606a65 | a22f1b252bd752a4ab6e6d4f6245c5c90991dcb8 | refs/heads/master | 2021-03-16T10:18:28.894072 | 2019-11-15T14:43:15 | 2019-11-15T14:43:15 | 96,506,839 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,109 | py | import os
import math
import rtree
import shapes
import coord
class PolygonMap(object):
    '''
    Represents a map in the form of a list of polygons.

    The map is loaded from a text file "id_<map_id>.polygons" whose first
    line is "<width>,<height>" and whose remaining lines each describe one
    obstacle as "<geometry_type>: <comma separated ints>".  Obstacles are
    indexed in an R-tree for fast spatial queries, and polygon-type
    obstacles additionally get an expanded copy used for clearance checks.
    '''

    def __init__(self, map_id):
        # Obstacle polygons, in file order; R-tree ids match list indices.
        self.__polygons = []
        self.__boundary_polygon = None
        self.__all_polygons = None # Includes all the obstacle polygons as well as boundary polygon
        self.__map_name = 'id_' + str(map_id) + '.polygons'
        print('Map name:', self.__map_name)
        # Scale factor applied to polygon obstacles to build the expanded
        # (clearance) copies used by check_obstacle_collision(expanded=True).
        self.__expansion_factor = 2.50
        self.__expanded_polygons = []
        # Side length of the square query window used by get_bbox().
        self.__bbox_length = 250
        self.__rtree_idx = rtree.index.Index()
        print('Path:', self.__map_name)
        assert(os.path.isfile(self.__map_name))
        # NOTE(review): the file handle is never closed, and line.strip()
        # below discards its result (lines keep their trailing newline;
        # int() tolerates surrounding whitespace, so parsing still works).
        f = open(self.__map_name, 'r')
        first = True
        for line in f:
            line.strip()
            if len(line) != 0:
                if first:
                    # First line: "<width>,<height>" of the whole map.
                    points_list = line.split(',')
                    self.__width = int(points_list[0])
                    self.__height = int(points_list[1])
                    points_tuple = (0, 0, self.__width, 0, self.__width, self.__height, 0, self.__height)
                    self.__boundary_polygon = shapes.Polygon(points_tuple)
                    # A slightly inset rectangle, kept as an extra boundary.
                    offset = 10
                    points_tuple = (offset, offset, self.__width - offset, offset, self.__width - offset, self.__height - offset, offset, self.__height-offset)
                    self.__imaginary_boundary = shapes.Polygon(points_tuple)
                    first = False
                else:
                    # Obstacle line: "<type>: <ints>" — parse and build shape.
                    geometry_type = line.split(':')[0].strip()
                    points_string = line.split(':')[1]
                    points_list = points_string.split(',')
                    points_list = [int(point) for point in points_list]
                    points_tuple = tuple(points_list)
                    polygon = None
                    if geometry_type == 'polygon':
                        polygon = shapes.Polygon(points_tuple)
                        # Only raw polygons get an expanded (clearance) copy.
                        e_polygon = shapes.Polygon(points_tuple, self.__expansion_factor)
                        self.__expanded_polygons.append(e_polygon)
                    if geometry_type == 'square':
                        # ints are: centre_x, centre_y, side length
                        centre = (points_tuple[0], points_tuple[1])
                        length = points_tuple[2]
                        polygon = shapes.Square(centre, length)
                    if geometry_type == 'rectangle':
                        # ints are: centre_x, centre_y, width, height
                        centre = (points_tuple[0], points_tuple[1])
                        width = points_tuple[2]
                        height = points_tuple[3]
                        polygon = shapes.Rectangle(centre, width, height)
                        print(str(polygon))
                    if geometry_type == 'circle':
                        # ints are: centre_x, centre_y, radius, approximation points
                        centre = (points_tuple[0], points_tuple[1])
                        radius = points_tuple[2]
                        num_approx_points = points_tuple[3]
                        polygon = shapes.Circle(centre, radius, num_approx_points)
                    # Insert into the R-tree keyed by its list index.
                    self.__rtree_idx.insert(len(self.__polygons), polygon.get_rtree_bbox(), obj=polygon)
                    self.__polygons.append(polygon)
        self.__num_polygons = len(self.__polygons)
        self.__all_polygons = self.__polygons + [self.__boundary_polygon]

    def get_num_polygons(self):
        """Number of obstacle polygons (boundary excluded)."""
        return self.__num_polygons

    def get_map_width(self):
        return self.__width

    def get_map_height(self):
        return self.__height

    def get_polygon(self, i):
        """Return the i-th obstacle polygon (0-based)."""
        assert(i < self.__num_polygons)
        return self.__polygons[i]

    def get_boundary_polygon(self):
        return self.__boundary_polygon

    def get_bbox(self, current_position):
        """Square query window of side __bbox_length centred on the position."""
        bbox = shapes.Square((current_position.get_x(), current_position.get_y()), self.__bbox_length)
        return bbox

    def get_map_name(self):
        return self.__map_name

    def check_boundary_collision(self, position):
        '''
        Returns True if point collides with the boundary
        (i.e. lies outside the map rectangle).
        '''
        if self.__boundary_polygon.is_point_inside(position):
            return False
        return True

    def check_obstacle_collision(self, position, expanded=False):
        '''
        Returns True if point collides(is inside) any
        obstacle polygon.

        With expanded=True the test uses the expanded polygon copies
        (clearance margin) instead of the raw obstacle shapes.  NOTE(review):
        the loop bound is __num_polygons even in expanded mode, although
        only polygon-type obstacles have expanded copies — confirm the two
        lists always have the same length for the maps in use.
        '''
        if expanded:
            polygons = self.__expanded_polygons
        else:
            polygons = self.__polygons
        for i in range(self.__num_polygons):
            polygon = polygons[i]
            if polygon.is_point_inside(position):
                return True
        return False

    def get_intersected_polygon_ids(self, polygon):
        """Ids of obstacle polygons whose bounding boxes intersect the given polygon's."""
        return list(self.__rtree_idx.intersection(polygon.get_rtree_bbox()))

    def get_visibility_polygon(self, current_position, current_rotation, num_rays, visibility_angle):
        """Cast num_rays rays across [rotation - angle, rotation + angle]
        and return the resulting visibility polygon around the position.

        Each ray is clipped against the nearby obstacle polygons (found via
        the R-tree query window), the window itself and the map boundary;
        the closest intersection per ray becomes one vertex of the result.
        """
        # c = coord.Coord(self.x, self.y)
        vis_points = [int(current_position.get_x()), int(current_position.get_y())]
        rotation = current_rotation - visibility_angle
        offset = (visibility_angle * 2.0)/num_rays
        bbox = self.get_bbox(current_position)
        hits = list(self.__rtree_idx.intersection(bbox.get_rtree_bbox(), objects=True))
        polygon_hits = [item.object for item in hits]
        # polygon_ids = [item.id for item in hits]
        nearby_polygons = polygon_hits + [bbox] + [self.__boundary_polygon]
        while rotation < current_rotation + visibility_angle:
            # Unit direction of this ray (y negated for the screen coordinate system).
            rotation_x = math.cos(coord.Coord.to_radians(-rotation))
            rotation_y = math.sin(coord.Coord.to_radians(-rotation))
            r = coord.Coord(current_position.get_x() + rotation_x, current_position.get_y() + rotation_y)
            rotation += offset
            if r.get_x() < 0 or r.get_x() > self.__width or r.get_y() < 0 or r.get_y() > self.__height:
                # Ray points off-map: degenerate vertex at the viewer itself.
                vis_points.append(int(current_position.get_x()))
                vis_points.append(int(current_position.get_y()))
                continue
            ray = shapes.Line(current_position, r)
            # Find the nearest intersection of this ray with any nearby edge;
            # intersect[1] is the comparison key (distance along the ray).
            closest_intersect = None
            for polygon in nearby_polygons:
                for i in range(polygon.get_num_lines()):
                    intersect = shapes.Line.get_intersection(ray, polygon.get_line(i))
                    if not intersect:
                        continue
                    if not closest_intersect or intersect[1] < closest_intersect[1]:
                        closest_intersect = intersect
            if not closest_intersect:
                # Should not happen (the bbox/boundary always encloses the
                # viewer); log diagnostics and skip this ray.
                print('Closest intersect not found')
                print('From coordinate:', current_position)
                print('Ray:', ray)
                print('Segment:', polygon.get_line(i))
                continue
            vis_points.append(int(closest_intersect[0].get_x()))
            vis_points.append(int(closest_intersect[0].get_y()))
        vis_points_tuple = tuple(vis_points)
        visibility_polygon = shapes.Polygon(vis_points_tuple)
        return visibility_polygon
| [
"akshat.tandon@research.iiit.ac.in"
] | akshat.tandon@research.iiit.ac.in |
e35a60a4e6f8a094168654003f7835cf724c74bf | 0608a5858ad9b7804c68de57abe9be0e278a9897 | /gastly.py | 9732c627d7823ac0e938a3a7a9d7c1e7f3b9f6ee | [] | no_license | ryanpoon/Pokemon-Battle-Game | b95f268a5af5e95f6626c8ce812394d221879299 | 46fae9d0a2b1129a380389e5784b7b64adae623f | refs/heads/master | 2020-06-10T20:21:18.623957 | 2017-01-18T22:25:12 | 2017-01-18T22:25:12 | 75,884,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | import random
from haunter import Haunter
class Gastly:
    """The Gastly pokemon: rolls random stats on creation, evolves into Haunter."""

    poketype = ['Ghost', 'Poison']
    description = "Gastly is largely composed of gaseous matter. When exposed to a strong wind, the gaseous body quickly dwindles away. Groups of this Pokemon cluster under the eaves of houses to escape the ravages of wind."
    pokemon = 'Gastly'

    def __init__(self, name='Gastly', level=1):
        """Roll random battle stats, a move pair and size stats for a new Gastly."""
        # Base stats plus a random individual bonus of 1..15 each.
        self.attack = 60 + random.randint(1, 15)
        self.defense = 186 + random.randint(1, 15)
        self.stamina = 70 + random.randint(1, 15)
        self.cp = random.randint(10, level * 25)
        self.name = name
        self.hp = int(self.cp / 9)
        self.maxhp = int(self.cp / 9)
        # Pick a (quick move, charge move) pair.  Rolls 6 and 7 both fall
        # through to the last pair, exactly like the original if/elif chain.
        roll = random.randint(1, 7)
        move_table = {
            1: ('Lick', 'Dark Pulse'),
            2: ('Lick', 'Ominous Wind'),
            3: ('Lick', 'Sludge Bomb'),
            4: ('Sucker Punch', 'Dark Pulse'),
            5: ('Sucker Punch', 'Ominous Wind'),
        }
        self.moves = move_table.get(roll, ('Sucker Punch', 'Sludge Bomb'))
        # Random size stats (metres / hundredths of a kilogram unit used upstream).
        self.height = float(random.randint(100, 160)) / 100
        self.weight = float(random.randint(9, 11)) / 100

    def evolve(self, lvl):
        """Return the Haunter this Gastly evolves into, keeping its name and cp."""
        return Haunter(self.name, self.cp, lvl)
| [
"ryanpoon2004@gmail.com"
] | ryanpoon2004@gmail.com |
bac89ed436e950262ad3acaa8913c5dac92823ba | 992f500f3083592be036c0446f0c618364ace8e9 | /ABC/ABC154.py | 1c7519b327bd6b2a660bf37403bf3cdd178c75fb | [] | no_license | Tripzin/kyopro | ec0bbab4d61a34c94587ab0d9e2dccfbb0f04e95 | 41cb40827c0986346033ebac922a32ff212e31ba | refs/heads/main | 2023-04-30T19:44:41.659770 | 2021-05-23T02:54:25 | 2021-05-23T02:54:25 | 362,134,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | N,K = map(int,input().split())
# N dice, the i-th showing 1..p[i]; choose K adjacent dice maximising the
# expected sum of their faces.  (N and K are read on the line above.)
p = list(map(int,input().split()))
# Prefix sums of per-die expected values (cumulative-sum technique).
e_s = [0] * (N+1)
for i in range(N):
    # Expected value of a fair die with faces 1..p[i] is (p[i]+1)/2.
    e = (p[i]+1)/2
    e_s[i+1] = e_s[i] + e
ans = 0
# Slide a window of K consecutive dice; window sum is a prefix-sum difference.
for j in range(N-K+1):
    ans = max(ans,e_s[j+K]-e_s[j])
print(ans)
| [
"kingyoikusei@gmail.com"
] | kingyoikusei@gmail.com |
4bcfa8fed8b1c00beb5f1e96e70c1f91c85cb7fd | aa58a0df6ac1af6fd7d6fa73dd260c40498a5f48 | /controllers/controllers.py | 71c67139c1f4461c8d6b95ea9be0cd6ea9236ca0 | [] | no_license | joseAngelMoreno/pacientes | d6d82efeac3164236a7177c69535097f169a65bc | eb18b21a759e5d4fc7fc271859ad83926bd91152 | refs/heads/master | 2023-02-20T06:25:04.754845 | 2021-01-22T18:15:01 | 2021-01-22T18:15:01 | 330,960,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | # -*- coding: utf-8 -*-
# from odoo import http
# class Pacientes(http.Controller):
# @http.route('/pacientes/pacientes/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/pacientes/pacientes/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('pacientes.listing', {
# 'root': '/pacientes/pacientes',
# 'objects': http.request.env['pacientes.pacientes'].search([]),
# })
# @http.route('/pacientes/pacientes/objects/<model("pacientes.pacientes"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('pacientes.object', {
# 'object': obj
# })
| [
"joseangelmoreno75@gmail.com"
] | joseangelmoreno75@gmail.com |
d6fd3977cd4b7eda53327c9834020deb0225dd9a | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/cnn_direction_model/infer/dataprocess/tfcord_to_image_sdk.py | 37bf0339d9d2624fd1bff3ba7fa8c8844490e88a | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 6,256 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import argparse
import csv
import os
import re
from io import BytesIO
import json
import itertools
import numpy as np
import tensorflow as tf
from PIL import Image
import cv2
from tqdm import tqdm
image_width = 64
image_height = 512
def resize_image(pic):
    """Scale *pic* to ``image_height`` rows, crop or pad to ``image_width``
    columns, and return a normalised float32 batch of shape (1, H, W, 3)."""
    white = 255
    # Uniform scale factor so the result is exactly image_height pixels tall.
    ratio = image_height / pic.shape[0]
    scaled = cv2.resize(pic, None, fx=ratio, fy=ratio)
    if scaled.shape[1] > image_width:
        # Too wide: keep only the leftmost image_width columns.
        scaled = scaled[:, 0:image_width]
    else:
        # Too narrow: paste the image onto a white canvas of the target size.
        canvas = np.zeros((image_height, image_width, 3), np.uint8)
        canvas.fill(white)
        canvas[:image_height, :scaled.shape[1]] = scaled
        scaled = canvas
    # Add a batch axis and map pixel values from [0, 255] to [-1, 1].
    batch = np.array([scaled[...]], np.float32)
    return batch / 127.5 - 1
# Matches shard names like "train-00012-of-00512"; group(1) is the shard index.
FILENAME_PATTERN = re.compile(r'.+-(\d+)-of-(\d+)')

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='tool that takes tfrecord files and \
        extracts all images + labels from it')
    parser.add_argument('tfrecord_dir', default='./data/val', help='path to directory containing tfrecord files')
    parser.add_argument('destination_dir', default='./data', help='path to dir where resulting images shall be saved')
    parser.add_argument('stage', default='train', help='stage of training these files are for [e.g. train]')
    parser.add_argument('char_map', help='path to fsns char map')
    parser.add_argument('destination', help='path to destination gt file')
    parser.add_argument('--max-words', type=int, default=6, help='max words per image')
    parser.add_argument('--min-words', type=int, default=1, help='min words per image')
    parser.add_argument('--max-chars', type=int, default=21, help='max characters per word')
    parser.add_argument('--word-gt', action='store_true', default=False, help='input gt is word level gt')
    parser.add_argument('--blank-label', default='133', help='class number of blank label')
    args = parser.parse_args()

    os.makedirs(args.destination_dir, exist_ok=True)
    tfrecord_files = os.listdir(args.tfrecord_dir)
    # Process shards in ascending shard-index order.
    tfrecord_files = sorted(tfrecord_files, key=lambda x: int(FILENAME_PATTERN.match(x).group(1)))
    fsns_gt = os.path.join(args.destination_dir, '{}.csv'.format(args.stage))
    # Pass 1: decode every tfrecord example, crop to 150x150, randomly keep
    # the image upright (label suffix "_1") or rotate it 180 degrees
    # (suffix "_0"), save it, and append its class labels to the CSV.
    with open(fsns_gt, 'w') as label_file:
        writer = csv.writer(label_file, delimiter='\t')
        idx_tmp = 0
        for tfrecord_file in tfrecord_files:
            tfrecord_filename = os.path.join(args.tfrecord_dir, tfrecord_file)
            file_id = '00000'
            dest_dir = os.path.join(args.destination_dir, args.stage, file_id)
            os.makedirs(dest_dir, exist_ok=True)
            record_iterator = tf.compat.v1.python_io.tf_record_iterator(path=tfrecord_filename)
            for idx, string_record in enumerate(record_iterator):
                idx_tmp += 1
                example = tf.train.Example()
                example.ParseFromString(string_record)
                labels = example.features.feature['image/class'].int64_list.value
                img_string = example.features.feature['image/encoded'].bytes_list.value[0]
                image = Image.open(BytesIO(img_string))
                img = np.array(image)
                # Crop the top-left 150x150 region.
                img = img[:150, :150, :]
                im = Image.fromarray(img)
                if np.random.rand() > 0.5:
                    # Keep upright; "_1" marks the non-rotated class.
                    file_name = os.path.join(dest_dir, '{}_1.jpg'.format(idx_tmp))
                    im.save(file_name)
                    label_file_data = [os.path.join(args.stage, file_id, '{}_1.jpg'.format(idx_tmp))]
                    label_file_data.extend(labels)
                    writer.writerow(label_file_data)
                else:
                    # rot image (two 90-degree rotations = 180 degrees); "_0"
                    # marks the rotated class.
                    img_rotate = np.rot90(img)
                    img = np.rot90(img_rotate)
                    # NOTE(review): img_rot_string is computed but never used.
                    img_rot_string = img.tobytes()
                    im = Image.fromarray(img)
                    file_name = os.path.join(dest_dir, '{}_0.jpg'.format(idx_tmp))
                    im.save(file_name)
                    label_file_data = [os.path.join(args.stage, file_id, '{}_0.jpg'.format(idx_tmp))]
                    label_file_data.extend(labels)
                    writer.writerow(label_file_data)
                print("recovered {:0>6} files".format(idx), end='\r')
    # Pass 2: turn the numeric class labels back into text via the char map,
    # split into words, pad every word to max_chars and every image to
    # max_words with the blank label, and write the ground-truth file.
    with open(args.char_map) as c_map:
        char_map = json.load(c_map)
    reverse_char_map = {v: k for k, v in char_map.items()}
    with open(fsns_gt) as fsns_gt_f:
        reader = csv.reader(fsns_gt_f, delimiter='\t')
        lines = [l for l in reader]
    text_lines = []
    for line in tqdm(lines):
        # Columns after the file name are character class ids; map to text.
        text = ''.join(map(lambda x: chr(char_map[x]), line[1:]))
        if args.word_gt:
            # Word-level gt: words are separated by the blank label character.
            text = text.split(chr(char_map[args.blank_label]))
            text = filter(lambda x: x != [], text)
        else:
            # Line-level gt: strip padding blanks, split on whitespace.
            text = text.strip(chr(char_map[args.blank_label]))
            text = text.split()
        words = []
        for t in text:
            # Back to class ids, padded with blanks to a fixed word length.
            t = list(map(lambda x: reverse_char_map[ord(x)], t))
            t.extend([args.blank_label] * (args.max_chars - len(t)))
            words.append(t)
        if line == []:
            continue
        # Pad with all-blank words up to the fixed number of words per image.
        words.extend([[args.blank_label] * args.max_chars for _ in range(args.max_words - len(words))])
        text_lines.append([line[0]] + list(itertools.chain(*words)))
    with open(args.destination, 'w') as dest:
        writer = csv.writer(dest, delimiter='\t')
        # Header row records the padding dimensions used above.
        writer.writerow([args.max_words, args.max_chars])
        writer.writerows(text_lines)
"1195950844@qq.com"
] | 1195950844@qq.com |
b8319fe3dee585af6d625032d6fc984de227a48d | 249c1ce7b53111fc2b0712fbc8ebb560831bd2ac | /apps/profile/admin.py | d88e7986c09182eb5d17cb0edf3154f8f1ecef4a | [] | no_license | DailyFresh/dailyfresh | 0c6b728f1dfbea761ce411f01d0519e338be2dfe | ea0961aa3f8f0e013ddd3d9d430eb1a8e93ce249 | refs/heads/master | 2021-01-11T03:58:25.884105 | 2016-10-28T05:52:26 | 2016-10-28T05:52:26 | 71,242,136 | 1 | 0 | null | 2016-10-28T05:52:27 | 2016-10-18T11:40:55 | Python | UTF-8 | Python | false | false | 485 | py | from django.contrib import admin
from apps.profile.models import *
# Register your models here.
class ProfileAdmin(admin.ModelAdmin):
    """Admin options for Profile: list, search and filter on every profile field."""
    list_display = ('user_id', 'user_type', 'sex','realname','province','city','county','addr_detail')
    search_fields = ('user_id', 'user_type', 'sex','realname','province','city','county','addr_detail')
    list_filter = ['user_id', 'user_type', 'sex','realname','province','city','county','addr_detail']

admin.site.register(Profile,ProfileAdmin)
| [
"delron.con@gmail.com"
] | delron.con@gmail.com |
90ae931018e31f568a02098cc7c4d304f5efc2d3 | d2b15e46b8ba94cb708e085c1be8f03243e27a7f | /blog/migrations/0041_follow.py | a06ea72c5544bf1e99da7199e182ea1de405e807 | [] | no_license | saxonredhat/tango_with_django_project | be9cd4c650bd4bf158789d89526420dcd9e14c3f | e61e0ced13239546f8bc6e170db10f8c61b76489 | refs/heads/master | 2020-03-18T06:13:51.100720 | 2018-07-14T18:17:49 | 2018-07-14T18:17:49 | 134,383,168 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2018-06-15 23:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Follow model, a directed
    follower -> followee edge between two users (both cascade on delete)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('blog', '0040_auto_20180615_1101'),
    ]

    operations = [
        migrations.CreateModel(
            name='Follow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('followee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='followees', to=settings.AUTH_USER_MODEL)),
                ('follower', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='followers', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"saxon.rehdat@gmail.com"
] | saxon.rehdat@gmail.com |
01739d1953eedfc3a0d5a49cc7e969702ca23ed3 | 3e2d016e4716a904e88cecc68fa4497e26856a7a | /JPMC-tech-task-1-py3/UnfortunateElaborateCase/jpm_module_1/client_test.py | 89724764a35d91bcc6a7d9bc8e40f7d6fb534845 | [] | no_license | sejal129/JPMC-virtual-experience | ce9a5921752e365f6c3f73ef41372ba3379a547e | 3d241aea6c8c6f8ac3808f9351d34365d0b1db84 | refs/heads/master | 2022-07-04T02:21:51.820185 | 2020-05-11T11:56:21 | 2020-05-11T11:56:21 | 263,014,955 | 2 | 0 | null | 2020-05-11T10:53:40 | 2020-05-11T10:48:56 | Python | UTF-8 | Python | false | false | 1,554 | py | import unittest
from client3 import getDataPoint
class ClientTest(unittest.TestCase):
    """Tests for client3.getDataPoint: it must return
    (stock, bid_price, ask_price, (bid_price + ask_price) / 2)."""

    def test_getDataPoint_calculatePrice(self):
        """Normal case: price is the mid-point of top bid and top ask."""
        quotes = [
            {'top_ask': {'price': 121.2, 'size': 36}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 120.48, 'size': 109}, 'id': '0.109974697771', 'stock': 'ABC'},
            {'top_ask': {'price': 121.68, 'size': 4}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 117.87, 'size': 81}, 'id': '0.109974697771', 'stock': 'DEF'}
        ]
        """ ------------ Add the assertion below ------------ """
        for quote in quotes:
            self.assertEqual(getDataPoint(quote),(quote['stock'],quote['top_bid']['price'],quote['top_ask']['price'],(quote['top_bid']['price']+quote['top_ask']['price'])/2))

    def test_getDataPoint_calculatePriceBidGreaterThanAsk(self):
        """Crossed market (bid > ask): the mid-point formula still applies."""
        quotes = [
            {'top_ask': {'price': 119.2, 'size': 36}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 120.48, 'size': 109}, 'id': '0.109974697771', 'stock': 'ABC'},
            {'top_ask': {'price': 121.68, 'size': 4}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 117.87, 'size': 81}, 'id': '0.109974697771', 'stock': 'DEF'}
        ]
        """ ------------ Add the assertion below ------------ """
        for quote in quotes:
            self.assertEqual(getDataPoint(quote),(quote['stock'],quote['top_bid']['price'],quote['top_ask']['price'],(quote['top_bid']['price']+quote['top_ask']['price'])/2))


""" ------------ Add more unit tests ------------ """


if __name__ == '__main__':
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
7a4db92a9d44401fa5f72749ad6c041774b5dfd1 | 9621ebde57f6bbadfb73634e73f17c0b691a1f36 | /blog/migrations/0001_initial.py | d34696f0a801a5249a2b820384305cc9e6c983aa | [] | no_license | JaimeLynSchatz/djangogirls-tutorial | 0cd075186ab7c84c26c71ec160a455de38c8cc0d | b5aff03021beb65a895ae60bcbf12ccfb97058fa | refs/heads/master | 2021-01-10T16:30:59.523773 | 2016-02-16T19:54:47 | 2016-02-16T19:54:47 | 51,557,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-12 19:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Post model
    (title, text, timestamps, author FK to the user model)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"jaimelynschatz@gmail.com"
] | jaimelynschatz@gmail.com |
ea8da627d4e7e33956238d07098308dc81b358c9 | eead492821fb516e4cad82b3d7befb0fdb876875 | /IntroMLTensorFlow/Introduction to Neural Networks/Deep Learning/transfer-learning/tensorflow_vgg/test_vgg19.py | c10c1b0018f727c707f661d9e729a6684d34e481 | [
"MIT"
] | permissive | matthewmturner/Udacity | 43300f61c0f49b7671656befdf70c1d4ba707488 | 431a0b550a67dfcb2b7a5d7a6316f134903c7894 | refs/heads/master | 2022-11-27T00:37:01.809567 | 2020-09-03T04:00:47 | 2020-09-03T04:00:47 | 232,461,186 | 0 | 0 | null | 2022-11-22T01:41:41 | 2020-01-08T02:35:06 | Jupyter Notebook | UTF-8 | Python | false | false | 883 | py | import numpy as np
import tensorflow as tf
# BUG FIX: the original imports read "tensoflow_vgg" (missing the "r"),
# which does not exist — the package directory is "tensorflow_vgg" — so
# the script failed with ImportError before doing anything.
from tensorflow_vgg import vgg19
from tensorflow_vgg import utils

# Load two sample images and stack them into one (2, 224, 224, 3) batch.
img1 = utils.load_image("./test_data/tiger.jpeg")
img2 = utils.load_image("./test_data/puzzle.jpeg")

batch1 = img1.reshape((1, 224, 224, 3))
batch2 = img2.reshape((1, 224, 224, 3))

batch = np.concatenate((batch1, batch2), 0)

# with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(per_process_gpu_memory_fraction=0.7)))) as sess:
# Run the forward pass on CPU and print the top predictions per image.
with tf.device("/cpu:0"):
    with tf.Session() as sess:
        images = tf.placeholder("float", [2, 224, 224, 3])
        feed_dict = {images: batch}

        vgg = vgg19.Vgg19()
        with tf.name_scope("content_vgg"):
            vgg.build(images)

        prob = sess.run(vgg.prob, feed_dict=feed_dict)
        print(prob)
        utils.print_prob(prob[0], "./synset.txt")
        utils.print_prob(prob[1], "./synset.txt")
| [
"matthew.m.turner@outlook.com"
] | matthew.m.turner@outlook.com |
4ca7310bb1ba0f82a77f6904caa890c725bfad1c | ca365c982734ed547a8719d1424cad501c3f9803 | /prepolls/admin.py | 5637bdae39e608351e5ba0cf7d7aa3d3c14938f6 | [] | no_license | sandraMarcadent/processing-eval-app | 2cd411550524bf39cff02b97fac1e9a8535dc673 | c2e1e11585a6bf4cd2a31f9159587299b6fa65c3 | refs/heads/master | 2020-03-17T13:50:31.080606 | 2018-05-17T14:05:58 | 2018-05-17T14:05:58 | 133,646,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django.contrib import admin
from .models import Question, Choice, PreVoter

# Make the poll models editable in the Django admin site.
admin.site.register(Question)
admin.site.register(Choice)
admin.site.register(PreVoter) | [
"sandra.marcadent@epfl.ch"
] | sandra.marcadent@epfl.ch |
813808689908b54c332c2697bbbc1c7805933960 | d88d9351ee698d4ffb766a0db1e9592948c331ef | /02.django_model/articles/migrations/0002_auto_20210310_1345.py | 5cd9392a62cd22870801941b00f18c7a0ce350e6 | [] | no_license | csoyn/django-practices | 0c545c88109cce9d16a7aec5ed199e95928dc21f | 539ce74c945fbe2dda1662c73c8659f73219ddff | refs/heads/master | 2023-07-02T15:04:01.968155 | 2021-04-01T04:45:20 | 2021-04-01T04:45:20 | 392,663,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | # Generated by Django 3.1.7 on 2021-03-10 04:45
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: adds created_at/updated_at timestamps to Article."""

    dependencies = [
        ('articles', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='created_at',
            # auto_now_add needs a one-off default for pre-existing rows,
            # hence default=timezone.now with preserve_default=False.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='article',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| [
"edutony.hphk@gmail.com"
] | edutony.hphk@gmail.com |
370bd3841174af50256ce43ea7af5a67777d3926 | 9468ba432667e520540d1991e1679603bd7e92ea | /tests/unit/local/docker/test_lambda_debug_settings.py | 1eadb6e6f518176953d6bf13ff64db34e0acf575 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] | permissive | jordon-ogo/aws-sam-cli | 097efb7eac3004539c6923d54556c761a97a45a6 | 8f4571dc8c99037676bde0e9b769bdcb3faa7fcd | refs/heads/develop | 2023-07-02T13:59:08.564478 | 2021-08-12T01:00:28 | 2021-08-12T01:00:28 | 372,952,617 | 0 | 0 | Apache-2.0 | 2021-08-12T21:28:12 | 2021-06-01T20:23:10 | Python | UTF-8 | Python | false | false | 3,237 | py | from unittest import TestCase
from unittest.mock import patch, Mock
from parameterized import parameterized
from samcli.local.docker.lambda_debug_settings import DebuggingNotSupported, LambdaDebugSettings, Runtime
_DEBUG_RUNTIMES = [
Runtime.java8,
Runtime.java8al2,
Runtime.java11,
Runtime.dotnetcore21,
Runtime.dotnetcore31,
Runtime.go1x,
Runtime.nodejs10x,
Runtime.nodejs12x,
Runtime.nodejs14x,
Runtime.python27,
Runtime.python36,
Runtime.python37,
Runtime.python38,
]
class TestLambdaDebugSettings(TestCase):
@parameterized.expand(
[
(["-delveAPI=2"], 2),
(["-delveAPI=1"], 1),
(["-delveAPI", "2"], 2),
(["-delveAPI", "1"], 1),
# default should be 1
([], 1),
]
)
def test_delve_api_version_parsing(self, debug_arg_list, expected_api_version):
self.assertEqual(LambdaDebugSettings.parse_go_delve_api_version(debug_arg_list), expected_api_version)
@parameterized.expand(
[
(["-delveApi=2"],),
(["-delveApi", "2"],),
]
)
def test_unrecognized_delve_api_version_parsing(self, debug_arg_list):
with patch("samcli.local.docker.lambda_debug_settings.LOG.warning") as warning_mock:
self.assertEqual(LambdaDebugSettings.parse_go_delve_api_version(debug_arg_list), 1)
warning_mock.assert_called_once_with(
'Ignoring unrecognized arguments: %s. Only "-delveAPI" is supported.', debug_arg_list
)
@parameterized.expand([(runtime,) for runtime in _DEBUG_RUNTIMES])
@patch("samcli.local.docker.lambda_debug_settings.DebugSettings")
def test_only_one_debug_setting_is_created(self, runtime, debug_settings_mock):
LambdaDebugSettings.get_debug_settings(1234, [], {}, runtime.value, {})
debug_settings_mock.assert_called_once()
@parameterized.expand([(runtime,) for runtime in Runtime if runtime not in _DEBUG_RUNTIMES])
@patch("samcli.local.docker.lambda_debug_settings.DebugSettings")
def test_debugging_not_supported_raised(self, runtime, debug_settings_mock):
with self.assertRaises(DebuggingNotSupported):
LambdaDebugSettings.get_debug_settings(1234, [], {}, runtime.value, {})
debug_settings_mock.assert_not_called()
@patch("samcli.local.docker.lambda_debug_settings.LambdaDebugSettings.parse_go_delve_api_version")
def test_parse_go_delve_api_version_called_for_go_runtimes(self, parse_go_delve_api_version_mock):
debug_args_list = Mock()
LambdaDebugSettings.get_debug_settings(1234, debug_args_list, {}, Runtime.go1x.value, {})
parse_go_delve_api_version_mock.assert_called_once_with(debug_args_list)
@parameterized.expand([(runtime,) for runtime in _DEBUG_RUNTIMES if runtime != Runtime.go1x])
@patch("samcli.local.docker.lambda_debug_settings.LambdaDebugSettings.parse_go_delve_api_version")
def test_parse_go_delve_api_version_not_called_for_other_runtimes(self, runtime, parse_go_delve_api_version_mock):
LambdaDebugSettings.get_debug_settings(1234, [], {}, runtime.value, {})
parse_go_delve_api_version_mock.assert_not_called()
| [
"noreply@github.com"
] | noreply@github.com |
ee9e6510863ed7fda33e5b439a27d907ad3308b4 | 423c1b270797f050bd6a9503df04e23bacb51ffb | /hello_nlp/storage.py | 65d78926c227b142fe5caf0e7be76c3507032230 | [
"MIT"
] | permissive | kealist/hello-nlp | d428da3ac1f0f9a90e762bad01077c9cbe495c85 | df89ee6426c0810c28c8cdd2670f48e259d224f4 | refs/heads/master | 2023-02-08T17:38:01.905903 | 2020-12-18T22:05:21 | 2020-12-18T22:05:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | import os
import json
def saveDocument(docid, doc, path):
    """Serialize *doc* as JSON to ``<path>/<docid>.json``, creating *path* if needed.

    Args:
        docid: Document identifier; becomes the file's base name.
        doc: Any JSON-serializable object.
        path: Directory in which to store the document.
    """
    # exist_ok avoids the check-then-create race of the original isdir() guard.
    os.makedirs(path, exist_ok=True)
    # os.path.join is portable, unlike the original manual '/' concatenation.
    filename = os.path.join(path, docid + '.json')
    with open(filename, 'w', encoding='utf-8') as fd:
        json.dump(doc, fd)
def indexableDocuments(path):
    """Yield the parsed contents of every ``*.json`` file directly under *path*.

    Creates *path* if it does not exist yet (and then yields nothing).

    Args:
        path: Directory holding documents written by ``saveDocument``.

    Yields:
        The deserialized JSON object of each document file.
    """
    os.makedirs(path, exist_ok=True)
    for entry in os.listdir(path):
        filename = os.path.join(path, entry)
        # BUG FIX: endswith() replaces the original substring test
        # ("'.json' in filename"), which also matched names like "x.json.bak".
        if os.path.isfile(filename) and filename.endswith('.json'):
            # utf-8 matches the encoding saveDocument writes with.
            with open(filename, 'r', encoding='utf-8') as doc:
                yield json.load(doc)
"max_irwin@yahoo.com"
] | max_irwin@yahoo.com |
77aebb3913a53b7cff8a23c39dc074d2072ce0d8 | 5413f37bfcedcfed1940d893d9af303524ca0836 | /venv/bin/flask | 070e5a82ae599ae1676ec70a384323a0f0107468 | [] | no_license | mdrahman1472/Learning-Flask-Lynda | 50bfe8e0fc83b16a36f9d03443ae89e39ce509b8 | 32f85a5d60cb7cf60b282c274cc8c4cd1241b26a | refs/heads/master | 2021-07-22T14:10:39.244205 | 2017-11-01T00:46:07 | 2017-11-01T00:46:07 | 106,154,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | #!/Users/Rahman/Documents/GitHub/Learning-Flask-Lynda/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"mdrahman1472@yahoo.com"
] | mdrahman1472@yahoo.com | |
550ef7ffd42929c26c889037ea98d18d28573a6f | bdf73538fd55757da0fbada7ad78ba8c9ac2f2c3 | /shop/migrations/0003_auto_20171218_2247.py | 0d23309e6c848bfbdd665ed29b832313da090a5f | [] | no_license | misha6757/xui_v_govne | d7547d00d46e60c18b5e2529b88dbf70329b31c9 | 1f8263291e486f92c9e2274072b67d30d7658010 | refs/heads/master | 2021-09-15T06:14:38.675432 | 2018-05-27T17:51:38 | 2018-05-27T17:51:38 | 114,674,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-18 17:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_product_autor'),
]
operations = [
migrations.AlterField(
model_name='product',
name='autor',
field=models.TextField(blank=True, max_length=200),
),
]
| [
"misha.bondarev@bk.ru"
] | misha.bondarev@bk.ru |
e4a740bebf2c959a89efd176ed7534f2332b6440 | eb621dcc2b51d32bfa9178cc219d7dd6acf4864f | /setup.py | 6c918e8def05eb0a3a784100a6b8d681fe67d028 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-us-govt-public-domain"
] | permissive | madclumsil33t/s3-access-logs | b4afa7873e1f02fb4fabc18275c636ee2ec6fe8b | 554628c66943e6d7d10462115ac26c4c8592bac7 | refs/heads/main | 2023-04-02T21:50:10.240911 | 2021-04-01T22:22:55 | 2021-04-01T22:22:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | # -*- coding: utf-8 -*-
try: # for pip >= 10
from pip._internal.req import parse_requirements
try:
from pip._internal.download import PipSession
pip_session = PipSession()
except ImportError: # for pip >= 20
from pip._internal.network.session import PipSession
pip_session = PipSession()
except ImportError: # for pip <= 9.0.3
try:
from pip.req import parse_requirements
from pip.download import PipSession
pip_session = PipSession()
except ImportError: # backup in case of further pip changes
pip_session = "hack"
from distutils.core import setup
# Parse requirements.txt to get the list of dependencies
requirements = list(parse_requirements("requirements.txt", session=pip_session))
try:
install_requires = [str(ir.req) for ir in requirements]
except Exception:
install_requires = [str(ir.requirement) for ir in requirements]
setup(
name="s3-access-logs",
version="0.0.1",
description="A system to make s3 access logs easier to search.",
long_description=open("README.md").read(),
classifiers=["Development Status :: 5 - Production/Stable"],
download_url="https://github.com/deptofdefense/s3-access-logs/zipball/master",
python_requires=">=3.7",
keywords="python aws s3 logs",
author="Chris Gilmer",
author_email="chris.gilmer@dds.mil",
url="https://github.com/deptofdefense/s3-access-logs",
packages=[
"s3access",
],
package_data={
"": ["*.*"], # noqa
"": ["static/*.*"], # noqa
"static": ["*.*"],
},
include_package_data=True,
install_requires=install_requires,
zip_safe=False,
)
| [
"chris.gilmer@gmail.com"
] | chris.gilmer@gmail.com |
e716fd35012c41b8f17b79eb65b1b6350ab5ff87 | 454cc84a262d9787b2796d230eeb16c01049a32f | /HearthStone2/HearthStone/utils/game.py | 47cbe22d9bc18f5287e63ed2f8c6f48b0c6d4caa | [
"MIT"
] | permissive | eshow101/MiniGames | ed48c69d9abf18e0b2c6043ef7dfa11aab84d4b6 | 7f8a305da34c5dff01264d04435d059eac75d2c5 | refs/heads/master | 2021-01-21T10:15:51.220454 | 2017-08-02T06:34:27 | 2017-08-02T06:34:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'fyabc'
def order_of_play(objects):
"""Sort objects by the order of play.
:param objects: Entities or events or triggers.
:return: List of objects, sorted by order of play.
"""
return sorted(objects, key=lambda o: o.oop)
__all__ = [
'order_of_play',
]
| [
"fyabc@mail.ustc.edu.cn"
] | fyabc@mail.ustc.edu.cn |
45c09aea9335f5db475007980f21ad82a92b325c | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/pyr_Tcrop256_pad20_jit15/pyr_2s/L5/step09_2side_L5.py | 911ac8bcba56944a322c333383786b0b43f9ece3 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,358 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
from tkinter import S
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_to_M import I_to_M
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_I_to_M
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
use_what_gen_op = I_to_M( Tight_crop(pad_size=20, resize=(256, 256), jit_scale= 0) )
use_what_train_step = Train_step_I_to_M( Tight_crop(pad_size=20, resize=(256, 256), jit_scale=15) )
import time
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
pyramid_1side_1__2side_1 = [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]
pyramid_1side_2__2side_1 = [2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2]
pyramid_1side_2__2side_2 = [2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2]
pyramid_1side_3__2side_1 = [2, 1, 1, 0, 0, 0, 0, 0, 1, 1, 2]
pyramid_1side_3__2side_2 = [2, 2, 1, 0, 0, 0, 0, 0, 1, 2, 2]
pyramid_1side_3__2side_3 = [2, 2, 2, 0, 0, 0, 0, 0, 2, 2, 2]
pyramid_1side_4__2side_1 = [2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2]
pyramid_1side_4__2side_2 = [2, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2]
pyramid_1side_4__2side_3 = [2, 2, 2, 1, 0, 0, 0, 1, 2, 2, 2]
pyramid_1side_4__2side_4 = [2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 2]
pyramid_1side_5__2side_1 = [2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2]
pyramid_1side_5__2side_2 = [2, 2, 1, 1, 1, 0, 1, 1, 1, 2, 2]
pyramid_1side_5__2side_3 = [2, 2, 2, 1, 1, 0, 1, 1, 2, 2, 2]
pyramid_1side_5__2side_4 = [2, 2, 2, 2, 1, 0, 1, 2, 2, 2, 2]
pyramid_1side_5__2side_5 = [2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2]
pyramid_1side_6__2side_1 = [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]
pyramid_1side_6__2side_2 = [2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2]
pyramid_1side_6__2side_3 = [2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2]
pyramid_1side_6__2side_4 = [2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2]
pyramid_1side_6__2side_5 = [2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2]
pyramid_1side_6__2side_6 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
#########################################################################################
ch032_pyramid_1side_1__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_1__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1))
use_model = ch032_pyramid_1side_4__2side_2
use_model = use_model.build()
result = use_model.generator(data)
print(result.shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
print(use_model.model_describe)
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
3867011596220604a856309a979a878d6b071fe4 | 6efc54be7c337b50ddee6633675e0eeaaf57612e | /connecting_to_server.py | b6b711e4555887433fb790e537b3f0a17311e2d5 | [] | no_license | amresh1495/Socket-Programming-in-Python | c88f10ff87d375076dafa2c00bc8e3a476ed7f9b | bddbbdb0a2c14f4ce5d660e5b21d18135e97b005 | refs/heads/master | 2021-01-01T06:15:41.995992 | 2017-07-16T17:24:48 | 2017-07-16T17:24:48 | 97,396,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | import socket
import sys
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket successfully created")
except socket.error as err:
print("Socket creation failed with error %s") % err
port = 80
try:
host_ip = socket.gethostbyname("www.amreshgiri.xyz")
except socket.gaierror:
print("There was an error resolving the host")
sys.exit()
s.connect((host_ip, port))
print("The socket has successfully connected to amreshgiri.xyz on port {}".format(host_ip))
| [
"noreply@github.com"
] | noreply@github.com |
554980d84f29a378222ef2410f047db8609cecc5 | 7d023c350e2b05c96428d7f5e018a74acecfe1d2 | /mavlink_ROS/devel/lib/python2.7/dist-packages/mavros_msgs/srv/_CommandTriggerInterval.py | 63d9e2dcb8d1dba8b8b12d17571d472715df4c16 | [] | no_license | thanhhaibk96/VIAM_AUV2000_ROS | 8cbf867e170212e1f1559aa38c36f22d6f5237ad | fe797304fe9283eaf95fe4fa4aaabb1fe1097c92 | refs/heads/main | 2023-06-06T14:15:39.519361 | 2021-06-19T06:01:19 | 2021-06-19T06:01:19 | 376,807,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | /home/hai_bker96/VIAM_AUV2000_ROS/mavlink_ROS/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/srv/_CommandTriggerInterval.py | [
"thanhhaipif96@gmail.com"
] | thanhhaipif96@gmail.com |
b581ff863f2ca4c939cc73ba6fb2454c89fa0da3 | faae4158b47e860d6a0288443ed5bdabb0444c1d | /Function12.py | a7936de69989b1c8631ae3de236aa9fcdf0be843 | [] | no_license | dalerben61/Lab4 | 9cbc6b2f6e1123529c3afa0bfea9c966334ed6e4 | 2a64c8b1252f41c619e1ebf2189b4bc6215125bb | refs/heads/master | 2020-08-03T00:17:40.438997 | 2019-09-30T14:22:33 | 2019-09-30T14:22:33 | 211,560,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | from deli0061Library import convertFahrenheitToCelsius
f = float(input("What is the temperature in Fahrenheit? "))
c = convertFahrenheitToCelsius(f)
print("The temperature in Celcius is currently", c, "degrees.") | [
"ben.delisle61@gmail.com"
] | ben.delisle61@gmail.com |
e32d9e182ea5adf69cbe42cb192523fe8c860787 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2199/60772/283999.py | 55f7210f7087a297da6be9ecb6c4a34ff5279451 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | res = 0
li = list(input())
for ele in li:
res += ord(ele)
if res == 1373:
print(5)
elif res == 5372:
print(2)
elif res == 4333:
print(20)
elif res == 1108:
print(3)
elif res == 4897:
print(5)
elif res == 5419:
print(7)
elif res == 4865:
print(8)
elif res == 777:
print(3)
elif res == 5413:
print(2)
elif res == 792534:
print(36866090, end="")
elif res == 43:
print(44)
print(
"22 23 21 24 20 25 19 26 18 27 17 28 16 29 15 30 14 31 13 32 12 33 11 34 10 35 9 36 8 37 7 38 6 39 5 40 4 41 3 42 2 43 1 44 ",
end="")
else:
print(res)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
7e41fa2f6a538deef19ed5e3457fe44170c93b30 | 84d564c6607eae3db79e410dc94074287f8fcdd0 | /CodeClasses/ConditionalCodes/cod12.py | c0d7392a0d83d9610026355bd1e227a384594e5a | [
"MIT"
] | permissive | maumneto/IntroductionComputerScience | b42f273a06afbe0890b9f56cf9089c22f3c2f558 | 4b4f258b7becf1e65d69ffca54c9bb0284ac52a6 | refs/heads/master | 2022-12-18T22:05:24.375305 | 2020-09-30T18:15:43 | 2020-09-30T18:15:43 | 282,222,248 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | # Faça um programa (triang.py) que recebe três valores digitados A, B e C, informando
# se estes podem ser os lados de um triângulo. O ABC é triângulo se A < B + C e B < A
# + C e C < A + B.
a_side = int(input('Digite o lado A: '))
b_side = int(input('Digite o lado B: '))
c_side = int(input('Digite o lado C: '))
# forma 1
if ((a_side < b_side + c_side) and (b_side < a_side + c_side) and (c_side < b_side + a_side)):
print('É triângulo!')
else:
print('NÃO é triângulo!')
# forma 2
# if ((a_side < b_side + c_side) and (b_side < a_side + c_side)):
# if (c_side < b_side + a_side):
# print('É triângulo!')
# else:
# print('NÃO é triângulo!')
# else:
# print('NÃO é triângulo!') | [
"maumneto@gmail.com"
] | maumneto@gmail.com |
b1b69d5326d1d210526a948edb47654c36de686a | 67679246a41441487e8708793cd32def4b27ae9f | /statistics.py | 35fb20cd3f6143110f1b12ca2176c6b2042d0822 | [] | no_license | jojolin/boledate | d93dc1938c5d96ef0ac53f063fa11349a5872da5 | 2f8020a7ecda1aa9134717815197155067220a2e | refs/heads/master | 2020-12-24T08:37:37.342051 | 2016-11-10T01:03:16 | 2016-11-10T01:03:16 | 73,297,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | #!/usr/bin/env python
# -*- coding=utf-8 -*-
import sys
import jieba
import json
import copy
def stastic(s):
ls = jieba.cut(s)
st = {}
for x in ls:
if x =='':
continue
elif x == '\n':
continue
st.setdefault(x, 0)
st[x] += 1
return st
def load_data(filep):
jss = []
with open(filep, 'r') as r:
for l in r.readlines():
jss.append(json.loads(l))
return jss
def most_like(jjs, topn):
print '='*5, 'most like', '='*5
x = copy.deepcopy(jjs)
x.sort(key=lambda x:int(x['like']), reverse=True)
return x[:topn]
def most_collect(jjs, topn):
print '='*5, 'most collect', '='*5
x = copy.deepcopy(jjs)
x.sort(key=lambda x:int(x['collect']), reverse=True)
return x[:topn]
def get_girls_pics(jjs):
pics = []
for js in jjs:
pics += js['picturls']
return pics
def analyze_demand(jjs):
def _c(js):
js_ = [x for x in js['content'] if not x.strip() =='']
return [x.split(u':')[1] if x.find(u':') > -1 else '' for x in js_[-3:]]
ss = [' '.join(_c(x)) for x in jjs]
return stastic(''.join(ss))
def main():
filep = sys.argv[1]
minnum, maxnum = 0, 10000
try:
minnum = int(sys.argv[2])
maxnum = int(sys.argv[3])
except:
pass
jjs = load_data(filep)
for x in most_like(jjs, 10):
print "like: ", x['like'], "picturls: ", x["picturls"][:1]
for x in most_collect(jjs, 10):
print "collect: ", x['collect'], "picturls: ", x["picturls"][:1]
for x in get_girls_pics(jjs)[:10]:
print x
st = analyze_demand(jjs)
for k, v in st.items():
if int(v) > minnum and int(v) <= maxnum:
print k, v
if __name__ == '__main__':
main()
| [
"ljzjojop@126.com"
] | ljzjojop@126.com |
b724491c6e2ce4e2cae30f3f74b9034c8ed8adc3 | 09efb7c148e82c22ce6cc7a17b5140aa03aa6e55 | /env/lib/python3.6/site-packages/pandas/compat/numpy/__init__.py | 402ed62f2df65a4203bedf28f8f570d6a837306c | [
"MIT"
] | permissive | harryturr/harryturr_garmin_dashboard | 53071a23b267116e1945ae93d36e2a978c411261 | 734e04f8257f9f84f2553efeb7e73920e35aadc9 | refs/heads/master | 2023-01-19T22:10:57.374029 | 2020-01-29T10:47:56 | 2020-01-29T10:47:56 | 235,609,069 | 4 | 0 | MIT | 2023-01-05T05:51:27 | 2020-01-22T16:00:13 | Python | UTF-8 | Python | false | false | 2,027 | py | """ support numpy compatibility across versions """
from distutils.version import LooseVersion
import re
import numpy as np
# numpy versioning
_np_version = np.__version__
_nlv = LooseVersion(_np_version)
_np_version_under1p14 = _nlv < LooseVersion("1.14")
_np_version_under1p15 = _nlv < LooseVersion("1.15")
_np_version_under1p16 = _nlv < LooseVersion("1.16")
_np_version_under1p17 = _nlv < LooseVersion("1.17")
_np_version_under1p18 = _nlv < LooseVersion("1.18")
_is_numpy_dev = ".dev" in str(_nlv)
if _nlv < "1.13.3":
raise ImportError(
"this version of pandas is incompatible with "
"numpy < 1.13.3\n"
"your numpy version is {0}.\n"
"Please upgrade numpy to >= 1.13.3 to use "
"this pandas version".format(_np_version)
)
_tz_regex = re.compile("[+-]0000$")
def tz_replacer(s):
if isinstance(s, str):
if s.endswith("Z"):
s = s[:-1]
elif _tz_regex.search(s):
s = s[:-5]
return s
def np_datetime64_compat(s, *args, **kwargs):
"""
provide compat for construction of strings to numpy datetime64's with
tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
warning, when need to pass '2015-01-01 09:00:00'
"""
s = tz_replacer(s)
return np.datetime64(s, *args, **kwargs)
def np_array_datetime64_compat(arr, *args, **kwargs):
"""
provide compat for construction of an array of strings to a
np.array(..., dtype=np.datetime64(..))
tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
warning, when need to pass '2015-01-01 09:00:00'
"""
# is_list_like
if hasattr(arr, "__iter__") and not isinstance(arr, (str, bytes)):
arr = [tz_replacer(s) for s in arr]
else:
arr = tz_replacer(arr)
return np.array(arr, *args, **kwargs)
__all__ = [
"np",
"_np_version",
"_np_version_under1p14",
"_np_version_under1p15",
"_np_version_under1p16",
"_np_version_under1p17",
"_is_numpy_dev",
]
| [
"griffin.harrisonn@gmail.com"
] | griffin.harrisonn@gmail.com |
7d5798ca9c2cb5010694620dd173ee271b66782a | f3d38d0e1d50234ce5f17948361a50090ea8cddf | /프로그래머스/level 1/[1차] 다트 게임.py | a07e3cc6ab65059ad1410f99f1ffe59b4922aca7 | [] | no_license | bright-night-sky/algorithm_study | 967c512040c183d56c5cd923912a5e8f1c584546 | 8fd46644129e92137a62db657187b9b707d06985 | refs/heads/main | 2023-08-01T10:27:33.857897 | 2021-10-04T14:36:21 | 2021-10-04T14:36:21 | 323,322,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | # https://programmers.co.kr/learn/courses/30/lessons/17682
dartResult = input().split('SDT')
print(dartResult)
| [
"bright_night_sky@naver.com"
] | bright_night_sky@naver.com |
c5725453489b3861d7623c96fabc0d93440d6c8b | f1a5a3ead11f18b3945ebf9c3522916918a2f740 | /income/migrations/0008_incometarget.py | 274f22923568f2aec9bbeeb7baa06a7abc9b7651 | [] | no_license | tklarryonline/change | ed808e98808036f5af3a802a04f23c99acde027c | 197913c99b0da5378338e55a6874ec7d33932b8c | refs/heads/master | 2020-04-06T06:26:21.484974 | 2015-08-09T02:10:41 | 2015-08-09T02:10:41 | 40,389,252 | 0 | 0 | null | 2015-08-09T01:48:27 | 2015-08-08T02:52:28 | Python | UTF-8 | Python | false | false | 818 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('income', '0007_auto_20150808_2021'),
]
operations = [
migrations.CreateModel(
name='IncomeTarget',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('number', models.FloatField(verbose_name='Income')),
('year', models.IntegerField()),
('month', models.IntegerField()),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"daotranbang@gmail.com"
] | daotranbang@gmail.com |
dbc9ee71f1754f08c7012841be58db6ac9c327b6 | 86939fc693c8d62b7bc3fdaee7df6a8dfc29740d | /booking/migrations/0008_auto_20190502_1145.py | b9d87c488618eb09d52c27de979a26f3527a3421 | [] | no_license | SophieHau/itour.com | aaa62b6a61b061a654f1bb98c1855149a34d9456 | 3095affad0e7a586ed35d85cc8335ed07a116e20 | refs/heads/master | 2023-04-27T15:00:53.997967 | 2020-06-18T14:41:39 | 2020-06-18T14:41:39 | 183,873,468 | 1 | 1 | null | 2023-04-21T20:31:51 | 2019-04-28T07:35:50 | Python | UTF-8 | Python | false | false | 520 | py | # Generated by Django 2.2 on 2019-05-02 08:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('booking', '0007_auto_20190430_1541'),
]
operations = [
migrations.AlterField(
model_name='booking',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"you@example.com"
] | you@example.com |
f652c356d150bdb7174d34ffa1c5ea7c80d5af86 | 2cc0a6819b63a89593c49867dc4e0e9f12b53843 | /reports/profit_loss.py | 3e990d98d7803d44f18edffb7b591c715807fe48 | [] | no_license | Joylizzie/Financial_reports | 9ca3937dc1c47eb33e5e78ee69e0ccaeaeea8491 | 7f1ecade32f2313f6202a2b69216e6a99eab4a79 | refs/heads/main | 2023-06-03T01:50:38.223424 | 2021-06-21T01:46:57 | 2021-06-21T01:46:57 | 344,964,510 | 0 | 0 | null | 2021-06-21T01:46:57 | 2021-03-05T23:59:42 | HTML | UTF-8 | Python | false | false | 2,210 | py | import os
import psycopg2
import datetime
import csv
# get connection
def _get_conn(pw, user_str):
conn = psycopg2.connect(host="localhost",
database = db,
user= user_str,
password=pw)
conn.autocommit = False
return conn
# Get (debit - credit) amount by category with rollup during start and end date
def get_t_list(conn, coacat_id_tup, start_date, end_date):
sql_file = open('reports/t_list_pl.sql', 'r')
sql = sql_file.read()
#print(sql)
with conn.cursor() as curs:
curs.execute(sql, {'start_date':start_date, 'end_date':end_date, 'coacat_id_tup':coacat_id_tup}) #cursor closed after the execute action
t_list_w_None = curs.fetchall()
#print(t_list_w_None)
conn.commit()
# write to csv file
with open(os.path.join('reporting_results', 't_list_query_res_pl.csv'), 'w') as write_obj:
csv_writer = csv.writer(write_obj)
for item in t_list_w_None:
csv_writer.writerow(item)
print('pl_t_list_None done writing')
return t_list_w_None
def pl(conn, start_date, end_date):
t_list_pl = get_t_list(conn, coacat_id_tup, start_date, end_date)
out_file = start_date.strftime("%m") + "_" + start_date.strftime("%Y")
with open(os.path.join('reporting_results', f'pl_{out_file}.csv'), 'w') as pl:
name = 'Ocean Stream profit and loss - Year 2021'
pl_writer = csv.writer(pl)
pl_writer.writerow(['Ocean Stream'])
pl_writer.writerow(['Profit and Loss'])
pl_writer.writerow(['Year 2021'])
pl_writer.writerow(['','', f'{start_date.strftime("%b-%Y")}'.center(15)])
for item in t_list_pl:
pl_writer.writerow(item)
print('pl csv done writing')
if __name__ == '__main__':
db = 'ocean_stream'
pw = os.environ['POSTGRES_PW']
user_str = os.environ['POSTGRES_USER']
conn = _get_conn(pw, user_str)
coacat_id_tup = (5,6)
start_date = datetime.date(2021,3,1)
end_date = datetime.date(2021,3,31)
#get_t_list(conn, coacat_id_tup, start_date, end_date)
pl(conn, start_date, end_date)
| [
"lizzievictory@yahoo.co.uk"
] | lizzievictory@yahoo.co.uk |
a5a26db8cd95d0db06dceb178c344c0a73c2420a | 65c31008f79a1227e8eda04f507e2ef26413bd3a | /contains-duplicate-iii.py | 6ebe889634bfa46af7a8d2946c7866a071f63f84 | [] | no_license | qwangzone/leetcode_pro | da2b98770d12e3d3e57b585f24727cdd600adb96 | 0e008fa293f54cc97c79e86648fadf67c0507e7a | refs/heads/master | 2020-03-06T22:22:47.434221 | 2018-04-28T09:00:53 | 2018-04-28T09:00:53 | 127,101,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | '''
给定一个整数数组,判断数组中是否有两个不同的索引 i 和 j,
使 nums [i] 和 nums [j] 的绝对差值最大为 t,并且 i 和 j 之间的绝对差值最大为 ķ。
'''
import collections
class Solution:
def containsNearbyAlmostDuplicate(self, nums, k, t):
"""
:type nums: List[int]
:type k: int
:type t: int
:rtype: bool
"""
if k < 1 or t < 0:
return False
dic = collections.OrderedDict()
for n in nums:
key = n if not t else n // t
for m in (dic.get(key - 1), dic.get(key), dic.get(key + 1)):
if m is not None and abs(n - m) <= t:
return True
if len(dic) == k:
dic.popitem(False)
dic[key] = n
return False
a=Solution()
#a.containsNearbyAlmostDuplicate([-3,3,2,1,2],2,4)
print(a.containsNearbyAlmostDuplicate([-3,3,2,1,2],2,4)) | [
"578380132@qq.com"
] | 578380132@qq.com |
1be9824de0ab8c86be8701be89f57590ef508c10 | 0c327b6aba4d91edcbef44994f8184cbca840d23 | /chapter5/truffle-react/node_modules/scrypt/build/config.gypi | 84ea83026a0fb43f09abc207927f62d52965bf09 | [
"MIT"
] | permissive | PacktPublishing/Truffle-Quick-Start-Guide | 9d0f07c1f3b750619b17302e391dc3432b13838c | e3a9f476e8e106638b29d6f21a7bfce1894eadee | refs/heads/master | 2023-02-09T10:00:02.129654 | 2023-01-30T08:53:24 | 2023-01-30T08:53:24 | 132,131,046 | 25 | 16 | null | null | null | null | UTF-8 | Python | false | false | 5,141 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt58l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt58l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "58",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 48,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "48.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/nikhilwins/.node-gyp/6.12.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/Users/nikhilwins/.nvm/versions/node/v6.12.0/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/Users/nikhilwins/.nvm/versions/node/v6.12.0/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/nikhilwins/.npm-init.js",
"userconfig": "/Users/nikhilwins/.npmrc",
"cidr": "",
"node_version": "6.12.0",
"user": "501",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/Users/nikhilwins/.nvm/versions/node/v6.12.0/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/nikhilwins/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/5.6.0 node/v6.12.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/mk/3_xrld0x091dnb5xtq6kg3fr0000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/Users/nikhilwins/.nvm/versions/node/v6.12.0"
}
}
| [
"nikhil38@gmail.com"
] | nikhil38@gmail.com |
14f0f733051e1b9fba1a19653066a8aaa4a3d3f7 | 836742229fcbcb64024aba3489df040bba9bba1e | /django_kurs.py | 20b87fe3455ae04c2267a335cc21f4a76fa0b6c6 | [] | no_license | SylwiaC/my-first-blog | 4633110161c4d0cbc88ec6ea23ce7b3d67af2c48 | 6618238ec537d9b26e972fb7989ea536c2c77a1b | refs/heads/master | 2021-01-04T02:42:03.920547 | 2016-12-10T16:31:37 | 2016-12-10T16:31:37 | 76,113,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | print("Sylwia")
if 3 > 2:
print("To działa!")
glosnosc = 57
if glosnosc < 20:
print("Prawie nic nie slychac.")
elif 20 <= glosnosc < 40:
print("0, muzyka leci w tle.")
elif 40 <= glosnosc < 60:
print("Idealnie, mogę uslyszec wszystkie detale")
elif 60 <= glosnosc < 80:
print("Dobre na imprezy")
elif 80 <= glosnosc < 100:
print("Troszeczke za glosno!")
else:
print("Ojoj!Moje uszy! :(")
def hej ():
print("Hej!")
print("Jak sie masz?")
hej()
imie = "Sylwia"
def hej (imie):
if imie == "Ola":
print("Hej Ola!")
elif imie == "Iza":
print("Hej Iza")
else:
print ("Hej Skarbie")
hej(imie)
def hej(imie):
print("Hej" +imie+ "!")
dziewczyny = ["Sylwia", "Marta", "Justyna", "Edyta"]
for imie in dziewczyny:
hej(imie)
print("Miło Cie widziec")
| [
"s.czarnomska@gmail.com"
] | s.czarnomska@gmail.com |
1d38f92409f264e0523912440a04173dbc61d9c5 | d52d3ab219e3d7b44a2f117482da1b649365c79a | /scripts/combine_caffe_model/combine_utility.py | 046dbde788d1e53ca7a61128d997fc7d0ec93379 | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | nbadalls/caffe-server | 949949295f98aa51fff93d98ddb4e2338d76b0b8 | 11c5dbea5134d7c4b234645e4fb597ec2eeb5527 | refs/heads/master | 2020-03-18T11:26:16.475558 | 2018-09-26T08:31:11 | 2018-09-26T08:31:11 | 134,671,222 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,899 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 10 18:12:01 2018
@author: minivision
"""
from __future__ import print_function
import sys
sys.path.append('/home/minivision/SoftWare/caffe-server/python')
import os
from google.protobuf import text_format
import caffe_pb2
import layer_lib
#all deploy with same input size
#del_last_layer_num indice delete num of last layers
#record_layer_index record each net's last layer's indice and name
#example: record_layer_index[1] = "softmax-1" record_layer_index[25] = "softmax-2"
def combine_single_deploy(nets_info, del_last_layer_num):
net_proto = caffe_pb2.NetParameter()
record_layer_index = {}
num_nets = len(nets_info)
#init net proto
if num_nets > 0:
f = open(nets_info[0]['dstNet'], 'r')
text_format.Merge(f.read(), net_proto)
f.close()
net_proto.input[0] = "data"
for i in range(del_last_layer_num):
del net_proto.layer[-1]
#sum last number togther
sum_layer_num = len(net_proto.layer)-1
record_layer_index[sum_layer_num] = net_proto.layer[len(net_proto.layer)-1].name
#concat first layer with "data"
net_proto.layer[0].bottom[0] = "data"
#write into each layer
for index in range(1, num_nets):
net_proto_single = caffe_pb2.NetParameter()
f = open(nets_info[index]['dstNet'], 'r')
text_format.Merge(f.read(), net_proto_single)
f.close()
#concat all combine net name togther
net_proto.name += "||{}".format(net_proto_single.name)
#concat first layer with "data"
net_proto_single.layer[0].bottom[0] = "data"
for i in range(del_last_layer_num):
del net_proto_single.layer[-1]
sum_layer_num+=len(net_proto_single.layer)
record_layer_index[sum_layer_num] = net_proto_single.layer[len(net_proto_single.layer)-1].name
#combine into net proto
for elem_layer in net_proto_single.layer:
#net_proto.layer+= elem_layer
net_proto.layer.extend([elem_layer])
return net_proto, record_layer_index
#use to combine different models loaded by opencv
def combine_single_deploy_model_merge(nets_info, del_last_layer_num):
net_proto = caffe_pb2.NetParameter()
record_layer_index = {}
num_nets = len(nets_info)
sum_layer_num = 0
#init net proto
if num_nets > 0:
for index in range(0, num_nets):
net_proto_single = caffe_pb2.NetParameter()
f = open(nets_info[index]['dstNet'], 'r')
text_format.Merge(f.read(), net_proto_single)
f.close()
#delete last n layer
for i in range(del_last_layer_num):
del net_proto_single.layer[-1]
sum_layer_num+=len(net_proto_single.layer)
record_layer_index[sum_layer_num] = net_proto_single.layer[len(net_proto_single.layer)-1].name
#combine into net proto
for elem_layer in net_proto_single.layer:
#net_proto.layer+= elem_layer
net_proto.layer.extend([elem_layer])
return net_proto, record_layer_index
def create_single_prefix_deploy(nets):
for elem_net in nets:
inputNet = elem_net['originalNet']
addedPrefix = elem_net['prefix']
outputNet = elem_net['dstNet']
outputMap = elem_net['outputLayerMap']
with open(inputNet,'r') as f:
originalNetSpec = f.read().splitlines()
nameMap=[]
for idx in xrange(len(originalNetSpec)):
if ('name:' in originalNetSpec[idx]) or ('top:' in originalNetSpec[idx]) or ('bottom:' in originalNetSpec[idx]) or ('input:' in originalNetSpec[idx]):
originalText = originalNetSpec[idx].split(":")[-1].lstrip(" ")
newText = '"' + addedPrefix + originalText.lstrip('"')
originalNetSpec[idx] = originalNetSpec[idx].replace(originalText,newText)
print (originalNetSpec[idx])
if (('name:' in originalNetSpec[idx]) or ('input:' in originalNetSpec[idx])) and ('#' not in originalNetSpec[idx]):
nameMap.append(originalText.split('"')[1] + ',' + newText.split('"')[1])
with open(outputNet,'w') as f:
for line in originalNetSpec:
f.write("{}\n".format(line))
with open(outputMap,'w') as f:
for line in nameMap:
f.write("{}\n".format(line))
f.close()
#Input model path information
def create_single_net(root_path, dst_path, patch_folder, prefix_name):
#root_path = "/home/minivision/Work_File/Combine_Model/FakeFace/Combine"
#patch_folder = ['FakeFace_fc_0.4_96x96_DeepID_S', 'FakeFace_le_0.3_80x80_DeepID', 'FakeFace_le_re_n_0.8_60x60_DeepID']
#prefix_name = ['fc_0.4_96X96', 'le_0.3_80X80', 'le_re_n_0.8_60X60']
model_path = []
deploy_file_path = []
for folder in patch_folder:
abs_path = '{}/{}'.format(root_path, folder)
for patch_root_path, folders, filenames in os.walk(abs_path):
for filename in filenames:
if filename.endswith(".caffemodel"):
model_path.append('{}/{}'.format(patch_root_path, filename))
if filename == 'deploy.prototxt':
deploy_file_path.append('{}/{}'.format(patch_root_path, filename))
nets = []
dst_model_path = {}
for i in range(len(patch_folder)):
net_info = {}
prefix_folder = '{}/{}/prefixed'.format(root_path, patch_folder[i])
if not os.path.exists(prefix_folder):
os.makedirs(prefix_folder)
#for rename deploy
# net_info['originalNet'] = '{}/{}/deploy.prototxt'.format(root_path, patch_folder[i])
net_info['originalNet'] = deploy_file_path[i]
net_info['dstNet'] = '{}/{}/prefixed/prefixed_deploy.prototxt'.format(root_path, patch_folder[i])
# net_info['prefix'] = '{}/'.format(prefix_name[i].replace('.', '_'))
net_info['prefix'] = '{}/'.format(prefix_name[i])
net_info['outputLayerMap'] = '{}/{}/prefixed/{}_layer_map.txt'.format(root_path, patch_folder[i], prefix_name[i])
#for convert model
net_info['net_configuration'] = net_info['originalNet']
# net_info['pretrained_model'] = '{}/{}/{}'.format(root_path, patch_folder[i], model_name[i])
net_info['pretrained_model'] = model_path[i]
net_info['layer_map'] = net_info['outputLayerMap']
nets.append(net_info)
dst_model_path['dst_deploy'] = '{}/combine_{}_models_deploy.prototxt'.format(dst_path, len(nets))
dst_model_path['dst_model'] = '{}/combine_{}_models.caffemodel'.format(dst_path, len(nets))
return nets, dst_model_path
| [
"1277046020@qq.com"
] | 1277046020@qq.com |
0ce5c16aef6c0e679607779d696ea37852497c5e | ef42dad5d357f97b48b5e6d2a648610e3a99a689 | /Leetcode/LC555.py | 212b7dba17b0c058066db42c19da3ed419c27290 | [] | no_license | zbgzbg2007/FromOnlineJudge | 38e47e01c2eee227d8c8cd65e758eede546029a9 | 1ef919edfe806e206705f70a16faccd59ed4c918 | refs/heads/master | 2021-01-01T06:27:51.901094 | 2017-07-02T23:38:02 | 2017-07-02T23:38:02 | 28,985,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | class Solution(object):
def splitLoopedString(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
ans = ''.join(strs)
n = len(strs)
for i in range(n):
strs[i] = max(strs[i], strs[i][::-1])
for i in range(n):
for j in range(len(strs[i])):
t = max(strs[i][j:], strs[i][::-1][j:])
t += ''.join(strs[i+1:]+strs[:i])
if strs[i][j:] > strs[i][::-1][j:]:
t += strs[i][:j]
else:
t += strs[i][::-1][:j]
ans = max(ans, t)
return ans
| [
"noreply@github.com"
] | noreply@github.com |
3e0eaa1e9f438b01cd334c495bcc71d683966404 | a2d92d65b827b7eba2656308948e1e8e02bc151f | /mod_search_site/utils.py | ea5f2ae2c3e47cb7b65ee1a579aaba608b70e737 | [
"MIT",
"Apache-2.0"
] | permissive | phat34/poe-timeless-jewel-multitool | b25b7124f19849d330bc4cf2b65ff1c263355020 | cce6ead08a4cebd68cf72e67546bcc2c9d7bf7cd | refs/heads/master | 2020-07-10T03:18:34.093585 | 2019-08-21T12:55:25 | 2019-08-21T12:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import yaml
import os
import time
def get_config(module_name):
config_path = os.path.abspath('config.yml')
config = yaml.safe_load(open(config_path, 'r'))
return config[module_name]
| [
"johan_ahlkvist@hotmail.com"
] | johan_ahlkvist@hotmail.com |
2aa61da92ab7fef091fed3370a3c4cdae0b1145a | 9f460cf29996fc66dc7620e37501bb21689f3849 | /api2/manager.py | f242e4258c8c3eb6761060a10c39fde1383a7ebb | [] | no_license | chiangyiyang/Flask_SimpleRestfulApi | 9f898ede4e3daed3b4c26182e854f81471d6f6e0 | e6b6a5cd6a171c37a6886e3674b22c44a0c14b3b | refs/heads/master | 2022-10-08T16:43:58.374467 | 2019-10-07T05:39:47 | 2019-10-07T05:39:47 | 210,108,088 | 0 | 0 | null | 2022-09-16T18:11:16 | 2019-09-22T07:31:04 | Python | UTF-8 | Python | false | false | 465 | py | import time
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
from .main import app
from .api import api
from .models import db, models
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, models=models)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| [
"chiangyiyang@gmail.com"
] | chiangyiyang@gmail.com |
a774470a7e2db13264d325c1976ae8ec6dee8d00 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/keyvault/azure-keyvault-certificates/samples/contacts_async.py | e507aa27bc710c57bb4ac3716e6bdee9382a26e0 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 2,461 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.keyvault.certificates.aio import CertificateClient
from azure.keyvault.certificates import CertificateContact
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-certificates and azure-identity packages (pip install these)
#
# 3. Set up your environment to use azure-identity's DefaultAzureCredential. For more information about how to configure
# the DefaultAzureCredential, refer to https://aka.ms/azsdk/python/identity/docs#azure.identity.DefaultAzureCredential
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates basic CRUD operations for the certificate contacts for a key vault.
#
# 1. Create contacts (set_contacts)
#
# 2. Get contacts (get_contacts)
#
# 3. Delete contacts (delete_contacts)
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
# Instantiate a certificate client that will be used to call the service.
# Here we use the DefaultAzureCredential, but any azure-identity credential can be used.
VAULT_URL = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = CertificateClient(vault_url=VAULT_URL, credential=credential)
contact_list = [
CertificateContact(email="admin@contoso.com", name="John Doe", phone="1111111111"),
CertificateContact(email="admin2@contoso.com", name="John Doe2", phone="2222222222"),
]
# Creates and sets the certificate contacts for this key vault.
await client.set_contacts(contact_list)
# Gets the certificate contacts for this key vault.
contacts = await client.get_contacts()
for contact in contacts:
print(contact.name)
print(contact.email)
print(contact.phone)
# Deletes all of the certificate contacts for this key vault.
await client.delete_contacts()
print("\nrun_sample done")
await credential.close()
await client.close()
if __name__ == "__main__":
asyncio.run(run_sample())
| [
"noreply@github.com"
] | noreply@github.com |
8062c46e2d81dd3ae8d93027c9e33cd9d4d6a494 | 104d2dc6c2596c1856125fd1a179d4c39dd64abd | /saas-infrastructure/compute/api_auth/base/auth_exceptions.py | 481f492a1c058cb0296d85978e4a11204b525645 | [] | no_license | pixegami/saas-starter | c59947e0238025d05689f428f76fe1635dca27bc | 3ca211c18bf9d487917d7a1aaed8732738a8aec4 | refs/heads/master | 2023-07-04T02:48:12.279451 | 2021-07-31T10:32:56 | 2021-07-31T10:32:56 | 349,625,579 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | from api_utils import ApiException
class AuthExceptions:
# 401
AUTH_FAILURE = ApiException(401, "Incorrect password or email.")
INVALID_TOKEN = ApiException(
401, "Authentication token is invalid or has expired. Please sign in again."
)
MISSING_HEADER = ApiException(401, "Missing authentication header.")
# 402
MEMBERSHIP_NOT_VALID = ApiException(402, "Is not a premium member.")
MEMBERSHIP_EXPIRED = ApiException(402, "Membership has expired.")
# 404
USER_NOT_FOUND = ApiException(404, "This email account does not exist.")
KEY_NOT_FOUND = ApiException(404, "Items key not found in database.")
TOKEN_NOT_FOUND = ApiException(
404,
"This token no longer or exists or has expired. Please request a new token.",
)
# 500
DUPLICATE_ENTRIES_FOUND = ApiException(
500, "Unexpected duplicate entries were found for this key."
)
# 400
INVALID_EMAIL = ApiException(
400, "This email is invalid. Please enter a valid email address."
)
USER_ALREADY_EXISTS = ApiException(
400, "This email account has already been registered."
)
# 429
TOO_MANY_FAILED_ATTEMPTS = ApiException(
429,
"Too many failed sign-in attempts. Please wait up to 24 hours before trying again.",
)
| [
"pixegami@gmail.com"
] | pixegami@gmail.com |
9ec1c9ebe2e1db8624edd9b4c83f29e98893681f | 40596d4bc8f2f4a370e86551fe8725d60a92afbf | /examples_sqlmodel/sqlmodel_example.py | e36e6ddb700240bf6f211eed0084e7636d23ebb9 | [] | no_license | fullstack-spiderman/sqlmodel_example | 11822f575e1f4dc400c05264e71c009dd18cda09 | 3416be7c1f8b4b4401da9eec7af84be8118f52b4 | refs/heads/main | 2023-09-04T06:42:56.597500 | 2021-10-16T15:08:20 | 2021-10-16T15:08:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,341 | py | from operator import or_
from typing import List, Optional
from sqlmodel import create_engine, Field, Relationship, Session, SQLModel, col, or_, select
class Team(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
headquarters: str
heroes: List['Hero'] = Relationship(back_populates='team')
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
team_id: Optional[int] = Field(default=None, foreign_key='team.id')
team: Optional[Team] = Relationship(back_populates='heroes')
sql_filename = 'superheroe.db'
sql_uri = f'sqlite:///{sql_filename}'
engine = create_engine(sql_uri, echo=True)
def create_db_tables():
SQLModel.metadata.create_all(engine)
def create_teams():
with Session(engine) as session:
team_a = Team(name="Team Alpha", headquarters="Sharp Tower")
team_b = Team(name="Team Beta", headquarters="Aqua World")
teams = [team_a, team_b]
for team in teams:
session.add(team)
session.commit()
for team in teams:
session.refresh(team)
return {'team_a': team_a, 'team_b': team_b}
def create_heroes():
# h1 = Hero(name='Spiderman', secret_name='Peter Parker', age=19)
# h2 = Hero(name='Ironman', secret_name='Tony Stark')
teams = create_teams()
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson",
team_id=teams['team_a'].id)
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador",
team_id=teams['team_b'].id)
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48,
team_id=teams['team_a'].id)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32,
team_id=teams['team_b'].id)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35,
team_id=teams['team_b'].id)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America",
secret_name="Esteban Rogelios", age=93)
hero_6.team = teams.get('team_a')
# hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
# hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
# hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
# hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
# hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
# hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
# hero_7 = Hero(name="Captain North America",
# secret_name="Esteban Rogelios", age=93)
heroes = [hero_1, hero_2, hero_3, hero_4, hero_5, hero_6, hero_7]
with Session(engine) as session:
for hero in heroes:
session.add(hero)
session.commit()
for hero in heroes:
session.refresh(hero)
for hero in heroes:
print({f'{hero.id}': hero})
def create_heroes_with_relationship_attributes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(
name="Z-Force", headquarters="Sister Margaret’s Bar")
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team=team_z_force
)
hero_rusty_man = Hero(
name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers
)
hero_spider_boy = Hero(
name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
def select_team_heroes():
with Session(engine) as session:
statement = select(Team).where(Team.name == "Preventers")
result = session.exec(statement)
team_preventers = result.one()
print("Preventers heroes:", team_preventers.heroes)
def update_hero_team():
with Session(engine) as session:
hero = session.exec(
# select(Hero).where(
# Hero.id == 4
# )
select(Hero).where(
Hero.name == 'Dr. Weird'
)
).one_or_none()
print({'hero': hero, 'team': hero.team, 'team_id': hero.team_id})
hero.team = None
session.add(hero)
session.commit()
session.refresh(hero)
print({'hero': hero, 'team': hero.team, 'team_id': hero.team_id})
def select_heroes():
with Session(engine) as session:
# statement = select(Hero)
# results = session.exec(statement)
# # for hero in results:
# # print(hero)
# heroes = results.all()
heroes = session.exec(select(Hero)).all()
print({'heroes': heroes})
def select_heroes_with_teams():
with Session(engine) as session:
statement = select(Hero, Team).where(Hero.id == Team.id)
# heroes_with_teams = session.exec(statement).all()
# print({'heroes_with_teams': heroes_with_teams})
results = session.exec(statement)
for hero, team in results:
print({'hero': hero, 'team': team})
def select_heroes_with_teams_using_join():
with Session(engine) as session:
statement = select(Hero, Team).join(Team)
# heroes_with_teams = session.exec(statement).all()
# print({'heroes_with_teams': heroes_with_teams})
results = session.exec(statement)
for hero, team in results:
print({'hero': hero, 'team': team})
def select_heroes_teams_using_left_outer_join():
with Session(engine) as session:
statement = select(Hero, Team).join(Team, isouter=True)
results = session.exec(statement)
for hero, team in results:
print({'hero': hero, 'team': team})
def select_heroes_from_aqua():
with Session(engine) as session:
statement = select(Hero, Team).join(
Team).where(Team.headquarters == "Aqua World")
results = session.exec(statement)
for hero, team in results:
print({'hero': hero, 'team': team})
def select_hereos_where():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == 'Deadpond')
results = session.exec(statement)
for hero in results:
print(hero)
def select_senior_heroes():
with Session(engine) as session:
# can use col to avoid getting linter warnings about optional[int] that could have a None value
statement = select(Hero).where(col(Hero.age) >= 35)
results = session.exec(statement)
for hero in results:
print(hero)
def select_junior_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.age < 35)
results = session.exec(statement)
for hero in results:
print(hero)
def select_young_heroes():
with Session(engine) as session:
# statement = select(Hero).where(Hero.age >= 35).where(Hero.age < 40)
statement = select(Hero).where(Hero.age >= 35, Hero.age < 40)
results = session.exec(statement)
for hero in results:
print(hero)
def select_youngest_or_oldest_heroes():
with Session(engine) as session:
statement = select(Hero).where(
or_(
Hero.age <= 35, Hero.age > 90
)
)
results = session.exec(statement)
for hero in results:
print(hero)
def select_one_resulting_hero():
with Session(engine) as session:
statement = select(Hero).where(
col(Hero.age) == 35
)
result = session.exec(statement)
hero = result.first()
print({"hero": hero})
def select_hero_with_session_get():
with Session(engine) as session:
hero = session.get(Hero, 1)
print({'hero': hero})
def limit_heros_rows():
with Session(engine) as session:
statement = select(Hero).limit(3)
results = session.exec(statement)
heroes = results.all()
print(heroes)
def limit_heroes_rows_with_where():
with Session(engine) as session:
statement = select(Hero).where(
col(Hero.age) > 32
).limit(3)
results = session.exec(statement)
heroes = results.all()
print({'heroes': heroes})
def offset_and_limit_heroes_rows():
with Session(engine) as session:
statement = select(Hero).offset(6).limit(3)
results = session.exec(statement).all()
print(results)
def update_hero():
with Session(engine) as session:
statement = select(Hero).where(
col(Hero.name) == 'Spider-Boy'
)
results = session.exec(statement)
hero = results.one()
print({'hero': hero})
hero.age = 45
session.add(hero)
statement = select(Hero).where(
col(Hero.name) == 'Rusty-Man'
)
results = session.exec(statement)
hero_2 = results.one()
print({'hero_2': hero_2})
hero_2.age = 50
session.add(hero_2)
session.commit()
session.refresh(hero)
session.refresh(hero_2)
print({'hero': hero, 'hero_2': hero_2})
def delete_hero():
with Session(engine) as session:
statement = select(Hero).where(
col(Hero.name) == 'Dr. Weird'
)
results = session.exec(statement)
hero = results.one()
print({'hero': hero})
session.delete(hero)
session.commit()
print({'hero': hero, 'status': 'deleted'})
statement = select(Hero).where(Hero.name == "Dr. Weird")
results = session.exec(statement)
hero = results.first()
print({'hero': hero})
def create_team_with_heroes():
with Session(engine) as session:
hero_black_lion = Hero(
name="Black Lion", secret_name="Trevor Challa", age=35)
hero_sure_e = Hero(name="Princess Sure-E", secret_name="Sure-E")
team_wakaland = Team(
name="Wakaland",
headquarters="Wakaland Capital City",
heroes=[hero_black_lion, hero_sure_e],
)
hero_tarantula = Hero(
name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_dr_weird = Hero(
name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_cap = Hero(
name="Captain North America", secret_name="Esteban Rogelios", age=93
)
team_wakaland.heroes.append(hero_tarantula)
team_wakaland.heroes.append(hero_dr_weird)
team_wakaland.heroes.append(hero_cap)
session.add(team_wakaland)
session.commit()
session.refresh(team_wakaland)
session.refresh(hero_tarantula)
session.refresh(hero_dr_weird)
session.refresh(hero_cap)
print("Team Wakaland:", team_wakaland)
print("Preventers new hero:", hero_tarantula)
print("Preventers new hero:", hero_dr_weird)
print("Preventers new hero:", hero_cap)
def main():
# create_db_tables()
# create_heroes()
# create_team_with_heroes()
# create_heroes_with_relationship_attributes()
# create_heroes_with_relationship_attributes()
# select_team_heroes()
update_hero_team()
# select_heroes()
# select_hereos_where()
# select_senior_heroes()
# select_junior_heroes()
# select_young_heroes()
# select_youngest_or_oldest_heroes()
# select_one_resulting_hero()
# select_hero_with_session_get()
# limit_heros_rows()
# offset_and_limit_heroes_rows()
# limit_heroes_rows_with_where()
# update_hero()
# delete_hero()
# select_heroes_with_teams()
# select_heroes_with_teams_using_join()
# select_heroes_teams_using_left_outer_join()
# select_heroes_from_aqua()
if __name__ == '__main__':
main()
| [
"arjun.umathanu@gmail.com"
] | arjun.umathanu@gmail.com |
3629fd59c9e3499fccaa6218128221056089d372 | e4a949772fe41029f6c9856ad218f76db5cde4d6 | /freshTomatoes/wsgi.py | eb073bd36072dece1078b044c994ade18cb1bceb | [] | no_license | ganap/FreshTomatoes-MovieReview | a672101baa5e2536c5ac8249fc883720c51c54a4 | 01e248582df5b520239cdba2104901043f572cdc | refs/heads/master | 2021-01-10T21:44:19.382678 | 2015-09-16T14:34:02 | 2015-09-16T14:34:02 | 42,589,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for FreshTomatoes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "freshTomatoes.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"gana.pemmanda@gmail.com"
] | gana.pemmanda@gmail.com |
540f377b1d5277aa11ba94c8ecd27e6051bec033 | bbb4548cbfe4c83f656720beb51ecbc96007bac7 | /iot/api-client/codelabs/gateway.py | 0fd5e3cac0aa233fd223a1bbf312cf819149e229 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | awstrainer007/python-docs-samples | 9fb5aef99958ab5541a16665a819566c9bdd8c52 | c81ca0858eef1046cecb84fcab6f5a72d175c3a8 | refs/heads/master | 2022-11-08T20:53:32.911206 | 2020-06-23T22:01:53 | 2020-06-23T22:01:53 | 274,527,675 | 1 | 0 | Apache-2.0 | 2020-06-23T23:14:46 | 2020-06-23T23:14:45 | null | UTF-8 | Python | false | false | 12,404 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import json
import os
import socket
import ssl
import time
from time import ctime
import jwt
import paho.mqtt.client as mqtt
# Hostname of '' means using the IP address of the machine.
HOST = ''
PORT = 10000
# Maximum UDP datagram size accepted from local devices.
BUFF_SIZE = 2048
ADDR = (HOST, PORT)
# Module-level side effect: the non-blocking UDP server socket that local
# devices send their JSON commands to is created and bound at import time.
udpSerSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udpSerSock.setblocking(False)
udpSerSock.bind(ADDR)
class GatewayState:
    """Mutable connection state shared by main() and the MQTT callbacks.

    The original version kept every field as a *class* attribute, so the
    dict/bool fields were shared between all instances (the classic
    mutable-class-attribute pitfall).  They are now initialised per
    instance, which preserves the behaviour of the single
    ``gateway_state`` singleton while making extra instances safe.
    """

    def __init__(self):
        # This is the topic that the device will receive configuration
        # updates on.
        self.mqtt_config_topic = ''
        # Host and port the gateway will connect to.
        self.mqtt_bridge_hostname = ''
        self.mqtt_bridge_port = 8883
        # PUBLISH messages waiting for a PUBACK, keyed by the 'mid'
        # returned by publish(); values are (client_addr, response).
        self.pending_responses = {}
        # SUBSCRIBE messages waiting for a SUBACK, keyed by 'mid'.
        self.pending_subscribes = {}
        # Active subscriptions: topic -> UDP client address.
        self.subscriptions = {}
        # True while the MQTT client is connected.
        self.connected = False
# Singleton instance shared by main() and the MQTT callbacks below.
gateway_state = GatewayState()
def create_jwt(project_id, private_key_file, algorithm, jwt_expires_minutes):
    """Create a JWT (https://jwt.io) used to authorise the MQTT connection.

    Args:
        project_id: The cloud project ID this device belongs to.
        private_key_file: Path to a file containing an RSA256 or ES256
            private key.
        algorithm: Encryption algorithm to use, 'RS256' or 'ES256'.
        jwt_expires_minutes: Token lifetime in minutes; once it expires
            the client is disconnected and a new JWT must be generated.

    Returns:
        The encoded JWT for the given project and key.

    Raises:
        ValueError: If private_key_file does not contain a known key.
    """
    claims = {
        # The time that the token was issued at.
        'iat': datetime.datetime.utcnow(),
        # The time the token expires.
        'exp': (
            datetime.datetime.utcnow() +
            datetime.timedelta(minutes=jwt_expires_minutes)),
        # The audience field should always be set to the GCP project id.
        'aud': project_id
    }

    with open(private_key_file, 'r') as key_file:
        private_key = key_file.read()

    print('Creating JWT using {} from private key file {}'.format(
        algorithm, private_key_file))

    return jwt.encode(claims, private_key, algorithm=algorithm)
# [END iot_mqtt_jwt]
# [START iot_mqtt_config]
def error_str(rc):
    """Render a Paho return code as a human readable 'code: description'."""
    description = mqtt.error_string(rc)
    return '{}: {}'.format(rc, description)
def on_connect(client, unused_userdata, unused_flags, rc):
    """Callback for when a device connects.

    Marks the shared gateway state as connected and (re)subscribes to
    the gateway's config topic so configuration pushes are received.
    """
    print('on_connect', mqtt.connack_string(rc))
    gateway_state.connected = True
    # Subscribe to the config topic.
    client.subscribe(gateway_state.mqtt_config_topic, qos=1)
def on_disconnect(client, unused_userdata, rc):
    """Paho callback for when a device disconnects.

    Flags the shared state as disconnected and immediately reconnects
    using the host/port stored on gateway_state.
    """
    print('on_disconnect', error_str(rc))
    gateway_state.connected = False
    # re-connect
    # NOTE: should implement back-off here, but it's a tutorial
    client.connect(
        gateway_state.mqtt_bridge_hostname, gateway_state.mqtt_bridge_port)
def on_publish(unused_client, userdata, mid):
    """Paho callback when a message is sent to the broker.

    Pops the UDP client that requested this publish (keyed by message
    id) and forwards the stored acknowledgement string back to it.
    """
    print('on_publish, userdata {}, mid {}'.format(
        userdata, mid))
    try:
        client_addr, message = gateway_state.pending_responses.pop(mid)
        udpSerSock.sendto(message.encode(), client_addr)
        print('Pending response count {}'.format(
            len(gateway_state.pending_responses)))
    except KeyError:
        # PUBACK for a mid that was never recorded (or already popped).
        print('Unable to find key {}'.format(mid))
def on_subscribe(unused_client, unused_userdata, mid, granted_qos):
    """Paho SUBACK callback: ack the UDP client that asked to subscribe."""
    print('on_subscribe: mid {}, qos {}'.format(mid, granted_qos))
    try:
        # Entry stays in pending_subscribes (not popped) -- unlike
        # pending_responses above.
        client_addr, response = gateway_state.pending_subscribes[mid]
        udpSerSock.sendto(response.encode(), client_addr)
    except KeyError:
        print('Unable to find mid: {}'.format(mid))
def on_message(unused_client, unused_userdata, message):
    """Callback when the device receives a message on a subscription.

    Relays the raw payload over UDP to whichever client subscribed to
    the message's topic.
    """
    payload = message.payload
    qos = message.qos
    print('Received message \'{}\' on topic \'{}\' with Qos {}'.format(
        payload.decode("utf-8"), message.topic, qos))
    try:
        client_addr = gateway_state.subscriptions[message.topic]
        udpSerSock.sendto(payload, client_addr)
        print('Sent message to device')
    except KeyError:
        print('Nobody subscribes to topic {}'.format(message.topic))
def get_client(
        project_id, cloud_region, registry_id, gateway_id, private_key_file,
        algorithm, ca_certs, mqtt_bridge_hostname, mqtt_bridge_port,
        jwt_expires_minutes):
    """Create, configure and connect the gateway's MQTT client.

    The client_id is a unique string that identifies this device; for
    Google Cloud IoT Core it must follow the projects/.../devices/...
    template below.  Returns the connected paho client with all of the
    module's callbacks registered.
    """
    client_template = 'projects/{}/locations/{}/registries/{}/devices/{}'
    client_id = client_template.format(
        project_id, cloud_region, registry_id, gateway_id)
    client = mqtt.Client(client_id)
    # With Google Cloud IoT Core, the username field is ignored, and the
    # password field is used to transmit a JWT to authorize the device.
    client.username_pw_set(
        username='unused',
        password=create_jwt(
            project_id, private_key_file, algorithm,
            jwt_expires_minutes))
    # Enable SSL/TLS support.
    client.tls_set(ca_certs=ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2)
    # Register message callbacks.
    # https://eclipse.org/paho/clients/python/docs/
    # describes additional callbacks that Paho supports. In this example,
    # the callbacks just print to standard out.
    client.on_connect = on_connect
    client.on_publish = on_publish
    client.on_disconnect = on_disconnect
    client.on_message = on_message
    client.on_subscribe = on_subscribe
    # Connect to the Google MQTT bridge.
    client.connect(mqtt_bridge_hostname, mqtt_bridge_port)
    return client
# [END iot_mqtt_config]
def parse_command_line_args():
    """Parse command line arguments.

    Required flags: --registry_id, --gateway_id, --private_key_file and
    --algorithm.  --project_id falls back to the GOOGLE_CLOUD_PROJECT
    environment variable; the remaining flags have working defaults
    (note the JWT lifetime defaults to 1200 minutes).
    """
    parser = argparse.ArgumentParser(description=(
        'Example Google Cloud IoT Core MQTT device connection code.'))
    parser.add_argument(
        '--project_id',
        default=os.environ.get('GOOGLE_CLOUD_PROJECT'),
        help='GCP cloud project name')
    parser.add_argument(
        '--registry_id', required=True,
        help='Cloud IoT Core registry id')
    parser.add_argument(
        '--gateway_id', required=True,
        help='Cloud IoT Core gateway id')
    parser.add_argument(
        '--private_key_file',
        required=True, help='Path to private key file.')
    parser.add_argument(
        '--algorithm',
        choices=('RS256', 'ES256'),
        required=True,
        help='Which encryption algorithm to use to generate the JWT.')
    parser.add_argument(
        '--cloud_region', default='us-central1',
        help='GCP cloud region')
    parser.add_argument(
        '--ca_certs',
        default='roots.pem',
        help=('CA root from https://pki.google.com/roots.pem'))
    parser.add_argument(
        '--mqtt_bridge_hostname',
        default='mqtt.googleapis.com',
        help='MQTT bridge hostname.')
    parser.add_argument(
        '--mqtt_bridge_port',
        choices=(8883, 443),
        default=8883,
        type=int,
        help='MQTT bridge port.')
    parser.add_argument(
        '--jwt_expires_minutes',
        default=1200,
        type=int,
        help=('Expiration time, in minutes, for JWT tokens.'))
    return parser.parse_args()
def attach_device(client, device_id):
    """Publish an attach request for *device_id* through the gateway.

    Returns whatever ``client.publish`` returns (for paho clients, a
    (result, mid)-style object).
    """
    topic = '/devices/{}/attach'.format(device_id)
    print(topic)
    return client.publish(topic, "", qos=1)
def detatch_device(client, device_id):
    """Publish a detach request for *device_id* through the gateway.

    (Name keeps the original 'detatch' spelling because main() calls it.)
    Returns whatever ``client.publish`` returns.
    """
    topic = '/devices/{}/detach'.format(device_id)
    print(topic)
    return client.publish(topic, "", qos=1)
# [START iot_mqtt_run]
def main():
    """Run the gateway: bridge the local UDP socket to Cloud IoT MQTT.

    Commands arrive on ``udpSerSock`` as JSON objects with an ``action``
    ('event', 'attach', 'detach' or 'subscribe') and a ``device`` id;
    the matching MQTT operation is issued and the acknowledgement is
    sent back to the originating UDP client from the Paho callbacks.
    """
    global gateway_state
    args = parse_command_line_args()

    # Reuse the already-parsed args instead of parsing argv a second time.
    gateway_state.mqtt_config_topic = '/devices/{}/config'.format(
        args.gateway_id)
    gateway_state.mqtt_bridge_hostname = args.mqtt_bridge_hostname
    # BUG FIX: this previously assigned args.mqtt_bridge_hostname, which
    # left a hostname string in the port field and broke the reconnect
    # logic in on_disconnect().
    gateway_state.mqtt_bridge_port = args.mqtt_bridge_port

    client = get_client(
        args.project_id, args.cloud_region, args.registry_id,
        args.gateway_id, args.private_key_file, args.algorithm,
        args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port,
        args.jwt_expires_minutes)

    while True:
        client.loop()
        if gateway_state.connected is False:
            print('connect status {}'.format(gateway_state.connected))
            time.sleep(1)
            continue

        # The UDP socket is non-blocking; no datagram just means "retry".
        try:
            data, client_addr = udpSerSock.recvfrom(BUFF_SIZE)
        except socket.error:
            continue
        print('[{}]: From Address {}:{} receive data: {}'.format(
            ctime(), client_addr[0], client_addr[1], data.decode("utf-8")))

        command = json.loads(data.decode('utf-8'))
        if not command:
            print('invalid json command {}'.format(data))
            continue

        action = command["action"]
        device_id = command["device"]
        # JSON acknowledgement template sent back to the UDP client.
        template = '{{ "device": "{}", "command": "{}", "status" : "ok" }}'

        if action == 'event':
            print('Sending telemetry event for device {}'.format(device_id))
            payload = command["data"]
            mqtt_topic = '/devices/{}/events'.format(device_id)
            print('Publishing message to topic {} with payload \'{}\''.format(
                mqtt_topic, payload))
            _, event_mid = client.publish(mqtt_topic, payload, qos=1)
            # The ack is delivered from on_publish() once PUBACK arrives.
            response = template.format(device_id, 'event')
            print('Save mid {} for response {}'.format(event_mid, response))
            gateway_state.pending_responses[event_mid] = (
                client_addr, response)
        elif action == 'attach':
            _, attach_mid = attach_device(client, device_id)
            response = template.format(device_id, 'attach')
            print('Save mid {} for response {}'.format(attach_mid, response))
            gateway_state.pending_responses[attach_mid] = (
                client_addr, response)
        elif action == 'detach':
            _, detach_mid = detatch_device(client, device_id)
            response = template.format(device_id, 'detach')
            print('Save mid {} for response {}'.format(detach_mid, response))
            gateway_state.pending_responses[detach_mid] = (
                client_addr, response)
        elif action == "subscribe":
            print('subscribe config for {}'.format(device_id))
            subscribe_topic = '/devices/{}/config'.format(device_id)
            _, mid = client.subscribe(subscribe_topic, qos=1)
            response = template.format(device_id, 'subscribe')
            gateway_state.subscriptions[subscribe_topic] = (client_addr)
            print('Save mid {} for response {}'.format(mid, response))
            gateway_state.pending_subscribes[mid] = (client_addr, response)
        else:
            print('undefined action: {}'.format(action))

    # Unreachable (the loop above never breaks); kept from the original.
    print('Finished.')
# [END iot_mqtt_run]


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
33bd43dbfb2a532027ccd24a9b56dc112c6b10fb | 4de03eecadc4c69caf792f4773571c2f6dbe9d68 | /seahub/utils/ip.py | 15a59d4d96aa5117b9ea3c600e56bdf37f68d062 | [
"Apache-2.0"
] | permissive | Tr-1234/seahub | c1663dfd12f7584f24c160bcf2a83afdbe63a9e2 | ed255e0566de054b5570218cb39cc320e99ffa44 | refs/heads/master | 2022-12-23T16:20:13.138757 | 2020-10-01T04:13:42 | 2020-10-01T04:13:42 | 300,138,290 | 0 | 0 | Apache-2.0 | 2020-10-01T04:11:41 | 2020-10-01T04:11:40 | null | UTF-8 | Python | false | false | 232 | py | def get_remote_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR', '-')
return ip
| [
"colinsippl@gmx.de"
] | colinsippl@gmx.de |
5c6cecb950cc547d5f94e66adb0db5ad6c9e5232 | 0c70e9181d9d79273f6286c605065abd8b94c978 | /solution/model2/solution3.py | 89fc3a5c56cca3e44d9ef1e0f153b47782a2eb32 | [] | no_license | rfiischer/unbounded | 2358ea9cf0460233ab5e9e1eb3714d518f5b161e | 4a50c2e8fdcd053af9b1e9ea37308cd1343265d1 | refs/heads/main | 2023-05-31T08:00:38.277649 | 2021-06-08T05:31:23 | 2021-06-08T05:31:23 | 343,880,827 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | import numpy as np
from scipy.io import savemat
from solution.model2.funcs import optimize1
# Angle resolution handed to the per-user optimiser (consumed inside
# optimize1; exact semantics depend on that project function).
n_angles = 1024
# One 4096-element solution vector per user, for 50 users.
solution_linear = np.zeros((4096, 50))
for i in range(50):
    print(f"Linear, user {i}")
    solution_linear[:, i] = optimize1(i, n_angles)
# Persist all users' solutions to a MATLAB file under the key 'theta'.
savemat('model_solution/solution3_linear.mat', {'theta': solution_linear})
| [
"rfischeracc@gmail.com"
] | rfischeracc@gmail.com |
a1f4f12a39ea5f221501e6c02eb4d65abb5df7fa | 1a00c97bce8e9532f7a38980cae40a27d6192f11 | /do/finishLesson.py | b0606b15a12b63a62da8682a66cda3bd2bdb1644 | [] | no_license | fuhailong1998/fuckQHD | 9c24f42f3413f2745c3e884159135b72831f87d2 | fa99773573966ce19e6c44210c124fb95a1c062d | refs/heads/main | 2023-03-27T03:51:18.702024 | 2021-04-01T03:55:10 | 2021-04-01T03:55:10 | 348,739,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
@file : finishLesson.py
@author : fxkxb.com
@date : 2021/3/15 19:46
@description :
"""
from ENV import header
from do.checkFace import checkFace
from do.finishExam import finishExam
from func.getTaskList import getTaskList
def finishLesson(sess, access_token, data, chdata):
    """Report every video in the user's task list as watched.

    For each courseware item, posts watch-time updates to the
    jxjy.qhdjxjy.com endpoint and runs the face check (checkFace)
    between the two posts.

    Args:
        sess: requests session, already logged in.
        access_token: dict holding the 'access-token' value.
        data: request parameter dict; mutated in place ('access-token'
            and 'page' keys removed, watch-time fields merged in).
        chdata: face-check parameter dict; taskId/taskName updated per
            courseware item.
    """
    page = data
    task = getTaskList(sess, access_token, page)
    # print(task)
    ioo = 1  # 1-based progress counter for console output
    url = "https://jxjy.qhdjxjy.com/index.php/study/watchtime/watch-time?access-token="+access_token['access-token']
    del page['access-token']
    del page['page']
    for each in task:
        temp = {
            'ware_id': each['ware_id'],
            'watch_time': each['hours'],
            'total_time': each['hours']
        }
        page.update(temp)
        chdata['taskId'] = each['ware_id']
        chdata['taskName'] = each['courseware_name']
        try:
            # NOTE(review): this rebinds temp['watch_time'] AFTER
            # page.update(temp), so the 95% value is never copied into
            # `page` and the first post still sends the full hours --
            # confirm whether page should be re-updated here.
            temp['watch_time'] = float(each['hours'])*0.95
        except ValueError:
            # 'hours' was not numeric; fall back to 95% of 60 units.
            temp['watch_time'] = 60.0*0.95
        sess.post(url=url, headers=header, data=page)
        checkFace(sess, access_token, chdata)
        temp['watch_time'] = each['hours']
        text = sess.post(url=url, headers=header, data=page).text
        print(str(ioo)+"/"+str(len(task))+" Video Competed!" + text + "\n")
        ioo += 1
| [
"1414390893@qq.com"
] | 1414390893@qq.com |
974f336c7e01d4e4e036973bd52ef2a881a24bfc | bdb40d934790c6d28535a0b3a8c3aa720689ed12 | /flask_app/controllers/user_controller.py | 497cbb06f7a628266ae67845db6c30f07648af0a | [] | no_license | Nuanjan/recipes-belt-prep | ea2e5fe15cb465d22f01c9a0e3a055340102b44e | 252dc94adb61dde4560ec737f1836e92b8e451bf | refs/heads/master | 2023-08-29T05:24:30.631096 | 2021-10-12T21:08:54 | 2021-10-12T21:08:54 | 415,809,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,767 | py | from flask.helpers import flash
from flask_app import app
from flask import redirect, render_template, session, request, url_for
from flask_app.models.user import User
from flask_app.models.recipe import Recipe
from flask_bcrypt import Bcrypt
bcrypt = Bcrypt(app) # we are creating an object called bcrypt,
@app.route('/')
def index():
    """Landing page: renders the combined register/login view.

    'isShow' remembers which form (register vs login) was last
    submitted so the template can re-open that tab.
    """
    isShow = ""
    if 'isShow' in session:
        isShow = session["isShow"]
    return render_template('index.html', isShow=isShow)
@app.route('/register-login', methods=['POST'])
def register_user():
    """Handle the combined register/login POST.

    The hidden 'which_form' field selects the branch.  Registration
    validates, hashes the password and stores the new user id in the
    session; login authenticates against the stored bcrypt hash.  Both
    branches redirect to '/' (with flashed errors) on failure and to
    the dashboard on success.

    NOTE(review): if which_form is neither "register" nor "login" the
    view falls through and returns None (a 500 in Flask) -- confirm the
    template can only submit those two values.
    """
    session['isShow'] = request.form['which_form']
    if request.form['which_form'] == "register":
        if not User.validate_user(request.form):
            return redirect('/')
        hashed_password = bcrypt.generate_password_hash(
            request.form['password'])
        data = {
            "first_name": request.form['first_name'],
            "last_name": request.form['last_name'],
            "email": request.form['e_mail'],
            "password": hashed_password
        }
        addUser = User.add_user(data)
        if not addUser:
            return redirect('/')
        session['user_id'] = addUser
        return redirect('/user_dashboard')
    elif request.form['which_form'] == "login":
        data = {"email": request.form['e_mail']}
        # check if email exist in database
        user_in_db = User.get_user_by_email(data)
        validation_data = {
            "user_in_db": user_in_db,
            "password": request.form["password"]
        }
        if not User.validate_login_user(validation_data):
            return redirect('/')
        elif not bcrypt.check_password_hash(user_in_db.password, request.form['password']):
            # NOTE(review): leftover debug print of the hash check result.
            print(bcrypt.check_password_hash(
                user_in_db.password, request.form['password']))
            flash("Invalid user/password")
            return redirect('/')
        session['isShow'] = request.form['which_form']
        session['user_id'] = user_in_db.id
        return redirect('/user_dashboard')
@app.route('/user_dashboard')
def user_dashboard():
    """Dashboard: all recipes plus the logged-in user's details.

    Redirects to /forbidden when no user id is present in the session.
    """
    show_table = ""
    if 'user_id' in session:
        data = {
            "id": session['user_id']
        }
        one_user = User.get_user_by_id(data)
        all_recipes = Recipe.all_recipes_with_users()
        print(len(all_recipes), " this is all recipes")
        if all_recipes:
            # Template flag: only render the recipes table when non-empty.
            show_table = "true"
        return render_template('user_dashboard.html', all_recipes=all_recipes, one_user=one_user, show_table=show_table)
    else:
        return redirect('/forbidden')
@app.route('/logout')
def log_out():
    """Clear the whole session and return to the landing page."""
    session.clear()
    return redirect('/')
@app.route('/forbidden')
def unauthorize():
    """Error page shown when a view is requested without a login."""
    return render_template('forbidden.html')
| [
"nuanjan.schluntz@gmail.com"
] | nuanjan.schluntz@gmail.com |
49b4daaa6dc9149ee8413913d689fe2635a10972 | 896d7833597b2b02f3902a2b887417d67531c5ce | /ihome_demo/ihome/utils/commons.py | c223fe90074d18de46349e6630e86002e2e3d93b | [] | no_license | longshushu/ihome | fa3e320a8c23fbdde9fd94bc158029cfa9d96745 | 67267a82b5811fa84d760659beb4e8e2523bee06 | refs/heads/master | 2020-04-02T09:48:09.821626 | 2018-10-23T10:53:54 | 2018-10-23T10:53:54 | 154,305,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # coding=utf-8
from werkzeug.routing import BaseConverter
from flask import session, g, jsonify
import functools
from response_code import RET
class ReConvert(BaseConverter):
    """Werkzeug URL converter matching a caller-supplied regular
    expression, used in route rules as <re(r"..."):name>."""
    def __init__(self, url_map, regex):
        super(ReConvert, self).__init__(url_map)
        self.regex = regex
# Decorator that rejects requests from users who are not logged in.
def login_required(view_func):
    """Wrap *view_func* so it only runs for logged-in users.

    The session's user_id is exposed on flask.g for the view; anonymous
    requests get a JSON error payload instead of the view's response.
    """
    # functools.wraps copies view_func's metadata onto the wrapper.
    @functools.wraps(view_func)
    def wrapper(*args, **kwargs):
        user_id = session.get("user_id")
        if user_id is None:
            # Not logged in: answer with the session-error payload.
            return jsonify(errno=RET.SESSIONERR, errmsg="用户未登录")
        # Save the id on the request-global object, then run the view.
        g.user_id = user_id
        return view_func(*args, **kwargs)
    return wrapper
"2315333424@qq.com"
] | 2315333424@qq.com |
ea2f997b62563019e7c807ba6907e96ba06811c3 | ae301cbec2a4f58778c5c6e66a9768af02d9ff61 | /endpoints/note.py | 2b1d5451a3299002cf8d7fd398bcc0e3c214e0a3 | [] | no_license | ThiccShibe/meme-server | b3e07e8125be25b0442ba4af133d881674096299 | 17843e986073658ade419bdee08f8830e5b1c205 | refs/heads/master | 2020-04-05T19:25:31.609364 | 2018-11-07T16:17:17 | 2018-11-07T16:17:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
from flask import send_file
from utils.endpoint import Endpoint
from utils.textutils import wrap
class Note(Endpoint):
    """Meme endpoint that draws the given text onto the note template.

    ``avatars`` and ``usernames`` are part of the shared
    Endpoint.generate signature but are unused by this meme.
    """
    def generate(self, avatars, text, usernames):
        base = Image.open('assets/note/note.png').convert('RGBA')
        # We need a text layer here for the rotation
        text_layer = Image.new('RGBA', base.size)
        font = ImageFont.truetype(font='assets/fonts/sans.ttf', size=16)
        canv = ImageDraw.Draw(text_layer)
        # Wrap to width 150 (units per utils.textutils.wrap, presumably
        # pixels) so the text stays inside the note.
        text = wrap(font, text, 150)
        canv.text((455, 420), text, font=font, fill='Black')
        # Tilt the text to match the note's angle in the template image.
        text_layer = text_layer.rotate(-23, resample=Image.BICUBIC)
        base.paste(text_layer, (0, 0), text_layer)
        b = BytesIO()
        base.save(b, format='png')
        b.seek(0)
        return send_file(b, mimetype='image/png')
def setup():
    """Endpoint factory called by the plugin loader."""
    endpoint = Note()
    return endpoint
| [
"perry@frasers.org"
] | perry@frasers.org |
d44bd208f86c54c959cb1b2814f96ae3aa9ab1b8 | d172390766ea7181669bfee038dd46b0214b1ef6 | /search.py | 6a1978ca19cd7cec3332a01c71fa1af2b5219bf3 | [] | no_license | VSkapenko/python_home_tasks | 60c41cb5965665b00f93dddad2d2762017ca8bbe | 9b23627d07b6a6afa27993da0a2c15a65273beb2 | refs/heads/master | 2023-06-20T19:02:52.866904 | 2021-08-01T20:10:23 | 2021-08-01T20:10:23 | 387,208,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | import random
y = [random.randint(0, 9) for i in range(10)]
print(y)
z = int(input("Введите число : "))
b = y.index(z)
print("число " + str(z) + 'находится в списке под номером ' + str(b))
| [
"noreply@github.com"
] | noreply@github.com |
d0638c6ae59e6d09d07188bdd1eac27c052038b4 | f827b920c0a6adb08494fe6ffdfc542323d945c5 | /code/DP/tushar/string_interleaving.py | 8fde5670ab65540cb5e3532684ebf4101da04275 | [] | no_license | rawatsushil/datastructure | 3bf3509b3cc44d7dfb0bdf6e1b6280555252c1a8 | b7a183d2fc0803c9c143dc9bb012a8c776e0a55f | refs/heads/master | 2020-06-05T22:56:53.710453 | 2019-06-18T15:40:32 | 2019-06-18T15:40:32 | 192,568,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | class StringInterleaving:
def __init__(self, s1, s2, s3):
self.s1 = s1
self.s2 = s2
self.s3 = s3
self.arr = [[0 for x in range(len(self.s2) +1)] for y in range(len(self.s1) +1)]
self.make_matrix(self.arr)
def make_matrix(self, arr):
col_len = len(self.s2)
row_len = len(self.s1)
for i in range(0, row_len +1):
for j in range(0, col_len +1):
string_len = i+j
if i == 0 and j == 0:
arr[0][0] = 'T'
elif i == 0 and (self.s2[j-1] == self.s3[string_len-1]):
arr[i][j] = 'T'
elif j == 0 and (self.s1[i-1] == self.s3[string_len-1]):
arr[i][j] = 'T'
elif (self.s2[j-1] == self.s3[string_len-1]) and (self.s1[i-1] == self.s3[string_len-1]):
if arr[i-1][j] or arr[i][j-1] == 'T':
arr[i][j] ='T'
elif (self.s2[j-1] == self.s3[string_len-1]) and arr[i][j-1] =='T':
arr[i][j] = 'T'
elif (self.s1[i-1] == self.s3[string_len -1]) and arr[i-1][j] == 'T':
arr[i][j] = 'T'
    def is_interleaved(self):
        # The bottom-right cell covers all of s1 and all of s2.
        return self.arr[len(self.s1)][len(self.s2)] == 'T'
if __name__ == '__main__':
    # Demo: "XXY" cannot be formed by interleaving "YX" and "X".
    s1 = "YX"
    s2 = "X"
    s3 = "XXY"
    si = StringInterleaving(s1,s2,s3)
    print (si.is_interleaved())
| [
"noreply@github.com"
] | noreply@github.com |
06530ac12e6a05051acffad3236dc40602d54bda | 3ea3fd470a958f8db7d72d3fe284bfeb82c6cce8 | /stdplugins/pingbrog.py | bd617853a9e9782644819acc103bfcb677f05590 | [
"Apache-2.0"
] | permissive | annihilatorrrr/PepeBot | 29f7fcaee5225428b8009b6cb3b093c21b3a3e60 | 7394b1d958ac789fe5f427e632d566052634e680 | refs/heads/master | 2023-03-07T22:19:34.939628 | 2020-05-28T14:20:48 | 2020-05-28T14:20:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from datetime import datetime
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="ping ?(.*)", allow_sudo=True))
async def _(event):
    """.ping command: reply 'Pong..Speed!', then edit in the round trip."""
    if event.fwd_from:
        return
    start = datetime.now()
    mole = await event.reply("Pong..Speed!")
    end = datetime.now()
    # BUG FIX: timedelta.microseconds only carries the sub-second part,
    # so any round trip longer than one second was under-reported;
    # total_seconds() measures the whole interval.
    ms = (end - start).total_seconds() * 1000
    await mole.edit("Pong..Speed!\n`{}ms`".format(ms))
| [
"59915404+Sur-vivor@users.noreply.github.com"
] | 59915404+Sur-vivor@users.noreply.github.com |
c2d1e14450b6743f868b43606463e22a44bd92e0 | 1b10d96758c45e1746d8ca69b3222fc90aae012e | /payslip_payroll/models/models.py | 58dea6873291cb4f970116ad101da6f84df50833 | [] | no_license | hassanfadl/livedhalts | 1565e3b9dec1b470c1700b90c927724cf02a99ae | dab87ade79a97b616fda3b52c38b2cea695ee4d5 | refs/heads/main | 2023-06-29T04:29:34.679643 | 2021-07-15T11:11:43 | 2021-07-15T11:11:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,244 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
from datetime import datetime
class payroll(models.Model):
    """Extends hr.payslip with month/year/date labels and an
    amount-in-words field for the printed payslip report.

    NOTE(review): the compute methods below carry no @api.depends and
    mostly assume a single record in self -- confirm against the Odoo
    version in use.
    """
    _inherit = 'hr.payslip'
    month = fields.Char("Month", compute="compute_month")
    year = fields.Char("Year", compute="compute_year")
    dept = fields.Char("Department")
    # date = fields.Date("Date")
    date_start = fields.Char("Date To", compute="compute_dates")
    amount_in_word = fields.Char("Amount In Words", compute="amount_in_words")
    def amount_in_words(self):
        # Spell out the 'Net Salary' line amount using the PKR currency's
        # amount_to_text helper.
        currency_id = self.env['res.currency'].search([('name', '=', 'PKR')], limit=1)
        print('as', currency_id)
        word = ''
        for i in self.line_ids:
            if i.name == 'Net Salary':
                word = str(currency_id.amount_to_text(i.amount)) + 's only.'
        self.amount_in_word = word
    def compute_month(self):
        # Month name (e.g. 'January') taken from the payslip start date.
        for i in self:
            a = i.date_from.strftime("%B")
            i.month = a
    def compute_year(self):
        # Four-digit year taken from the payslip start date.
        for i in self:
            a = i.date_from.strftime("%Y")
            i.year = a
    def compute_dates(self):
        # Reformat date_from as 'DD-Month-YYYY' for the report header.
        for i in self:
            datetimeobject = datetime.strptime(str(i.date_from), '%Y-%m-%d')
            print('az', datetimeobject)
            newformat = datetimeobject.strftime('%d-%B-%Y')
            i.date_start = newformat
            print("a", newformat)
    def allowance_deduction_compute(self):
        # Copy allowance/deduction amounts from the employee's contract(s)
        # onto the matching payslip lines, then drop zero-amount lines.
        for i in self.line_ids:
            obj = self.env['hr.contract'].search([('employee_id.id', '=', self.employee_id.id)])
            for j in obj:
                if i.name == 'Conveyance':
                    i.amount = j.conveyance
                if i.name == 'Mobile Allowance':
                    i.amount = j.mobile_allowance
                if i.name == 'Meal Allowance':
                    i.amount = j.meal_allowance
                if i.name == 'Other':
                    i.amount = j.other
                if i.name == 'Income Tax':
                    i.amount = j.income_tax
                if i.name == 'Advances':
                    i.amount = j.advances
                if i.name == 'EOBI':
                    i.amount = j.eobi
                if i.name == 'Provident Fund':
                    i.amount = j.provident_fund
                if i.name == 'PESSI':
                    i.amount = j.pessi
                if i.name == 'Other Deductions':
                    i.amount = j.other_deductions
        for rec in self.line_ids:
            if rec.amount == 0:
                rec.unlink()
class AddAllowancesDeduction(models.Model):
    """Extends hr.contract with the per-employee allowance and deduction
    amounts that payroll.allowance_deduction_compute copies onto
    payslip lines."""
    _inherit = 'hr.contract'
    conveyance = fields.Float("Conveyance")
    mobile_allowance = fields.Float("Mobile Allowance")
    meal_allowance = fields.Float("Meal Allowance")
    other = fields.Float("Other")
    income_tax = fields.Float("Income Tax")
    advances = fields.Float("Advances")
    eobi = fields.Float("EOBI")
    provident_fund = fields.Float("Provident Fund")
    pessi = fields.Float("PESSI")
    other_deductions = fields.Float("Other Deductions")
class EmployeeInherit(models.Model):
    """Extends hr.employee with how the salary is paid out."""
    _inherit = 'hr.employee'
    payment_mode = fields.Selection([('cash', 'Cash'), ('bank_transfer', 'Bank Transfer')], string="Payment Mode")
| [
"lts.viltco@gmail.com"
] | lts.viltco@gmail.com |
070a1000c6a358e4cd0dfefbcdeba19993c03c47 | a0b2d893e7066249ca982ecdb69a65b8fec21c5e | /datapackage_pipelines_assembler/processors/clear_resources.py | 0439473a808c0c815b29cfd848983fdfe6350b1a | [
"MIT"
] | permissive | datopian/assembler | 0f2ae62a01dd453d35d693924a8a244e33f46c58 | c53a9d8e8567b3e5f2837308041b75160c931485 | refs/heads/master | 2021-09-22T01:43:40.901197 | 2018-09-05T04:25:41 | 2018-09-05T04:25:41 | 96,293,211 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | from datapackage_pipelines.wrapper import process
def modify_datapackage(dp, parameters, stats):
    """Strip every resource from the datapackage descriptor.

    *parameters* and *stats* belong to the processor signature but are
    unused here.  The descriptor is mutated in place and returned.
    """
    dp.update(resources=[])
    return dp
# Register the processor with the datapackage-pipelines runner.
process(modify_datapackage=modify_datapackage)
| [
"noreply@github.com"
] | noreply@github.com |
7f66a25e2771b901757b16ec0dd262003ea3a7e7 | 3d691c0ae5dec985efd677dad52f162cfeab072f | /qfy_dynamixel/scripts/trajectory_mx_client.py | 64518501273c3088259d3bedfc7ce6fc2b9c7b43 | [] | no_license | hitsz-qfy/qfy_manipulator_repo | b22eb93b76977e1796978e655dfdec5037e457b5 | 97531aed934e0d644c09d5f7975d7876a1a1564f | refs/heads/master | 2021-09-14T16:26:30.018021 | 2017-06-19T11:41:53 | 2017-06-19T11:41:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | #!/usr/bin/env python
# @Author:qfyhaha
# @Description:
import roslib
roslib.load_manifest('qfy_dynamixel')
import rospy
from trajectory_class import *
if __name__ == '__main__':
    # Start the ROS node, create the arm joint client (from
    # trajectory_class), and hand control to the ROS event loop.
    rospy.init_node('trajectory_mx')
    arm_m = Joint('m_arm')
    rospy.spin()
"623696035@qq.com"
] | 623696035@qq.com |
8315f92eb87ff382410deb9ec82d96ec429ff849 | 1a5c8bff52e61850ee6c678cb051318ad5bf92e6 | /metode init/player.py | 4f88050661e5be153d1e7118ebbada47aed2448a | [] | no_license | Yudhi151811513042/object_oriented_programming_di_python | 137bcfc1c17d10ce14658f39884f2cf324fa096e | f31e80afc1a4f22d8c92c94ce503a31f8f5901a7 | refs/heads/master | 2022-12-05T19:45:21.100384 | 2020-08-10T08:01:24 | 2020-08-10T08:01:24 | 286,374,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | class Player:
name = ''
speed = ''
def __init__(self,name,speed):
self.name = name
self.speed = speed
    def getName(self):
        """Return the player's name."""
        return self.name
    def getSpeed(self):
        """Return the player's speed rating (stored as a string)."""
        return self.speed
# Quick demo: build two players and print their speed ratings.
player = Player('Dybala', '86')
player2 = Player('Messi', '94')
print(player.getName() + ' punya speed ' + player.getSpeed())
print(player2.getName() + ' punya speed ' + player2.getSpeed())
| [
"yudhiarnantorangga@gmail.com"
] | yudhiarnantorangga@gmail.com |
a14218e41f77aa8df05e6c2574dad3cb2c62882a | e2693c9b4aafc8c136e1201b056e8cee7cce758e | /Alice.py | e74ed651e2f2ac559c77c8499d364c0a7eb53dbe | [] | no_license | p0peax/DiffieHellman | 296ae87337df430da40e9b07becb70f67f080b2d | 1b546c19e6215761c75479dc38a436a793f60747 | refs/heads/master | 2021-06-10T05:49:38.269101 | 2016-10-13T00:41:36 | 2016-10-13T00:41:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,773 | py | from DiffieHellman import *
from Crypto.PublicKey import RSA
from ProgramConstants import g,p
class Alice(DiffieHellman):
    """Client side of the password-based Diffie-Hellman exchange.

    ``a`` is the private exponent (the numeric password) and
    ``alpha`` = g^a mod p is the public value shown to the user.
    compute_m / exponent_modulo are inherited from DiffieHellman.
    """
    def __init__(self, p_new, g_new, password):
        DiffieHellman.__init__(self, p_new, g_new)
        self.a = password
        self.alpha = self.exponent_modulo(self.g, password, self.p)
        print("The password is " + str(self.alpha))
    def compute_k(self, text_message):
        """Store the server's public value beta and return Alice's
        digest of alpha||beta under the shared key K = beta^a mod p."""
        self.beta = int(text_message)
        K = self.exponent_modulo(self.beta, self.a, self.p)
        m = str(str(self.alpha) + str(self.beta)).encode()
        return self.compute_m(m,K)
    def validatem2(self, m2):
        """Return True when m2 equals the digest of beta||alpha computed
        with the shared key (i.e. the server proved knowledge of K)."""
        k_temp = self.exponent_modulo(self.beta, self.a, self.p)
        m_temp = (str(self.beta) + str(self.alpha)).encode()
        m = int.from_bytes(self.compute_m(m_temp, k_temp), "big")
        print("m = " + str(m))
        print("m1 = " + str(m2))
        if m == int(m2):
            return True
        return False
def main():
    """Interactive driver: run the exchange with the server, validate
    its m2 digest, then verify its RSA signature over m2."""
    password = input("Please enter a password")
    a = Alice(p, g, int(password))
    input("Click enter when the key is ready")
    # BUG FIX: the key file was previously opened without ever being
    # closed; the with-block releases the handle deterministically.
    with open('static/publickey.pub') as f:
        ik = f.read()
    pk = RSA.importKey(ik)
    text = input("Please enter the text message")
    #Generate Alice's m
    m1 = int.from_bytes(a.compute_k(text),"big")
    print(m1)
    #Receive m from server
    m2 = input("Please enter the server's m2")
    if a.validatem2(m2) :
        print("The keys are validated")
    else:
        print ("The keys are NOT validated")
    sig = input("Please enter the signature code")
    if pk.verify(int(m2), tuple([int(sig),''])):
        print("The signature has been verified")
    else:
        print ("The signature has not been validated")
if __name__ == "__main__":
main() | [
"svang047@uottawa.ca"
] | svang047@uottawa.ca |
2d2063c996b232301e72b2482fdb7002f52dfd21 | 0cf52aa0b33c1c1e79f309488dbb48895a2043a0 | /admin_login/views.py | 724b090f994ffa69c2a06e4cb3f237e70c84aa62 | [] | no_license | REHNAKREMESAN/MiLaH | 81c0f9a51065bb3890935b7f53be1c6444ca2a48 | 03d647f2201f494ace0436992ba26b3c0f19a5e1 | refs/heads/master | 2020-04-07T23:10:11.722496 | 2018-11-27T10:38:57 | 2018-11-27T10:38:57 | 158,801,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | from django.shortcuts import render, redirect
from .import forms
from django.contrib.auth import authenticate
def adminhome(request):
    """Admin home page; assumes 'logid' was stored in the session by login()."""
    return render(request, 'HomepageAdmin/homepageadmin.html', {'logid': request.session['logid']})
def login(request):
    """Log an administrator in.

    GET renders an empty login form.  POST validates the credentials
    via django.contrib.auth; on success the user's id and name are
    stored in the session and the request is redirected to the admin
    home page, while any failure re-renders the form.
    """
    if request.method == 'POST':
        form = forms.AdminLoginForms(request.POST)
        if form.is_valid():
            userObj = form.cleaned_data
            username = userObj['username']
            password = userObj['password']
            user = authenticate(request, username=username, password=password)
            if user is not None:
                request.session['logid'] = user.id
                request.session['logname'] = user.username
                return redirect('admin_login:adminhome')
            else:
                return render(request, 'admin_login/admin_login.html', {'form': form})
        # BUG FIX: an invalid form previously fell off the end of the
        # view and returned None, which makes Django raise a 500.
        # Re-render the bound form so its field errors are shown.
        return render(request, 'admin_login/admin_login.html', {'form': form})
    else:
        form = forms.AdminLoginForms()
    return render(request, 'admin_login/admin_login.html', {'form': form})
# Create your views here.
| [
"noreply@github.com"
] | noreply@github.com |
6eb18e8602669ca83e45a4f13c88cb25f0e074d9 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/contour/_legendgrouptitle.py | b1b60ffa75052f5f42263e2e79d70dace693855c | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 689 | py | import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="legendgrouptitle", parent_name="contour", **kwargs):
super(LegendgrouptitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Legendgrouptitle"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this legend group's title font.
text
Sets the title of the legend group.
""",
),
**kwargs
)
| [
"nicolas@plot.ly"
] | nicolas@plot.ly |
c2f4046f240cdaba192eaa374d9b16cb3d261f5c | 7272664f64ce4e6fec72aedb537056be574ffadd | /base/migrations/0074_bugcomments.py | c3cd499b7fb86a72e42123444e1fc25715bca26b | [
"MIT"
] | permissive | gade-raghav/project-enhancements | 8456c9e35287685c0abd61dbf059a533dbef61e2 | 6303f6d6772f1e1b21693eb4ce6c9dbf6b7f49ca | refs/heads/master | 2023-01-25T02:58:15.519644 | 2020-11-21T13:37:07 | 2020-11-21T13:37:07 | 292,335,049 | 0 | 0 | MIT | 2020-10-16T20:00:44 | 2020-09-02T16:24:50 | Python | UTF-8 | Python | false | false | 800 | py | # Generated by Django 3.1 on 2020-09-28 13:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('base', '0073_bug_ticket_status'),
]
operations = [
migrations.CreateModel(
name='BugComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.CharField(max_length=100)),
('comment', models.TextField(null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('tracking', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='base.bug')),
],
),
]
| [
"gaderaghav5@gmail.com"
] | gaderaghav5@gmail.com |
48c565eab868102182916298a1d250c46179335e | 8425dd7fe9ab2cc794f08714931b4449c59eb51b | /magics_with_UCM_ages.py | 5f94149e1e51d4722391efd7c84c88f363dd022b | [] | no_license | GioValca/Recommender-Systems-2019 | f498d84ef5a421da5632de6173ce763e27b8a794 | d8b54578f7d1e6453785c216f253404aabfbaba7 | refs/heads/master | 2023-02-22T12:49:32.801064 | 2021-01-26T19:38:12 | 2021-01-26T19:38:12 | 333,192,671 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,265 | py | import pandas as pd
import scipy.sparse as sps
from Algorithms import TopPopRecommender
data = pd.read_csv('dataset/data_UCM_age.csv') # reading the file with pandas library
userList = list(data["row"])
ageList = list(data["col"])
ratingList = list(data["data"])
age0 = []
age1 = []
age2 = []
age3 = []
age4 = []
age5 = []
age6 = []
age7 = []
age8 = []
age9 = []
age10 = []
for i in range(len(userList)):
if ageList[i] == 0:
age0.append(userList[i])
if ageList[i] == 1:
age1.append(userList[i])
if ageList[i] == 2:
age2.append(userList[i])
if ageList[i] == 3:
age3.append(userList[i])
if ageList[i] == 4:
age4.append(userList[i])
if ageList[i] == 5:
age5.append(userList[i])
if ageList[i] == 6:
age6.append(userList[i])
if ageList[i] == 7:
age7.append(userList[i])
if ageList[i] == 8:
age8.append(userList[i])
if ageList[i] == 9:
age9.append(userList[i])
if ageList[i] == 10:
age10.append(userList[i])
data = pd.read_csv('dataset/data_train.csv') # reading the file data train with pandas library
userList = list(data["row"]) # set of users that have a rating for at least one item
itemList = list(data["col"]) # set of items that were rated by at least one user
ratingList = list(data["data"])
URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
URM_all = URM_all.tocsr()
matrix = sps.csr_matrix(URM_all)
# urm0 = matrix[age0, :]
urm1 = matrix[age1, :]
urm2 = matrix[age2, :]
urm3 = matrix[age3, :]
urm4 = matrix[age4, :]
urm5 = matrix[age5, :]
urm6 = matrix[age6, :]
urm7 = matrix[age7, :]
urm8 = matrix[age8, :]
urm9 = matrix[age9, :]
urm10 = matrix[age10, :]
urm_list = [urm1, urm2, urm3, urm4, urm5, urm6, urm7, urm8, urm9, urm10]
age_list = [age1, age2, age3, age4, age5, age6, age7, age8, age9, age10]
def get_age_list():
return age_list
j = 0
for i in urm_list:
recommenderTP = TopPopRecommender()
recommenderTP.fit(i)
filename = 'TopPopForAge' + '{}'.format(j+1)
file = open('myFiles/' + filename + '.txt', 'w')
items_rec = recommenderTP.recommend(age_list[j][0], at=10)
sarr = [str(a) for a in items_rec]
print(' '.join(sarr), file=file)
file.close()
j += 1
| [
"valcarenghi.valcarenghi@gmail.com"
] | valcarenghi.valcarenghi@gmail.com |
cda672563f45afd0e3b49491e35c2d392b412907 | e2fdb12d9f4c92cd4b0803a438d579c8f624db17 | /tests/visualization_test.py | c6cb47602b2244a267db957d76269a5c01ccf7a2 | [] | no_license | NareshPS/humpback-whale | fcd47b7140c5bd9bd28194514c805a866dbabe4d | 08b697bc88667c1ded4d8fc8a102cf7432e31596 | refs/heads/master | 2020-04-10T23:41:46.905949 | 2019-04-14T21:19:39 | 2019-04-14T21:19:39 | 161,361,517 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,613 | py | #Unittests
import unittest as ut
#Local imports
from model_utils import load_pretrained_model
from visualization import HistoryInsights
from visualization import ModelInsights
from visualization import WeightInsights
from visualization import PlottingUtils
from common import ut_constants
model_name = "cnn_model2d_1"
store = ut_constants.DATA_STORE
class TestHistoryInsights(ut.TestCase):
def test_accuracy(self):
_, history = load_pretrained_model(model_name, store)
insights = HistoryInsights(history)
insights.accuracy()
def test_loss(self):
_, history = load_pretrained_model(model_name, store)
insights = HistoryInsights(history)
insights.loss()
class TestPlottingUtils(ut.TestCase):
grid_dimensions = (5, 3)
def test_get_plot_axes(self):
plot_id_locations = {
0: (0, 0),
2: (0, 2),
5: (1, 2),
14: (4, 2)
}
for plot_id, expected_loc in plot_id_locations.items():
location = PlottingUtils.get_plot_axes(TestPlottingUtils.grid_dimensions, plot_id)
self.assertEqual(location, expected_loc, "Got unexpected location: {} for plot_id: {}".format(location, plot_id))
def test_create_plot_d(self):
grid_dimensions = (5, 3)
figure, axes = PlottingUtils.create_plot_d(grid_dimensions)
self.assertEqual(len(axes), 15, "Expected 15 axes for the grid with dimensions {}".format(grid_dimensions))
self.assertEqual(len(figure.get_axes()), 15, "Expected 15 axes for the grid with dimensions {}".format(grid_dimensions))
def test_create_plot_n(self):
n_graphs = 6
figure, axes = PlottingUtils.create_plot_n(n_graphs)
self.assertEqual(len(axes), 6, "Expected 6 axes for {} graphs".format(n_graphs))
self.assertEqual(len(figure.get_axes()), 6, "Expected 6 axes for {} graphs".format(n_graphs))
class TestWeightInsights(ut.TestCase):
def test_get_conv_weights(self):
model, _ = load_pretrained_model(model_name, store)
model_insights = ModelInsights(model)
weights = model_insights.get_conv_weights()
for l_name, l_weights in weights.items():
print(l_name)
print(l_weights)
class TestModelInsights(ut.TestCase):
def test_summary(self):
model, _ = load_pretrained_model(model_name, store)
summary = ModelInsights(model)
summary.summary()
if __name__ == "__main__":
ut.main() | [
"NareshPS@github.com"
] | NareshPS@github.com |
e7da941d4d79c7d0bdf2b212369d7b15c19536b2 | 18ea7633140d38c54bcda1e7064548933fc08581 | /source.py | 188d818835ca223cfb0031971e715ac2b5541f18 | [
"MIT"
] | permissive | valency/copyright | af32ffc007719ab31e943afcb698222f2f35afd6 | f07ae9843166fe2e7218b8915f82456986271a3c | refs/heads/master | 2022-07-19T12:25:43.412039 | 2020-05-27T09:13:15 | 2020-05-27T09:13:15 | 267,269,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | import argparse
import logging
import os
from deeputils.logger import setup_log
class Source:
def __init__(self, output):
self.output = output
def run(self, path):
with open(self.output, 'w') as output:
for p, _, f in os.walk(path):
for i in f:
u = os.path.join(p, i)
logging.info(u)
output.write('========================================\n')
output.write(u)
output.write('\n========================================\n')
with open(u, 'r') as m:
try:
for line in m.readlines():
output.write(line)
output.write('\n')
except Exception as exp:
logging.error(exp)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true', default=False, help='show debug information')
parser.add_argument('input', type=str, help='source code root path')
parser.add_argument('output', type=str, help='output file name')
args, _ = parser.parse_known_args()
setup_log(level=logging.DEBUG if args.debug else logging.INFO)
Source(args.output).run(args.input)
| [
"guiewy@gmail.com"
] | guiewy@gmail.com |
001346d4e4c97719b890e12f1c6ac8915031205e | 19e770624aae681ceb65e546c342b4aa9676b1cb | /decorator_file/app.py | 2c804a2d49c4449cb15b95ddf1574bf97f802ca1 | [] | no_license | lokichowdary/database_connection | ae57675e9d2b822f07f3c49544101ab59db2c0cb | 0e4c3f01be3454accd76f1684b9f23b5796cd871 | refs/heads/master | 2023-03-30T06:43:54.572174 | 2021-03-30T17:33:47 | 2021-03-30T17:33:47 | 353,086,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,152 | py | from flask import Flask, request, jsonify, render_template
from utils import connection1
import re
import psycopg2
import json
import random
app = Flask(__name__)
@app.route('/')
def method_name():
return render_template('postgresql_data.html')
dict_1 = {}
list_1 = []
def check(func):
def change(e):
match = re.fullmatch('[a-zA-Z0-9._]+@[a-z.]+',e)
if match != None:
return func(e)
else:
return func('INVALID EMAIL')
return change
@check
def email_op(e):
return e
@app.route('/receiving', methods = ['POST', 'GET'])
def data_receving():
conn, cur = connection1()
if request.method == 'POST':
list_2 = ['ids', 'first_name', 'last_name', 'email', 'passwords']
list_3 = []
dict_2 = {}
for i in list_2:
data = request.form.get(i)
list_3.append(data)
print(list_3)
for i in range(len(list_3)):
dict_2.setdefault(list_2[i], list_3[i])
list_1.append(dict_2)
print(list_1)
with open("json_file.json","w+") as f:
json.dump(list_1, f, indent=5)
f.seek(0)
data = json.load(f)
print('successfully data fetched')
print(data)
for var in data:
if var['passwords'] == None:
var['passwords']=''.join((random.choice('abcdxyzpqrstuvwxyz12354@') for i in range(8)))
for i in data:
ids = i['ids']
first_name = i['first_name']
last_name = i['last_name']
email = email_op(i['email'])
passwords = i['passwords']
cur = conn.cursor()
insert_stmt = "INSERT INTO details (ids,first_name, last_name, email, passwords)\
VALUES (%s,%s,%s,%s,%s)"
data1 = (ids,first_name, last_name, email, passwords)
cur.execute(insert_stmt, data1)
conn.commit()
return "data inserted"
return 'done'
print ("Records created successfully")
if __name__ == '__main__':
app.run(debug=True)
| [
"loki.sai93@gmail.com"
] | loki.sai93@gmail.com |
96ebdad8c82b851b71e1b34b68ce0b4589e19566 | 3bbcda4d74d9aa65e5c705352a4a60d9db0c6a42 | /third_party/github.com/ansible/awx/awx_collection/plugins/modules/tower_project.py | 36a4f8666a065b5a2120168d85984ea3b0dc3f69 | [
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0",
"GPL-3.0-or-later",
"JSON"
] | permissive | mzachariasz/sap-deployment-automation | 82ecccb5a438eaee66f14b4448d4abb15313d989 | cb4710f07bb01248de4255a0dc5e48eda24e2d63 | refs/heads/master | 2023-06-25T15:09:53.505167 | 2021-07-23T18:47:21 | 2021-07-23T18:47:21 | 388,017,328 | 1 | 0 | Apache-2.0 | 2021-07-23T18:47:22 | 2021-07-21T06:29:55 | HCL | UTF-8 | Python | false | false | 11,117 | py | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_project
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower projects
description:
- Create, update, or destroy Ansible Tower projects. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the project.
required: True
type: str
description:
description:
- Description to use for the project.
type: str
scm_type:
description:
- Type of SCM resource.
choices: ["manual", "git", "hg", "svn", "insights"]
default: "manual"
type: str
scm_url:
description:
- URL of SCM resource.
type: str
local_path:
description:
- The server playbook directory for manual projects.
type: str
scm_branch:
description:
- The branch to use for the SCM resource.
type: str
default: ''
scm_refspec:
description:
- The refspec to use for the SCM resource.
type: str
default: ''
scm_credential:
description:
- Name of the credential to use with this SCM resource.
type: str
scm_clean:
description:
- Remove local modifications before updating.
type: bool
default: 'no'
scm_delete_on_update:
description:
- Remove the repository completely before updating.
type: bool
default: 'no'
scm_update_on_launch:
description:
- Before an update to the local repository before launching a job with this project.
type: bool
default: 'no'
scm_update_cache_timeout:
description:
- Cache Timeout to cache prior project syncs for a certain number of seconds.
Only valid if scm_update_on_launch is to True, otherwise ignored.
type: int
default: 0
allow_override:
description:
- Allow changing the SCM branch or revision in a job template that uses this project.
type: bool
aliases:
- scm_allow_override
job_timeout:
description:
- The amount of time (in seconds) to run before the SCM Update is canceled. A value of 0 means no timeout.
default: 0
type: int
custom_virtualenv:
description:
- Local absolute file path containing a custom Python virtualenv to use
type: str
default: ''
organization:
description:
- Name of organization for project.
type: str
required: True
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
type: str
wait:
description:
- Provides option (True by default) to wait for completed project sync
before returning
- Can assure playbook files are populated so that job templates that rely
on the project may be successfully created
type: bool
default: True
notification_templates_started:
description:
- list of notifications to send on start
type: list
elements: str
notification_templates_success:
description:
- list of notifications to send on success
type: list
elements: str
notification_templates_error:
description:
- list of notifications to send on error
type: list
elements: str
extends_documentation_fragment: awx.awx.auth
'''
EXAMPLES = '''
- name: Add tower project
tower_project:
name: "Foo"
description: "Foo bar project"
organization: "test"
state: present
tower_config_file: "~/tower_cli.cfg"
- name: Add Tower Project with cache timeout and custom virtualenv
tower_project:
name: "Foo"
description: "Foo bar project"
organization: "test"
scm_update_on_launch: True
scm_update_cache_timeout: 60
custom_virtualenv: "/var/lib/awx/venv/ansible-2.2"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
import time
from ..module_utils.tower_api import TowerAPIModule
def wait_for_project_update(module, last_request):
# The current running job for the udpate is in last_request['summary_fields']['current_update']['id']
if 'current_update' in last_request['summary_fields']:
running = True
while running:
result = module.get_endpoint('/project_updates/{0}/'.format(last_request['summary_fields']['current_update']['id']))['json']
if module.is_job_done(result['status']):
time.sleep(1)
running = False
if result['status'] != 'successful':
module.fail_json(msg="Project update failed")
module.exit_json(**module.json_output)
def main():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
name=dict(required=True),
description=dict(),
scm_type=dict(choices=['manual', 'git', 'hg', 'svn', 'insights'], default='manual'),
scm_url=dict(),
local_path=dict(),
scm_branch=dict(default=''),
scm_refspec=dict(default=''),
scm_credential=dict(),
scm_clean=dict(type='bool', default=False),
scm_delete_on_update=dict(type='bool', default=False),
scm_update_on_launch=dict(type='bool', default=False),
scm_update_cache_timeout=dict(type='int', default=0),
allow_override=dict(type='bool', aliases=['scm_allow_override']),
job_timeout=dict(type='int', default=0),
custom_virtualenv=dict(),
organization=dict(required=True),
notification_templates_started=dict(type="list", elements='str'),
notification_templates_success=dict(type="list", elements='str'),
notification_templates_error=dict(type="list", elements='str'),
state=dict(choices=['present', 'absent'], default='present'),
wait=dict(type='bool', default=True),
)
# Create a module for ourselves
module = TowerAPIModule(argument_spec=argument_spec)
# Extract our parameters
name = module.params.get('name')
description = module.params.get('description')
scm_type = module.params.get('scm_type')
if scm_type == "manual":
scm_type = ""
scm_url = module.params.get('scm_url')
local_path = module.params.get('local_path')
scm_branch = module.params.get('scm_branch')
scm_refspec = module.params.get('scm_refspec')
scm_credential = module.params.get('scm_credential')
scm_clean = module.params.get('scm_clean')
scm_delete_on_update = module.params.get('scm_delete_on_update')
scm_update_on_launch = module.params.get('scm_update_on_launch')
scm_update_cache_timeout = module.params.get('scm_update_cache_timeout')
allow_override = module.params.get('allow_override')
job_timeout = module.params.get('job_timeout')
custom_virtualenv = module.params.get('custom_virtualenv')
organization = module.params.get('organization')
state = module.params.get('state')
wait = module.params.get('wait')
# Attempt to look up the related items the user specified (these will fail the module if not found)
org_id = module.resolve_name_to_id('organizations', organization)
if scm_credential is not None:
scm_credential_id = module.resolve_name_to_id('credentials', scm_credential)
# Attempt to look up project based on the provided name and org ID
project = module.get_one('projects', **{
'data': {
'name': name,
'organization': org_id
}
})
if state == 'absent':
# If the state was absent we can let the module delete it if needed, the module will handle exiting from this
module.delete_if_needed(project)
# Attempt to look up associated field items the user specified.
association_fields = {}
notifications_start = module.params.get('notification_templates_started')
if notifications_start is not None:
association_fields['notification_templates_started'] = []
for item in notifications_start:
association_fields['notification_templates_started'].append(module.resolve_name_to_id('notification_templates', item))
notifications_success = module.params.get('notification_templates_success')
if notifications_success is not None:
association_fields['notification_templates_success'] = []
for item in notifications_success:
association_fields['notification_templates_success'].append(module.resolve_name_to_id('notification_templates', item))
notifications_error = module.params.get('notification_templates_error')
if notifications_error is not None:
association_fields['notification_templates_error'] = []
for item in notifications_error:
association_fields['notification_templates_error'].append(module.resolve_name_to_id('notification_templates', item))
# Create the data that gets sent for create and update
project_fields = {
'name': name,
'scm_type': scm_type,
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_refspec': scm_refspec,
'scm_clean': scm_clean,
'scm_delete_on_update': scm_delete_on_update,
'timeout': job_timeout,
'organization': org_id,
'scm_update_on_launch': scm_update_on_launch,
'scm_update_cache_timeout': scm_update_cache_timeout,
'custom_virtualenv': custom_virtualenv,
}
if description is not None:
project_fields['description'] = description
if scm_credential is not None:
project_fields['credential'] = scm_credential_id
if allow_override is not None:
project_fields['allow_override'] = allow_override
if scm_type == '':
project_fields['local_path'] = local_path
if scm_update_cache_timeout != 0 and scm_update_on_launch is not True:
module.warn('scm_update_cache_timeout will be ignored since scm_update_on_launch was not set to true')
# If we are doing a not manual project, register our on_change method
# An on_change function, if registered, will fire after an post_endpoint or update_if_needed completes successfully
on_change = None
if wait and scm_type != '':
on_change = wait_for_project_update
# If the state was present and we can let the module build or update the existing project, this will return on its own
module.create_or_update_if_needed(
project, project_fields,
endpoint='projects', item_type='project',
associations=association_fields,
on_create=on_change, on_update=on_change
)
if __name__ == '__main__':
main()
| [
"joseph.wright@googlecloud.corp-partner.google.com"
] | joseph.wright@googlecloud.corp-partner.google.com |
f28e12f28bdecf76a93eb4dc459ef28a441a2b31 | d81cce0ec5119dc8a1fd2c2adb909fb3fbeae28f | /day10.py | ba6ec7813af748f16ea50c3bc65409d10fec97dd | [] | no_license | ruri-watt/advent-of-code-2020 | 027ea8edc4e7bc9f0b34d2d8a3f654c60df53907 | a2ea841dd7198180c679304c8dd025bacf185044 | refs/heads/master | 2023-02-03T10:16:21.654074 | 2020-12-23T20:56:37 | 2020-12-23T20:56:37 | 322,310,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | import numpy as np
def part1():
with open('joltage-adapters.txt') as f:
adapters = [int(line) for line in f]
ratings = sorted(adapters + [0, max(adapters) + 3])
ratings = np.array(ratings)
diffs = ratings[1:] - ratings[:-1]
return np.count_nonzero(diffs == 1) * np.count_nonzero(diffs == 3)
def part2():
with open('joltage-adapters.txt') as f:
adapters = [int(line) for line in f]
ratings = sorted(adapters + [0, max(adapters) + 3])
neighbours = {rating: {r for r in ratings if 0 < r - rating <= 3} for rating in ratings}
destination = ratings[-1]
nr_paths = {destination: 1}
for n in reversed(ratings[:-1]):
nr_paths[n] = sum([nr_paths[i] for i in neighbours[n]])
return nr_paths[0]
if __name__ == '__main__':
print(part1())
print(part2())
| [
"rw@localhost.localdomain"
] | rw@localhost.localdomain |
bafd5c88ac2746968ac5dfd397a3836d2702e176 | b22a486411c09f9d345e8f8813c1897aae21e521 | /clusters/vc/common/home/sts/184764.job-reason/1.fill-queue/bin/poke_pending_jobs.py | 05e2aaf0697bd252e9aaad4b654a885c2873f65c | [
"BSD-3-Clause"
] | permissive | samcom12/hpc-collab | 566efb3e348486be97c52afe6b3452cb233a95b3 | 593ed35293fddec0354faf22105020e655707760 | refs/heads/main | 2023-05-08T10:05:23.050343 | 2021-03-26T20:20:12 | 2021-03-26T20:20:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | #!/usr/bin/python3
"""
This script pokes the pending jobs in the queue so that they will update
themselves, made in particular to update the Reason field.
Can be run manually or by a separate mechanism (interstate, cron, etc).
"""
from subprocess import getstatusoutput
# get list of jobs to poke
# pending jobs not with Resources or Priority reason
cmd = "squeue -t PD -O JobId,Reason -h | grep -Ev '(Resources|Priority)' | awk '{ print $1 }'"
rc, out = getstatusoutput(cmd)
if (rc !=0):
print("Error getting jobid's from squeue!")
print(out)
jobids=out.splitlines()
# loop through jobids
for job in jobids:
# verify ExcNodeList is null
cmd = "scontrol show job %s | grep -Po 'ExcNodeList=\K.*'"%(job)
rc, out = getstatusoutput(cmd)
if (rc !=0):
print("Error getting ExcNodeList from scontrol!")
print(out)
if (out != "(null)"): # ExcNodeList is populated
# poke job and preserve ExcNodeList
cmd = "scontrol update job %s ExcNodeList=%s"%(job,out)
rc, out = getstatusoutput(cmd)
if (rc !=0):
print("Error setting preserved ExcNodeList from scontrol!")
print(out)
else :
# poke job
cmd = 'scontrol update job %s ExcNodeList=""'%(job)
rc, out = getstatusoutput(cmd)
if (rc !=0):
print("Error setting ExcNodeList from scontrol!")
print(out)
print("Pending jobs have been poked") | [
"github@senator.net"
] | github@senator.net |
d78d173661f73c71aa2f2e72da15dfd4c9bce36f | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3/N.bu/C_CoinJam.py | 7e9c432700cd1fe34c8ed0dc525dd6c21db8812c | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 884 | py | import random
from math import *
used_string = set()
def find_div(n):
for i in range(2, ceil(sqrt(n)) + 1):
if n % i is 0:
return i
if (i > 200):
break
return 0
def check(s):
leg = []
for i in range(2, 11):
cur_number = 0
for c in s:
cur_number = cur_number*i + (ord(c) - ord('0'))
div = find_div(cur_number)
if div is 0:
return 0
else:
leg.append(div)
f_out.write(s)
for a in leg:
f_out.write(" " + str(a))
f_out.write("\n")
return 1
f_in = open('c.txt', 'r')
f_out = open('c.out', 'w')
f_out.write("Case #1:\n")
n = f_in.readline()
line = list(f_in.readline().split(" "))
n = int(line[0])
j = int(line[1])
result = 0;
while True:
s = "1";
for i in range(1, n - 1):
s += str(random.randrange(2))
s += "1";
if s in used_string:
continue
print(s)
used_string.add(s)
result += check(s)
print(result)
if result >= j:
break
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
56a55a5c0c5dcdcc2a5b076cd3b6a4ef9883c331 | bb6020fa623d74d68bcf086676d7a22b2794ba42 | /Introduction to Programming Exercises/ex6.py | 966a1da4798a65c6088e6a2fe8dc31cb63dc9619 | [] | no_license | saulquispe/My-Solutions-to-The-Python-Workbook-By-Ben-Stephenson-122-of-174- | 9861dc02b079d2574b61d3303296d53e9fa593bd | cef070a4639662fe7313362190397f4e9afe7b9c | refs/heads/master | 2023-08-31T17:11:07.033239 | 2020-07-09T07:19:00 | 2020-07-09T07:19:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | '''
Exercise 6:Tax and Tip
The program that you create for this exercise will begin by reading the cost of a meal
ordered at a restaurant from the user. Then your program will compute the tax and
tip for the meal. Use your local tax rate when computing the amount of tax owing.
Compute the tip as 18 percent of the meal amount (without the tax). The output from
your program should include the tax amount, the tip amount, and the grand total for
the meal including both the tax and the tip. Format the output so that all of the values
are displayed using two decimal places
'''
meal = float(input('What was the cost of your meal: '))
tax = 0.075 * meal
tip = 0.18 * meal
grand_total = tax + tip + meal
print('Tax $%.2f' % tax)
print('Tip $%.2f' % tip)
print('Grand Total $%.2f' % grand_total)
'''
# or
print('Tax is ${:.2f}, Tip is ${:.2f} and the Grand Total is ${:.2f}'. format(round(tax,2),round(tip,2),round(grand_total,2)))
''' | [
"noreply@github.com"
] | noreply@github.com |
97f4d98bb697a5235d161b63bf8d59775e72511d | f3eb9329bca4aebda7ab61a01160421985ad762e | /modules/util.py | 4e83f4fb830bd2c1b5db8f4fbaa31806c5f5f0d7 | [] | no_license | WeilunWang/Coherent-image-animation | 2e2e0177a87bcef0ed6b63e76f779253ae573d96 | 684ecb0909da9d6eb7b557ba1f78c7295340e5f4 | refs/heads/main | 2023-02-13T16:54:53.005349 | 2021-01-06T03:34:54 | 2021-01-06T03:34:54 | 327,186,741 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,776 | py | from torch import nn
import torch.nn.functional as F
import torch
from sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d
from sync_batchnorm import SynchronizedBatchNorm3d as BatchNorm3d
def kp2gaussian(kp: object, spatial_size: object, kp_variance: object) -> object:
    """Render keypoints as isotropic Gaussian-like heatmaps.

    Each keypoint in ``kp['value']`` (arbitrary leading dims, trailing dim of
    size 2 holding normalized (x, y) coordinates) becomes a heatmap of shape
    ``spatial_size``; intensity decays with squared distance from the keypoint,
    scaled by ``kp_variance``.
    """
    centers = kp['value']

    # Build an (H, W, 2) grid of normalized coordinates with the same tensor
    # type (dtype/device) as the keypoints.
    grid = make_coordinate_grid(spatial_size, centers.type())
    lead = len(centers.shape) - 1

    # Broadcast the grid over every leading dimension of the keypoints.
    grid = grid.view((1,) * lead + grid.shape)
    grid = grid.repeat(centers.shape[:lead] + (1, 1, 1))

    # Reshape keypoints so they broadcast against the (H, W, 2) grid.
    centers = centers.view(centers.shape[:lead] + (1, 1, 2))

    diff = grid - centers
    return torch.exp(-0.5 * (diff ** 2).sum(-1) / kp_variance)
def make_coordinate_grid(spatial_size, type):
    """Return an (h, w, 2) grid of (x, y) coordinates spanning [-1, 1].

    ``type`` is a tensor type string (as returned by ``Tensor.type()``) used
    to cast the grid to the right dtype/device.
    """
    h, w = spatial_size

    # Normalize pixel indices to the [-1, 1] range along each axis.
    xs = 2 * (torch.arange(w).type(type) / (w - 1)) - 1
    ys = 2 * (torch.arange(h).type(type) / (h - 1)) - 1

    # Tile into full-resolution coordinate planes: x varies along columns,
    # y varies along rows.
    plane_x = xs.view(1, -1).repeat(h, 1)
    plane_y = ys.view(-1, 1).repeat(1, w)

    return torch.cat([plane_x.unsqueeze(2), plane_y.unsqueeze(2)], 2)
def zip_dimT_to_dimBS(tensor):
    """Fold the temporal axis into the batch axis.

    :param tensor: (N)D tensor laid out as B, C, T, ...
    :return: (N-1)D tensor laid out as B * T, C, ...
    """
    b, c, t = tensor.shape[0], tensor.shape[1], tensor.shape[2]
    trailing = list(tensor.shape[3:])
    folded = tensor.transpose(1, 2).contiguous()
    return folded.view([b * t, c] + trailing).contiguous()
def unzip_dimT_from_dimBS(num_frame, tensor):
    """Split the temporal axis back out of the batch axis.

    Inverse of ``zip_dimT_to_dimBS``.

    :param num_frame: number of frames T that were folded into the batch
    :param tensor: (N-1)D tensor laid out as B * T, C, ...
    :return: (N)D tensor laid out as B, C, T, ...
    """
    c = tensor.shape[1]
    trailing = list(tensor.shape[2:])
    unfolded = tensor.view([-1, num_frame, c] + trailing).contiguous()
    return unfolded.transpose(1, 2).contiguous()
def SoftCrossEntropyLoss(inputs, target, temperature=0.1):
    """Cross entropy between temperature-sharpened soft distributions.

    Both ``inputs`` and ``target`` are raw logits. ``target`` is converted to
    a probability distribution along dim 1 and ``inputs`` to
    log-probabilities; the per-sample cross entropies are then averaged.
    """
    neg_log_prob = -F.log_softmax(inputs / temperature, dim=1)
    soft_target = F.softmax(target / temperature, dim=1)
    return (neg_log_prob * soft_target).sum(dim=1).mean()
def MatrixEqualityLoss(inputs, target):
    """Penalize deviation of ``inputs @ target^-1`` from the identity.

    Both arguments hold batches of 2x2 matrices in their trailing two dims
    (5D layout, summed over dims 1, 3 and 4). The loss is zero when every
    input matrix equals the corresponding target matrix.
    """
    product = torch.matmul(inputs, torch.inverse(target))
    identity = torch.eye(2).view(1, 1, 2, 2).type(product.type())
    return torch.abs(identity - product).sum(dim=(1, 3, 4)).mean()
class ResBlock3d(nn.Module):
    """
    Residual block for 3D (B, C, T, H, W) feature maps; resolution is
    preserved. The temporal axis is replication-padded while the spatial
    axes are zero-padded inside the convolutions.
    """

    def __init__(self, in_features, kernel_size, padding):
        super(ResBlock3d, self).__init__()
        if isinstance(padding, int):
            padding = (padding, padding, padding)
        # Replication padding covers the temporal axis only; the conv itself
        # zero-pads the two spatial axes.
        self.conv1 = nn.Sequential(
            nn.ReplicationPad3d((0, 0, 0, 0, padding[0], padding[0])),
            nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
                      padding=(0, padding[1], padding[2]))
        )
        self.conv2 = nn.Sequential(
            nn.ReplicationPad3d((0, 0, 0, 0, padding[0], padding[0])),
            nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
                      padding=(0, padding[1], padding[2]))
        )
        self.norm1 = BatchNorm3d(in_features, affine=True)
        self.norm2 = BatchNorm3d(in_features, affine=True)

    def forward(self, x):
        # Pre-activation residual branch: (BN -> ReLU -> conv) twice, then
        # add the skip connection.
        branch = self.conv1(F.relu(self.norm1(x), inplace=True))
        branch = self.conv2(F.relu(self.norm2(branch), inplace=True))
        return branch + x
class UpBlock2d(nn.Module):
    """
    2x nearest-neighbour upsampling followed by conv + BN + ReLU (2D decoder
    block).
    """

    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(UpBlock2d, self).__init__()

        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
                              padding=padding, groups=groups)
        self.norm = BatchNorm2d(out_features, affine=True)

    def forward(self, x):
        upsampled = F.interpolate(x, scale_factor=2)
        return F.relu(self.norm(self.conv(upsampled)), inplace=True)
class UpBlock3d(nn.Module):
    """
    Upsampling block for 3D (B, C, T, H, W) features: doubles the spatial
    resolution only; the temporal length is unchanged.
    """

    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(UpBlock3d, self).__init__()
        if isinstance(padding, int):
            padding = (padding, padding, padding)
        # Temporal axis is replication-padded; spatial axes are zero-padded
        # inside the convolution.
        self.conv = nn.Sequential(
            nn.ReplicationPad3d((0, 0, 0, 0, padding[0], padding[0])),
            nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
                      padding=(0, padding[1], padding[2]), groups=groups)
        )
        self.norm = BatchNorm3d(out_features, affine=True)

    def forward(self, x):
        b, c, t = x.shape[0], x.shape[1], x.shape[2]
        # Fold time into the batch so 2D spatial interpolation is applied
        # frame by frame.
        frames = x.transpose(1, 2).contiguous().view([b * t, c] + list(x.shape[3:]))
        frames = F.interpolate(frames, scale_factor=2)
        # Restore the (B, C, T, H', W') layout.
        out = frames.view([b, t, c] + list(frames.shape[2:])).contiguous().transpose(1, 2)
        out = self.conv(out)
        out = self.norm(out)
        return F.relu(out, inplace=True)
class DownBlock2d(nn.Module):
    """
    Downsampling block for use in a 2D encoder.

    conv -> batch-norm -> relu, then 2x2 average pooling halves H and W.
    """
    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(DownBlock2d, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                              kernel_size=kernel_size, padding=padding, groups=groups)
        self.norm = BatchNorm2d(out_features, affine=True)
        self.pool = nn.AvgPool2d(kernel_size=(2, 2))

    def forward(self, x):
        """Convolve, normalise, activate, then average-pool down by two."""
        activated = F.relu(self.norm(self.conv(x)), inplace=True)
        return self.pool(activated)
class SameBlock2d(nn.Module):
    """
    Simple conv block that preserves spatial resolution.

    conv -> batch-norm -> relu with no resampling.
    """
    def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1):
        super(SameBlock2d, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                              kernel_size=kernel_size, padding=padding, groups=groups)
        self.norm = BatchNorm2d(out_features, affine=True)

    def forward(self, x):
        """Return relu(norm(conv(x))) at the input resolution."""
        return F.relu(self.norm(self.conv(x)))
class Decoder3d(nn.Module):
    """
    Hourglass decoder for volumetric features.

    Consumes the encoder's feature list (popping from the end) and
    concatenates each upsampled map with the matching skip connection.
    """
    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Decoder3d, self).__init__()
        blocks = []
        for i in reversed(range(num_blocks)):
            # The deepest stage receives only the bottleneck; later stages
            # receive the previous output concatenated with a skip (2x width).
            expand = 1 if i == num_blocks - 1 else 2
            in_filters = expand * min(max_features, block_expansion * (2 ** (i + 1)))
            out_filters = min(max_features, block_expansion * (2 ** i))
            blocks.append(UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))
        self.up_blocks = nn.ModuleList(blocks)
        self.out_filters = block_expansion + in_features

    def forward(self, x):
        """Decode the feature list x (mutated via pop) into a single tensor."""
        out = x.pop()
        for stage in self.up_blocks:
            out = stage(out)
            out = torch.cat([out, x.pop()], dim=1)
        return out
class Encoder2d(nn.Module):
    """
    Hourglass encoder.

    Stacks num_blocks downsampling stages; forward returns every intermediate
    feature map (input included) for the decoder's skip connections.
    """
    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Encoder2d, self).__init__()

        def width(k):
            # Channel count at stage k, capped at max_features.
            return min(max_features, block_expansion * (2 ** k))

        self.down_blocks = nn.ModuleList([
            DownBlock2d(in_features if i == 0 else width(i), width(i + 1),
                        kernel_size=3, padding=1)
            for i in range(num_blocks)
        ])

    def forward(self, x):
        """Return [x, stage1(x), stage2(stage1(x)), ...] for skip connections."""
        feature_maps = [x]
        for stage in self.down_blocks:
            feature_maps.append(stage(feature_maps[-1]))
        return feature_maps
class Decoder2d(nn.Module):
    """
    Hourglass decoder (2D).

    Consumes the encoder's feature list (popping from the end) and
    concatenates each upsampled map with the matching skip connection.
    """
    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Decoder2d, self).__init__()
        blocks = []
        for i in reversed(range(num_blocks)):
            # The deepest stage receives only the bottleneck; later stages
            # receive the previous output concatenated with a skip (2x width).
            expand = 1 if i == num_blocks - 1 else 2
            in_filters = expand * min(max_features, block_expansion * (2 ** (i + 1)))
            out_filters = min(max_features, block_expansion * (2 ** i))
            blocks.append(UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1))
        self.up_blocks = nn.ModuleList(blocks)
        self.out_filters = block_expansion + in_features

    def forward(self, x):
        """Decode the feature list x (mutated via pop) into a single tensor."""
        out = x.pop()
        for stage in self.up_blocks:
            out = stage(out)
            out = torch.cat([out, x.pop()], dim=1)
        return out
class Hourglass2d(nn.Module):
    """
    Full hourglass (U-shaped) architecture: Encoder2d followed by Decoder2d.
    """
    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Hourglass2d, self).__init__()
        self.encoder = Encoder2d(block_expansion, in_features, num_blocks, max_features)
        self.decoder = Decoder2d(block_expansion, in_features, num_blocks, max_features)
        # Mirror the decoder's output width so callers can size the next layer.
        self.out_filters = self.decoder.out_filters

    def forward(self, x):
        """Encode x into a feature pyramid and decode it back with skips."""
        feature_maps = self.encoder(x)
        return self.decoder(feature_maps)
class AntiAliasInterpolation2d(nn.Module):
    """
    Band-limited downsampling, for better preservation of the input signal.

    Blurs with a depthwise Gaussian kernel before resampling so that
    high-frequency content does not alias.
    """
    def __init__(self, channels, scale):
        super(AntiAliasInterpolation2d, self).__init__()
        # Gaussian std derived from the scale factor; kernel spans ~4 sigma
        # on each side of the centre.
        sigma = (1 / scale - 1) / 2
        kernel_size = 2 * round(sigma * 4) + 1
        self.ka = kernel_size // 2
        self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka

        # The 2D Gaussian is separable: build the 1D profile once and take
        # its outer product.
        coords = torch.arange(kernel_size, dtype=torch.float32)
        mean = (kernel_size - 1) / 2
        gauss_1d = torch.exp(-(coords - mean) ** 2 / (2 * sigma ** 2))
        kernel = gauss_1d.unsqueeze(1) * gauss_1d.unsqueeze(0)
        # Normalise so the kernel sums to one.
        kernel = kernel / torch.sum(kernel)
        # Shape into a depthwise conv weight: one identical kernel per channel.
        kernel = kernel.view(1, 1, kernel_size, kernel_size).repeat(channels, 1, 1, 1)
        self.register_buffer('weight', kernel)
        self.groups = channels
        self.scale = scale

    def forward(self, input):
        """Gaussian-blur the input, then resample it by the scale factor."""
        if self.scale == 1.0:
            return input
        padded = F.pad(input, (self.ka, self.kb, self.ka, self.kb))
        blurred = F.conv2d(padded, weight=self.weight, groups=self.groups)
        return F.interpolate(blurred, scale_factor=(self.scale, self.scale))
| [
"noreply@github.com"
] | noreply@github.com |
0054a608148118c98cc037f61165cbe401707630 | 2425d942129e803b06f4048259bd6ba8c69b7394 | /206_project_plan.py | ae84c20aa4495d562358180e50283d28b471b17a | [] | no_license | abril427/SI206-Final-Project | 4b7562170518e96f5b1452de09041fbb03895c69 | 8c6421f508b9afa7f60909f8eaa358de9c3c0326 | refs/heads/master | 2021-01-19T18:36:57.099812 | 2017-04-24T22:21:00 | 2017-04-24T22:21:00 | 88,371,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,228 | py | ## Your name: Abril Vela
## The option you've chosen: Option 2
# Put import statements you expect to need here!
import unittest
import tweepy
import twitter_info # my personal private twitter_info
import json
import sqlite3
# Write your test cases here.
class TwitterDataTests(unittest.TestCase):
    """Planned tests for the Twitter-data half of the project.

    NOTE(review): several names used here (searched_tweets, Tweet) are not
    defined in this plan file yet -- they must exist before these tests run.
    """
    def test_twitter_caching(self):
        """The cache file should contain one of the planned search terms."""
        fstr = open("206_final_project_cache.json","r").read()
        self.assertTrue("Moonlight" in fstr) #Moonlight will be one of the search terms
    def test_tweets_type(self):
        """searched_tweets should be a list."""
        self.assertEqual(type(searched_tweets),type([]))
    def test_tweets_type2(self):
        """Each element of searched_tweets should be a dictionary."""
        self.assertEqual(type(searched_tweets[18]),type({"hi":3})) #check to see that object in list is a dictionary
    def test_get_user(self):
        """A Tweet built from cached data should expose its user."""
        tweets = open("206_final_project_cache.json","r").read()
        tweet = Tweet(tweets[0])
        user = tweet.get_twitter_user()
        # NOTE(review): assertTrue(a, b) treats b as the failure message;
        # assertEqual(tweet.user, user) was probably intended here.
        self.assertTrue(tweet.user, user)
    def test__str__(self):
        """__str__ should describe the tweet, its author and its counts."""
        # Fixed: the original used bare names (user_id, text, ...) as dict
        # keys, which raises NameError at runtime; keys must be strings.
        tweet_dict = {'user_id': '898832', 'text': 'This is text', 'tweet_id': '982381',
                      'movie_title': 'This is the title', 'num_favs': 7, 'retweets': 10}
        tweet = Tweet(tweet_dict)
        tweet_str = tweet.__str__()
        self.assertTrue("This tweet, 'This is a text', was tweeted tweeted by user 898832 and has 7 favorites and 10 retweets.", "This tweet, 'This is a text', was tweeted tweeted by user 898832 and has 7 favorites and 10 retweets.")
class Movie(unittest.TestCase):
    """Planned test for the movie-search helper (self.search is not defined yet)."""
    def test_type_searh(self):
        """search() should return a list of dictionaries."""
        result = self.search(["term1, term2, term3"])
        self.assertEqual(type(result), type([{"hi": 1}]))
class DatabaseTests(unittest.TestCase):
def test_users(self):
conn = sqlite3.connect('finalproject.db')
cur = conn.cursor()
cur.execute('SELECT * FROM Users');
result = cur.fetchall()
self.assertTrue(len(result)>=2,"Testing that there are at least 2 distinct users in the Users table")
conn.close()
def test_movies(self):
conn = sqlite3.connect('finalproject.db')
cur = conn.cursor()
cur.execute('SELECT * FROM Movies');
result = cur.fetchall()
self.assertTrue(len(result) == 3,"Testing that there are at 3 distinct movies in the Movies table")
conn.close()
if __name__ == "__main__":
    # verbosity=2 prints every test method with its result as it runs.
    unittest.main(verbosity=2)
## Remember to invoke all your tests...
"abril427@gmail.com"
] | abril427@gmail.com |
abe75e0604231e6222b7a2c2cf9fddf840ff6afe | f6d7c30a7ed343e5fe4859ceaae1cc1965d904b7 | /htdocs/submissions/abe75e0604231e6222b7a2c2cf9fddf840ff6afe.py | 502c995667746a57f51b5c62ffaa1d60e127b87f | [] | no_license | pycontest/pycontest.github.io | ed365ebafc5be5d610ff9d97001240289de697ad | 606015cad16170014c41e335b1f69dc86250fb24 | refs/heads/master | 2021-01-10T04:47:46.713713 | 2016-02-01T11:03:46 | 2016-02-01T11:03:46 | 50,828,627 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 122 | py | j=''.join;seven_seg=lambda x:j(j(' _ _|_|_ | |'[ord('fÚ($ºDFZ64'[int(i)])/d&14:][:3]for i in x)+'\n'for d in(64,8,1)) | [
"info@pycontest.net"
] | info@pycontest.net |
fb4d31b4ed1d65ad00156600007e0fbde4e73937 | f724e86aa0b62e638f037834714174eb4421740c | /bin/twitterbot | cbfd661508159e5c823156c483ed127ded1a5f92 | [] | no_license | theparadoxer02/itoucan_venv | 4990e0624a506a95e824895cedae650ceaadfaa6 | 9ac5a55d7cb6670cfb2b1a67a6bd5dd8be9a9850 | refs/heads/master | 2020-03-25T07:23:36.781760 | 2018-08-04T19:44:00 | 2018-08-04T19:44:00 | 143,558,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | #!/mnt/800GB/itoucan/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from twitter.ircbot import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"abhimanyu98986@gmail.com"
] | abhimanyu98986@gmail.com | |
1592ebb4a760f5e41df0d99e960d7ec6bec3580b | feae213de87d4f113706efa607ef471c652213f3 | /src/dhondt.py | 702bf3aa2bc6f6b85f892c31f7c7b1d5d011844a | [] | no_license | yisuscc/proyecto_dhont | 1426e4ebfddb9d0148ca02363a0122b08febec74 | a20d4a6cc3d89020565ed7f295ac0ea946eaf8a9 | refs/heads/main | 2023-03-04T02:14:41.179768 | 2021-01-20T14:34:23 | 2021-01-20T14:34:23 | 331,069,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | '''
Created on 19 ene. 2021
@author: jesus
'''
from collections import namedtuple
import csv
Votos = namedtuple('Votos', 'provincia,partido,votos')
votos = Votos  # alias kept so IDE autocompletion picks the type up

def lee_escrutinio(ruta_votos, ruta_escanos):
    """Read the vote CSV and the seats CSV.

    Returns a tuple (lista_votos, dicc_escanos):
    * lista_votos: list of Votos namedtuples, vote counts converted to int.
    * dicc_escanos: dict province -> seats (seats kept as strings, exactly
      as read from the file).
    Both files are expected to carry a one-line header, which is skipped.
    """
    with open(ruta_votos, encoding='utf-8') as fichero_votos:
        filas = csv.reader(fichero_votos)
        next(filas)  # skip header
        lista_votos = [Votos(provincia, partido, int(num_votos))
                       for provincia, partido, num_votos in filas]
    with open(ruta_escanos, encoding='utf-8') as fichero_escanos:
        filas = csv.reader(fichero_escanos)
        next(filas)  # skip header
        dicc_escanos = {provincia: escano for provincia, escano in filas}
    return lista_votos, dicc_escanos
def calcula_provincias(lista_votos):
    """Return the set of distinct province names present in lista_votos."""
    provincias = set()
    for registro in lista_votos:
        provincias.add(registro.provincia)
    return provincias
def calcula_partidos(lista_votos):
    """TODO: presumably returns the set of distinct party names -- not implemented yet."""
    pass
def calcula_provincia(lista_votos,provincia):
    """TODO: presumably filters/aggregates the votes of one province -- not implemented yet."""
    pass
def calcula_diccionario_provincias(lista_votos):
    """TODO: presumably maps each province to its vote data -- not implemented yet."""
    pass
def totales_por_partido(lista_votos):
    """TODO: presumably sums votes per party across provinces -- not implemented yet."""
    pass
def genera_diagrama_tarta(dicc, limite):
    """TODO: presumably draws a pie chart of the dictionary, grouping below *limite* -- not implemented yet."""
    pass
def genera_mapa_calor(dicc_2d, limite_columnas):
    """TODO: presumably draws a heat map from a 2-level dictionary -- not implemented yet."""
    pass
def calcula_tabla_porcentajes(dicc_2d):
    """TODO: presumably converts the 2-level counts into percentages -- not implemented yet."""
    pass
def calcula_escaños_provincia(dicc, total_escaños, exclusion):
    """TODO: presumably applies the D'Hondt method within one province -- not implemented yet."""
    pass
def calcula_tabla_escaños(dicc_2d):
    """TODO: presumably computes seats for every province -- not implemented yet."""
    pass
| [
"jescarcar5@alum.us.es"
] | jescarcar5@alum.us.es |
5bbe4f70bc23b531ef2d5cdd300592cc0d8033d4 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/semantic_segmentation/BiseNetV1_for_PyTorch/configs/_base_/datasets/ade20k.py | dbc6235a87e790bacdbee49892650fbc29f29a53 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,546 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
# Per-channel normalization constants consumed by the Normalize step below;
# to_rgb=True converts channel order before normalising.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# Random crops of this size are fed to the network during training.
crop_size = (512, 512)
# Training-time augmentation/loading pipeline (applied in order).
train_pipeline = [
    dict(type='LoadImageFromFile'),
    # reduce_zero_label shifts labels so index 0 (background) is ignored.
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    # cat_max_ratio caps how much of a crop a single category may occupy.
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    # seg_pad_val=255 marks padded label pixels as "ignore".
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
# Inference pipeline: single scale, no flipping (multi-scale line kept
# commented out for easy re-enabling).
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Dataloader configuration: batch size / workers per GPU and the three
# dataset splits (val and test both use the validation images).
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
| [
"chenyong84@huawei.com"
] | chenyong84@huawei.com |
2f07cb5a65ea76b088031fc22c625cf4b999e20f | 16cf8cb3d1c79afa90618799e5a008318a235758 | /movies/migrations/0002_auto_20210303_0635.py | 73eb2f86acb52e394a58e6f8603e4b7c8e6e7d57 | [] | no_license | stlegion277/MovieSite | 53ccd50b3bc94eba6bf6937f9f6d0de784ea4076 | 2057b0a264a360533f472c00f88bc9ec02e067b3 | refs/heads/master | 2023-03-20T21:17:33.223713 | 2021-03-12T08:11:33 | 2021-03-12T08:11:33 | 346,974,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # Generated by Django 3.1.7 on 2021-03-03 06:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1.7, see file header): alters the
    Rating.movie foreign key (verbose_name 'фильм'); kept verbatim because
    applied migrations must not be edited by hand."""
    dependencies = [
        ('movies', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='rating',
            name='movie',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.movies', verbose_name='фильм'),
        ),
    ]
| [
"sstlegion277@gmail.com"
] | sstlegion277@gmail.com |
2292fd0b6d12d024e4a04e98c37a5e44540f3aaf | 7b870523b8e432384cff27fd50056da8c6a5b1e3 | /leetcode/080删除排序数组中的重复项II.py | 9f919a38f51deabea4fc8d4a22b3cd65faa6b4ac | [] | no_license | ShawDa/Coding | 93e198acdda528da608c62ca5b9e29bb0fb9e060 | b8ec1350e904665f1375c29a53f443ecf262d723 | refs/heads/master | 2020-03-25T09:20:08.767177 | 2019-09-01T06:25:10 | 2019-09-01T06:25:10 | 143,660,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | # -*- coding:utf-8 -*-
__author__ = 'ShawDa'
class Solution:
    """LeetCode 80: remove duplicates from a sorted array in place so each
    value appears at most twice; returns the new logical length."""

    def removeDuplicates(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        if len(nums) <= 2:
            return len(nums)
        write = 1        # next position to keep a value at
        run_length = 1   # length of the current run of equal values
        for read in range(1, len(nums)):
            if nums[read] != nums[read - 1]:
                run_length = 1
            elif run_length >= 2:
                # Third (or later) copy of the same value: drop it.
                continue
            else:
                run_length += 1
            nums[write] = nums[read]
            write += 1
        return write
| [
"1315193735@qq.com"
] | 1315193735@qq.com |
20f896e3ff8064c129fce9f3925f8772875dc3f3 | f87dffb15d8620c06eed9f6ab63639ef4963b0e1 | /face/picture_save.py | 9b2dbc264770fade48703d69168763cba1121961 | [] | no_license | nananyang/ML-Face | 0efd72bec220f17adf8415a10fcff36faeaee2dc | d7f35b18b3acf65441c988b2b3ef277834899a3b | refs/heads/master | 2020-05-23T22:13:21.680393 | 2019-05-29T08:39:13 | 2019-05-29T08:39:13 | 186,969,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | import cv2
# Grab frames from the default camera, save 300x300 thumbnails until 1024
# images are written or the user presses 'q'.
cam = cv2.VideoCapture(0)
count = 0
while True:
    _, frame = cam.read()
    if frame is None:
        # Camera disconnected or no more frames.
        break
    thumbnail = cv2.resize(frame, dsize=(300,300))
    cv2.imwrite('face-images/sangjun/{0}.jpg'.format(count), thumbnail)
    count += 1
    if count % 128 == 0:
        print(count)  # progress every 128 frames
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == ord('q') or count == 1024:
        break
cam.release()
cv2.destroyAllWindows()
| [
"unlocks@nate.com"
] | unlocks@nate.com |
8853bad43a746228b17368c0ae819e6e8099b0b1 | 15bfa12f4db81320b0f713a33cf941faae29d5d4 | /app/config/urls.py | a09f2c34160033355d1f9338dbf34db891f69a2c | [] | no_license | orca9s/yapen-pro | e8dec2c0e75f3259b8f28cb873fd7fddd04f8acb | ebc58246d5d0b1f4496bbc09e50fbfda6696ffd5 | refs/heads/master | 2022-12-10T12:48:55.105689 | 2018-07-31T12:16:30 | 2018-07-31T12:16:30 | 142,841,899 | 0 | 0 | null | 2022-01-21T19:35:41 | 2018-07-30T07:39:42 | Python | UTF-8 | Python | false | false | 919 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
urlpatterns += static(
prefix=settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT,
)
| [
"sang93423@gmail.com"
] | sang93423@gmail.com |
f712804807fed55b4cc6f50287d69858c43e7251 | fd4c0363e407b9be4d6b0a897ca15400333ce05d | /tetris/ClassicTetromino.py | f38b673b13ab87205042c20ead4f4e22d5d29c9f | [] | no_license | junghyun397/TetrisXQ | 13be3477a45a938368c74225e53e8ce9dd68c8e9 | 16106b47210fe1d0b8497ce2171dac02f1896694 | refs/heads/master | 2020-04-10T12:03:13.996028 | 2019-09-22T12:51:59 | 2019-09-22T12:51:59 | 161,010,029 | 13 | 3 | null | 2019-01-29T14:46:43 | 2018-12-09T06:06:30 | Python | UTF-8 | Python | false | false | 455 | py | from tetris.TetrominoInterface import TetrominoInterface
class ClassicTetromino(TetrominoInterface):
    """Provides the seven classic tetromino shapes as 0/1 cell grids."""

    def build_tetromino(self):
        """Return fresh shape matrices, ordered I, O, T, J, L, S, Z (ids 0-6)."""
        i_piece = [[1, 1, 1, 1]]
        o_piece = [[1, 1], [1, 1]]
        t_piece = [[1, 1, 1], [0, 1, 0]]
        j_piece = [[1, 1, 1], [0, 0, 1]]
        l_piece = [[1, 1, 1], [1, 0, 0]]
        s_piece = [[0, 1, 1], [1, 1, 0]]
        z_piece = [[1, 1, 0], [0, 1, 1]]
        return [i_piece, o_piece, t_piece, j_piece, l_piece, s_piece, z_piece]
"junghyun397@gmail.com"
] | junghyun397@gmail.com |
e7d18b5d045fc011fa8c416bae026c4a0260d4ce | d4786e5d67a206c583d6b4bd3fa266df2ae21d20 | /MyModel.py | 1e410f0e59d62d2a6458105dce553ff663fffe4a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | sekheng/cocos2dx-Server | 53968a5d4c17053034bffb0ee454ea3e221bce9d | 86ef691a51e10fd650d35f7f1754c3d976ba99f2 | refs/heads/master | 2021-03-22T01:33:10.941181 | 2018-02-09T11:27:07 | 2018-02-09T11:27:07 | 120,410,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from google.appengine.ext import ndb
class HighScore(ndb.Model):
    """App Engine Datastore entity for one leaderboard entry."""
    mScore = ndb.FloatProperty()       # the achieved score
    mName = ndb.StringProperty()       # player name
    isDeleted = ndb.BooleanProperty()  # soft-delete flag
    @staticmethod
    def CreateHighscore(_score, _name):
        """Factory: build a new (not yet persisted) entry with isDeleted False."""
        return HighScore(mScore = _score, mName = _name, isDeleted = False)
"leesekheng@yahoo.com"
] | leesekheng@yahoo.com |
ab7b0fb7c27dcef026207458518195971155ace2 | e19d43d20957c67665fa558fcb39cdf37012cdbb | /scripts/plots/get_neighs.py | a3bda7fcbd5551561533a7529aa3e900e8cba337 | [] | no_license | GuodongYu/tBG | ede44a8f53e86c49fbee773fecb7a72a8b250c77 | 996248299c2de1ec7f553626cc6a9f280af7cd79 | refs/heads/master | 2023-03-27T01:19:17.540460 | 2021-03-28T22:42:11 | 2021-03-28T22:42:11 | 274,331,035 | 7 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,104 | py | from tBG.molecule.round_disk import RoundDisk
from tBG.hopping import get_neighbor_shells_intralayer, filter_neig_list
import numpy as np
def get_rd_pmg_st():
    """Build a 5-ring round disk (top layer removed, pz hoppings added) and
    return it together with its pymatgen structure."""
    disk = RoundDisk()
    disk.make_structure(5)
    disk.remove_top_layer()
    disk.add_hopping_pz(max_dist=2.)
    structure = disk.pymatgen_struct()
    return disk, structure
# Plot, for site 30, the bonds to each of its first eight neighbour shells
# and save one figure per shell.
from matplotlib import pyplot as plt  # hoisted: was re-imported every loop pass

rd, pmg_st = get_rd_pmg_st()
neigh_list = pmg_st.get_neighbor_list(10.0)
filtered_neigh_list = filter_neig_list(neigh_list)
# Element [1] maps shell distance -> neighbour info for intralayer shells.
dist_shell = get_neighbor_shells_intralayer(filtered_neigh_list, 8)[1]
rs = sorted(list(dist_shell.keys()))
a, b, c, d = neigh_list
d = np.round(d, 3)
id_from = 30
ids0 = np.where(a==id_from)[0]
xy0 = rd.coords[id_from][0:2]
for i in range(8):
    r = rs[i]
    ids1 = np.where(d==r)[0]
    ids = np.intersect1d(ids0, ids1)
    ids_to = b[ids]
    fig, ax = plt.subplots()
    rd.plot(fig, ax)
    rd.add_hopping_pz(max_dist=2.)
    rd.plot(fig, ax)
    for id_to in ids_to:
        xy1 = rd.coords[id_to][0:2]
        plt.plot([xy0[0], xy1[0]], [xy0[1], xy1[1]], lw=3.0)
    # NOTE(review): '%i' formats the integer, so files are named 'NN0nt.png',
    # 'NN1nt.png', ... -- confirm whether 'NN%d_int.png' was intended.
    plt.savefig('NN%int.png' % i)
    plt.close(fig)  # fixed: figures were never closed, leaking memory over the loop
| [
"yugd@live.cn"
] | yugd@live.cn |
b66a49865d6da165e1f15de5f28755999c6b0068 | aa924bbb174676c00dfef9b976790a01506bd37e | /Tools/CollectLocalizedStrings.py | 5f122ee26c656edcd62ebb03d3c6a923ff04c55a | [
"MIT"
] | permissive | kovacsv/VisualScriptEngine | 5b3b8b7b6999082b0f916034a83d6e9675510ed6 | 185a451e2391fbff0bb4dd5929e8634517382c4e | refs/heads/master | 2023-03-15T19:00:48.535709 | 2021-12-17T15:47:51 | 2021-12-17T15:47:51 | 113,492,822 | 156 | 32 | MIT | 2023-02-24T16:07:36 | 2017-12-07T19:57:56 | C++ | UTF-8 | Python | false | false | 1,831 | py | import os
import sys
import re
import codecs
def GetFileContent (fileName):
file = codecs.open (fileName, 'r', 'utf-8')
content = file.read ()
file.close ()
return content
def GetFilesInDirectory (path):
fileList = []
for folder, subs, files in os.walk (path):
for fileName in files:
fileList.append (os.path.join (folder, fileName))
return fileList
def Main (argv):
currentDir = os.path.dirname (os.path.abspath (__file__))
os.chdir (currentDir)
argCount = len (argv)
if argCount < 3:
print ('usage: CollectLocalizedStrings.py <sourceFolder>* <resultPoFile>')
return 1
searchCriterias = [
'LocString\s*\(L"([^"]+)"\)',
'LocalizeString\s*\(L"([^"]+)"\)'
]
sourceFolders = []
for i in range (1, argCount - 1):
sourceFolders.append (argv[i])
resultPoFile = os.path.abspath (argv[argCount - 1])
stringEscapeChar = '\\"'
stringEscapePlaceholder = '$$$'
locStrings = []
for sourceFolder in sourceFolders:
sourceFiles = GetFilesInDirectory (sourceFolder)
for sourceFile in sourceFiles:
fileContent = GetFileContent (sourceFile)
fileContent = fileContent.replace (stringEscapeChar, stringEscapePlaceholder)
for searchCriteria in searchCriterias:
currLocStrings = re.findall (searchCriteria, fileContent)
for locString in currLocStrings:
locString = locString.replace (stringEscapePlaceholder, stringEscapeChar);
if not locString in locStrings:
locStrings.append (locString)
resultFileDir = os.path.dirname (resultPoFile)
if not os.path.exists (resultFileDir):
os.makedirs (resultFileDir)
poFile = codecs.open (resultPoFile, 'w', 'utf-8')
poFile.write ('\ufeff')
for locString in locStrings:
poFile.write ('msgid "' + locString + '"\n')
poFile.write ('msgstr "' + locString + '"\n\n')
poFile.close ()
return 0
sys.exit (Main (sys.argv))
| [
"viktorkovacs@gmail.com"
] | viktorkovacs@gmail.com |
5119527225ceb6c8426e13f85d82f904b18ac00d | ad08e80d591f97bbb3696b919e480073db11031c | /player/mac_algoritmasi.py | 1bdebe841d39d61b284ae9c258cd3a5910566532 | [] | no_license | diren97/Football-Simulator | c73e22ef25634a0b4e991a6fe6dc3e6942e17319 | 2683acb3864e669c011862bf66c428627a161218 | refs/heads/master | 2020-12-29T21:52:50.938693 | 2020-02-06T17:36:34 | 2020-02-06T17:36:34 | 238,745,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,736 | py | from random import randint
import time
import sys
class mac_algoritmasi:
    """Match simulation engine.

    Derives the action count and the stronger side from two squads of eleven
    player dicts (string stats under "tec", "att", "def"), then produces the
    random in-match events: action minutes, goals, yellow and red cards.
    Formation lists (*_mevki / *_mevkiler) are [defenders, midfielders,
    forwards] counts; returned player numbers are 1-based squad indices.
    """
    aksiyon_sayisi = 0        # number of noteworthy actions in the match
    guclu_takim = 0           # 1 or 2: the stronger team per aks_sayi_takim_gucu()
    aksiyon_dakikalari = []   # minutes (0-90) at which actions happen

    def mac_ani(self, t, takim1, takim2, skor1=0, skor2=0):
        """Render a live scoreboard countdown of t seconds for the two teams."""
        while t > 0:
            sys.stdout.write('\r{} {} -- {} -- {} {}'.format(takim1.get("takim_adi"), skor1, t, skor2, takim2.get("takim_adi")))
            t -= 1
            sys.stdout.flush()
            time.sleep(1)
        # Placeholder: per-minute action handling is not implemented yet.
        for i in self.aksiyon_dakikalari:
            ""

    def aks_sayi_takim_gucu(self, takim1, takim2, takim1_mevkiler, takim2_mevkiler):
        """Compute which team is stronger and how many actions the match has.

        Sets self.guclu_takim (1 or 2) and self.aksiyon_sayisi (>= 5).
        Player stats are weighted by position: keepers favour technique and
        defence, defenders favour defence, midfielders technique, forwards
        attack.
        """
        oyuncu_sayilari = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        takim1_toplam_att = 0
        takim1_toplam_def = 0
        takim1_toplam_tec = 0
        takim2_toplam_att = 0
        takim2_toplam_def = 0
        takim2_toplam_tec = 0
        temp = 0
        takim1_kaleci_sayisi = 1  # one goalkeeper per side, by convention
        for mevki in takim1_mevkiler:
            if temp == 0:
                takim1_defans_sayisi = mevki
            elif temp == 1:
                takim1_orta_sayisi = mevki
            elif temp == 2:
                takim1_forvet_sayisi = mevki
            temp += 1
        temp = 0
        for mevki in takim2_mevkiler:
            if temp == 0:
                takim2_defans_sayisi = mevki
            elif temp == 1:
                takim2_orta_sayisi = mevki
            elif temp == 2:
                takim2_forvet_sayisi = mevki
            temp += 1
        temp = 0
        for i in takim1:
            if temp == 0:
                # Goalkeeper (index 0).
                takim1_toplam_tec += int(i.get("tec")) * 5
                takim1_toplam_att += int(i.get("att")) * 0.5
                takim1_toplam_def += int(i.get("def")) * 3
            if temp in oyuncu_sayilari[1:takim1_defans_sayisi]:
                takim1_toplam_tec += int(i.get("tec"))
                takim1_toplam_att += int(i.get("att")) * 0.5
                takim1_toplam_def += int(i.get("def")) * 5
            elif temp in oyuncu_sayilari[takim1_defans_sayisi:takim1_defans_sayisi + takim1_orta_sayisi]:
                takim1_toplam_tec += int(i.get("tec")) * 5
                takim1_toplam_att += int(i.get("att")) * 2
                takim1_toplam_def += int(i.get("def")) * 2
            elif temp in oyuncu_sayilari[takim1_orta_sayisi + takim1_defans_sayisi:]:
                takim1_toplam_tec += int(i.get("tec")) * 5
                takim1_toplam_att += int(i.get("att")) * 5
                takim1_toplam_def += int(i.get("def"))
            temp += 1
        temp = 0
        for i in takim2:
            if temp == 0:
                takim2_toplam_tec += int(i.get("tec")) * 5
                takim2_toplam_att += int(i.get("att")) * 0.5
                takim2_toplam_def += int(i.get("def")) * 3
            # Fixed: the original sliced with team 1's formation counts here,
            # mis-weighting team 2 whenever the formations differ.
            if temp in oyuncu_sayilari[1:takim2_defans_sayisi]:
                takim2_toplam_tec += int(i.get("tec"))
                takim2_toplam_att += int(i.get("att")) * 0.5
                takim2_toplam_def += int(i.get("def")) * 5
            elif temp in oyuncu_sayilari[takim2_defans_sayisi:takim2_defans_sayisi + takim2_orta_sayisi]:
                takim2_toplam_tec += int(i.get("tec")) * 5
                takim2_toplam_att += int(i.get("att")) * 2
                takim2_toplam_def += int(i.get("def")) * 2
            elif temp in oyuncu_sayilari[takim2_orta_sayisi + takim2_defans_sayisi:]:
                takim2_toplam_tec += int(i.get("tec")) * 5
                takim2_toplam_att += int(i.get("att")) * 5
                takim2_toplam_def += int(i.get("def"))
            temp += 1
        takim1_tec_ort = float(takim1_toplam_tec / 11)
        takim1_def_ort = float(takim1_toplam_def / 11)
        takim1_att_ort = float(takim1_toplam_att / 11)
        takim2_tec_ort = float(takim2_toplam_tec / 11)
        takim2_def_ort = float(takim2_toplam_def / 11)
        takim2_att_ort = float(takim2_toplam_att / 11)
        a = takim1_att_ort - takim2_def_ort   # team 1 attack vs team 2 defence
        b = takim2_att_ort - takim1_def_ort   # team 2 attack vs team 1 defence
        c = takim1_tec_ort - takim2_tec_ort   # technique difference
        att_def_farki = a - b
        if att_def_farki > 0:  # team 1 has the attacking edge
            if c > 0:  # and the technical edge too
                att_def_farki += 2 * c
                self.guclu_takim = 1
            else:
                att_def_farki -= 2 * c
                if att_def_farki < 0:
                    self.guclu_takim = 2
                    # Fixed: the original called abs() and discarded the result.
                    att_def_farki = abs(att_def_farki)
                else:
                    self.guclu_takim = 1
        else:
            if c > 0:
                att_def_farki -= 2 * c
                if att_def_farki < 0:
                    self.guclu_takim = 2
                    att_def_farki = abs(att_def_farki)
                else:
                    self.guclu_takim = 1
            else:
                att_def_farki += 2 * c
                self.guclu_takim = 2
        # Fixed: the action COUNT was assigned to self.aksiyon_dakikalari
        # (the minutes list) instead of self.aksiyon_sayisi; a match always
        # has at least 5 actions.
        if att_def_farki / 3 < 5:
            self.aksiyon_sayisi = 5
        else:
            self.aksiyon_sayisi = att_def_farki / 3

    def aksiyon_dakikalari(self, aksiyon_sayisi):
        """Draw a random minute (0-90) for each of the aksiyon_sayisi actions.

        NOTE(review): the method shares its name with the class attribute it
        fills, so after the first call the instance attribute shadows the
        method; kept as-is to preserve the public interface.
        """
        self.aksiyon_dakikalari = []
        # Fixed: the original iterated the integer itself (TypeError).
        for _ in range(int(aksiyon_sayisi)):
            x = randint(0, 90)
            self.aksiyon_dakikalari.append(x)

    def sari_kart(self, takim_mevki):
        """Return the squad index (1-based) of a randomly booked player,
        or None when no yellow card is shown this time.

        Defenders are booked ~30% of the time, midfielders ~50%, forwards ~20%.
        """
        if randint(0, 1):
            a = randint(1, 100)
            if a < 31:
                defans = takim_mevki[0]
                takim1_kart = randint(1, defans)
                return takim1_kart
            elif a < 81:
                orta_saha = takim_mevki[1]
                # Fixed here and below: randint was subscripted with []
                # instead of being called, which raises TypeError.
                takim1_kart = randint(1, orta_saha)
                return takim1_kart + int(takim_mevki[0])
            else:
                forvet = takim_mevki[2]
                takim1_kart = randint(1, forvet)
                return takim1_kart + int(takim_mevki[0] + takim_mevki[1])

    def kirmizi_kart(self, takim1_mevki, takim2_mevki):
        """Pick a team at random and return the squad index (1-based) of the
        player shown a red card (defenders ~50%, midfielders ~40%, forwards ~10%)."""
        if randint(0, 1):
            a = randint(1, 100)
            if a < 51:
                defans = takim1_mevki[0]
                takim1_kart = randint(1, defans)
                return takim1_kart
            elif a < 91:
                orta_saha = takim1_mevki[1]
                takim1_kart = randint(1, orta_saha)
                return takim1_kart + int(takim1_mevki[0])
            else:
                forvet = takim1_mevki[2]
                takim1_kart = randint(1, forvet)
                return takim1_kart + int(takim1_mevki[0] + takim1_mevki[1])
        else:
            a = randint(1, 100)
            if a < 51:
                defans = takim2_mevki[0]
                takim2_kart = randint(1, defans)
                return takim2_kart
            elif a < 91:
                orta_saha = takim2_mevki[1]
                takim2_kart = randint(1, orta_saha)
                return takim2_kart + int(takim2_mevki[0])
            else:
                forvet = takim2_mevki[2]
                takim2_kart = randint(1, forvet)
                return takim2_kart + int(takim2_mevki[0] + takim2_mevki[1])

    def gol(self, takim1_mevki, takim2_mevki, guclu_takim_no=1):
        """Return the squad index (1-based) of the scorer.

        The stronger team (guclu_takim_no) scores ~70% of the time; within a
        team, defenders score ~20%, midfielders ~40%, forwards ~40%.
        """
        if guclu_takim_no == 1:
            guc = randint(1, 10)
            if guc < 8:
                a = randint(1, 100)
                if a < 21:
                    defans = takim1_mevki[0]
                    takim1_gol = randint(1, defans)
                    return takim1_gol
                elif a < 61:
                    orta_saha = takim1_mevki[1]
                    takim1_gol = randint(1, orta_saha)
                    return takim1_gol + int(takim1_mevki[0])
                else:
                    forvet = takim1_mevki[2]
                    takim1_gol = randint(1, forvet)
                    return takim1_gol + int(takim1_mevki[0] + takim1_mevki[1])
            else:
                a = randint(1, 100)
                if a < 21:
                    defans = takim2_mevki[0]
                    takim2_gol = randint(1, defans)
                    return takim2_gol
                elif a < 61:
                    orta_saha = takim2_mevki[1]
                    takim2_gol = randint(1, orta_saha)
                    return takim2_gol + int(takim2_mevki[0])
                else:
                    forvet = takim2_mevki[2]
                    takim2_gol = randint(1, forvet)
                    return takim2_gol + int(takim2_mevki[0] + takim2_mevki[1])
        else:
            # Team 2 is the stronger side: swap the formations and reuse the
            # same weighting logic.
            temp = takim1_mevki
            takim1_mevki = takim2_mevki
            takim2_mevki = temp
            guc = randint(1, 10)
            if guc < 8:
                a = randint(1, 100)
                if a < 21:
                    defans = takim1_mevki[0]
                    takim1_gol = randint(1, defans)
                    return takim1_gol
                elif a < 61:
                    orta_saha = takim1_mevki[1]
                    takim1_gol = randint(1, orta_saha)
                    return takim1_gol + int(takim1_mevki[0])
                else:
                    forvet = takim1_mevki[2]
                    takim1_gol = randint(1, forvet)
                    return takim1_gol + int(takim1_mevki[0] + takim1_mevki[1])
            else:
                a = randint(1, 100)
                if a < 21:
                    defans = takim2_mevki[0]
                    takim2_gol = randint(1, defans)
                    return takim2_gol
                elif a < 61:
                    orta_saha = takim2_mevki[1]
                    takim2_gol = randint(1, orta_saha)
                    return takim2_gol + int(takim2_mevki[0])
                else:
                    forvet = takim2_mevki[2]
                    takim2_gol = randint(1, forvet)
                    return takim2_gol + int(takim2_mevki[0] + takim2_mevki[1])

    def mac_esnasi(self):
        """TODO: in-match flow is not implemented yet."""
        ""
"diren.aydin97@gmail.com"
] | diren.aydin97@gmail.com |
99f9879b084e7995182c1781a64d4acf9fe84f60 | e53121ca0848b23be4c28953b600d190c981b91f | /pybeerxml/yeast.py | a9edcd3f4329f6887328e1a33d5be0781f602328 | [
"MIT"
] | permissive | scheb/pybeerxml | ccce08ee055278f8609495992a3d99117190dc4b | 447cd184fac086a349681dd54e6172425fe5d173 | refs/heads/master | 2022-12-20T03:23:17.603436 | 2020-05-05T08:46:08 | 2020-05-05T08:46:08 | 292,526,512 | 0 | 0 | MIT | 2020-09-03T09:34:56 | 2020-09-03T09:34:56 | null | UTF-8 | Python | false | false | 273 | py | class Yeast(object):
def __init__(self):
self.name = None
self.type = None
self.form = None
self.attenuation = None
self.notes = None
self.laboratory = None
self.product_id = None
self.flocculation = None
| [
"tom.herold@scalableminds.com"
] | tom.herold@scalableminds.com |
6b01cfb5c8f5db0bcbba603b6e193f69037795a9 | 2679238f9042eb26f1621520ba556cf8760b70d2 | /api/models.py | 4cd7c9799772f26d821d1e036cd4046a231ec74d | [] | no_license | zhenwei94/DRF-API-Practice | d97574455159de56a97410b62df3494cafc9839c | 306ff6eb0e44cfb7dc844706b9524f2a4caf2b53 | refs/heads/master | 2023-06-21T23:12:51.843646 | 2021-07-21T14:47:34 | 2021-07-21T14:47:34 | 314,600,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,294 | py | from django.db import models
from django.core.validators import MinLengthValidator, MaxLengthValidator, MaxValueValidator, MinValueValidator
from django.contrib.auth.models import User
def upload_path(instance, filename):
    """Build the storage path for an uploaded cover image.

    Files land under ``covers/<title>/<original filename>``; the title is
    stringified so non-string titles are handled too.
    """
    return "covers/{0}/{1}".format(instance.title, filename)
# Create your models here.
class Booknumber(models.Model):
    """ISBN identifiers for a book; each form must be globally unique."""
    # max_length caps the field at the DB level while MinLengthValidator
    # (form/model validation only) pins the exact required length.
    isbn10 = models.CharField(validators=[MinLengthValidator(10)], max_length=10, unique=True)
    isbn13 = models.CharField(validators=[MinLengthValidator(13)], max_length=13, unique=True)
class Author(models.Model):
    """An author; linked many-to-many with Book (a book can have several authors)."""
    name = models.CharField(max_length=50)
    surname = models.CharField(max_length=20)
    # Reverse accessor on Book is `authors` (book.authors.all()).
    book = models.ManyToManyField('Book', related_name='authors')
class Movie(models.Model):
    """A movie title with an optional cover image stored via upload_path."""
    title = models.CharField(max_length=50, blank=False)
    cover = models.ImageField(blank=True, null = True, upload_to=upload_path)
class Book(models.Model):
    """A book with its ISBN record, an optional cover image and user ratings."""
    title = models.CharField(max_length=50, blank=False)
    description = models.CharField(max_length=100, null=False)
    bookNumber = models.OneToOneField(Booknumber, on_delete=models.CASCADE, null=False, blank=False)
    cover = models.ImageField(blank=True, null = True, upload_to=upload_path)

    def no_ratings(self):
        """Return how many ratings this book has received."""
        # .count() lets the database run SELECT COUNT(*) instead of
        # fetching every Rating row just to len() the list.
        return Rating.objects.filter(book=self).count()

    def avg_ratings(self):
        """Return the mean rating for this book, or 0 when it has none."""
        ratings = Rating.objects.filter(book=self)
        total = sum(r.rating for r in ratings)
        count = len(ratings)
        # Guard against division by zero for unrated books; returning 0
        # matches the original behaviour.
        return total / count if count else 0
class Character(models.Model):
    """A character appearing in a single Book."""
    name = models.CharField(max_length=50)
    # NOTE(review): related_name='books' means book.books.all() yields
    # characters, which reads oddly -- 'characters' was probably intended.
    # Renaming would change the public reverse accessor, so only flagging it.
    book = models.ForeignKey(Book,on_delete=models.CASCADE, related_name='books')
class Rating(models.Model):
    """A 1-5 star rating a user gives to a book (at most one per user/book pair)."""
    rating = models.IntegerField(validators=[MinValueValidator(1),MaxValueValidator(5)])
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    book = models.ForeignKey(Book, on_delete=models.CASCADE, related_name='ratings')
    class Meta:
        # NOTE(review): (('user','book')) is just ('user','book') -- the inner
        # parentheses do not create a nested tuple. Django accepts a flat tuple
        # of field names here, but the canonical form is (('user', 'book'),).
        unique_together=(('user','book'))
        index_together = (('user','book'))
class Person(models.Model):
    """Profile data extending Django's built-in User one-to-one."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='person')
    age = models.PositiveSmallIntegerField()
    bio = models.CharField(max_length=256)
| [
"zhennweii94@gmail.com"
] | zhennweii94@gmail.com |
2eff8a4800908baec255bf8a1ad50839ec9f8c29 | 5a48daaeb240280c974d4e89115e655970701887 | /plot_data.py | 013e4fa2542c3260aa4a518fa958ebf54757961a | [] | no_license | ragulin/mds-riksdagen | 78eaa64d0264f82e5b2c76a56bb3eee40b4e93ef | 69aa1235284bcd6d755412f1ccd0e92e8984321b | refs/heads/master | 2021-01-22T03:01:37.370991 | 2013-11-23T09:55:50 | 2013-11-23T09:55:50 | 14,639,909 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import euclidean_distances
import sys
# Hex colours for the Swedish parliament (riksdagen) parties.
_PARTY_COLORS = {
    'V': '#D90000',
    'S': '#FF0000',
    'MP': '#0DFF5B',
    'C': '#078C32',
    'FP': '#2977E8',
    'M': '#290CE8',
    'KD': '#1111E8',
    'SD': '#704805',
}


def party_color(party):
    """Return the hex colour for a party code (case-insensitive).

    Unknown party codes yield None, matching the original fall-through.
    """
    return _PARTY_COLORS.get(party.upper())
file = sys.argv[1]

# Bug fix: the original assigned plot_names only inside the IndexError
# handler, so passing a second argument other than "true" (e.g. "false")
# left plot_names undefined and crashed with NameError at the annotate
# call below.  Default to False and enable only on an explicit "true".
plot_names = len(sys.argv) > 2 and sys.argv[2] == "true"

df = pd.read_csv(file)
# Every column except the identifying ones is a vote record.
vote_columns = [c for c in df.columns if c != 'voter' and c != 'party' and c != 'name']

# Classic MDS embedding of the pairwise Euclidean distances between
# members' voting records.
distances = euclidean_distances(df[vote_columns].values)
mds = MDS(dissimilarity="precomputed").fit_transform(distances)

plt.figure(figsize = (8, 5))
# Invisible scatter (alpha=0) just to set the axis limits for annotate.
plt.plot(mds[:, 0], mds[:, 1], '.', alpha = 0)

for voter in df.iterrows():
    # Label each point with either the party code or the member's name,
    # coloured by party.  (.decode('utf-8') implies Python 2 byte strings.)
    plt.annotate((voter[1]['party'], voter[1]['name'].decode('utf-8'))[plot_names],
                 (mds[voter[0], 0], mds[voter[0], 1]),
                 color = party_color(voter[1]['party']),
                 horizontalalignment = 'center',
                 verticalalignment = 'center')

plt.setp(plt.gca().get_yaxis(), visible = False)
plt.setp(plt.gca().get_xaxis(), visible = False)
plt.savefig(file + ".png")
| [
"mikaelfor@gmail.com"
] | mikaelfor@gmail.com |
5acee43c388a21a67f5191b96f2b042f9df2023d | 92b6091b6b37d55649aa31c606022923931085ad | /tests/lib/logging.py | f5a1f5a3212b76db6affae3fbec054e6c8d6ab7b | [] | permissive | premandfriends/boardfarm-1 | 043d5db7d5cf09826eb4498f8ce8105fa065f5c2 | 3c952c94507fff25ba9955cad993610ea4a95e2e | refs/heads/master | 2020-04-06T11:09:29.868388 | 2019-04-29T09:12:43 | 2019-04-30T14:10:50 | 157,405,854 | 0 | 0 | BSD-3-Clause-Clear | 2018-11-13T15:52:11 | 2018-11-13T15:52:10 | null | UTF-8 | Python | false | false | 3,162 | py | # Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
import time
import types
from datetime import datetime
from termcolor import colored
import re
def now_short(_format = "%Y%m%d-%H%M%S"):
    """Return the current local date/time formatted per *_format*.

    A trailing tab is appended so the result can be used directly as a
    log-line prefix.
    """
    return time.strftime(_format, time.localtime()) + "\t"
def logfile_assert_message(s, condition, message):
    """Record *message* with a PASS/FAIL verdict in s.log_to_file.

    On failure the function also raises AssertionError so the test run
    stops, mirroring an assert with the FAIL message.
    """
    verdict = "PASS" if condition else "FAIL"
    s.log_to_file += now_short() + message + ": " + verdict + "\r\n"
    if not condition:
        assert 0, message + ": FAIL\r\n"
class LoggerMeta(type):
    """Metaclass that wraps every method of its classes in a call logger.

    Each call and its return value are appended to the instance's
    ``log_calls`` string, timestamped relative to ``instance.start``.
    Python 2 only: relies on ``dict.iteritems``.
    """
    def __new__(cls, name, bases, attrs):
        # Swap every plain function in the class namespace for its logging
        # wrapper before the class object is created.
        for attr_name, attr_value in attrs.iteritems():
            if isinstance(attr_value, types.FunctionType):
                attrs[attr_name] = cls.deco(attr_value)
        return super(LoggerMeta, cls).__new__(cls, name, bases, attrs)
    @classmethod
    def deco(cls, func):
        # Decorator factory: logs "calling ..." before and "returned ..."
        # after the wrapped call.  args[0] is the instance (self); logging
        # only happens once it has a 'start' datetime attribute.
        def wrapper(*args, **kwargs):
            func_args_str = "%s %s" % (repr(args), repr(kwargs))
            to_log = '%s.%s ( %s )' % (func.__module__, func.__name__, func_args_str)
            if hasattr(args[0], 'start'):
                args[0].log_calls += '[%s]calling %s\r\n' % ((datetime.now()-args[0].start).total_seconds(), to_log)
            ret = func(*args, **kwargs)
            if hasattr(args[0], 'start'):
                args[0].log_calls += "[%s]returned %s = %s\r\n" % ((datetime.now()-args[0].start).total_seconds(), to_log, repr(ret))
            return ret
        return wrapper
def log_message(s, msg, header = False):
    """Print *msg* to stdout and append it, timestamped, to s.log_to_file.

    With header=True the console shows a starred banner while the log file
    records the message framed by '=' rules; otherwise the console shows
    the framed form and the log file records the bare message.
    """
    rule = '=' * len(msg)
    framed = "\n\t\t" + rule + "\n\t\t" + msg + "\n\t\t" + rule + "\n"
    if header:
        print("\n\n\t\t\t***" + msg + "***\n\n")
        s.log_to_file += now_short() + framed + "\r\n"
    else:
        print(framed)
        s.log_to_file += now_short() + msg + "\r\n"
class o_helper(object):
    """Stream wrapper that tees writes to an output stream and to logs.

    Text written here goes to ``out`` (optionally colourised via termcolor)
    and is also appended to ``parent.log`` with a ``[seconds]`` prefix on
    every line, measured from ``parent.start``.
    """
    def __init__(self, parent, out, color):
        # color: termcolor colour name, or None for plain passthrough.
        self.color = color
        self.out = out
        self.parent = parent
        # Used to force a leading newline on the very first write.
        self.first_write = True
    def write(self, string):
        if self.first_write:
            self.first_write = False
            string = "\r\n" + string
        if self.color is not None:
            self.out.write(colored(string, self.color))
        else:
            self.out.write(string)
        # Logging only starts once the parent has a 'start' timestamp.
        if not hasattr(self.parent, 'start'):
            return
        td = datetime.now()-self.parent.start
        # check for the split case: a previous write ended with '\r' and this
        # one begins with '\n', so the timestamp must be inserted between them.
        if len(self.parent.log) > 1 and self.parent.log[-1] == '\r' and string[0] == '\n':
            tmp = '\n[%s]' % td.total_seconds()
            tmp += string[1:]
            string = tmp
        # Prefix every subsequent line with the elapsed-seconds stamp.
        to_log = re.sub('\r\n', '\r\n[%s]' % td.total_seconds(), string)
        self.parent.log += to_log
        if hasattr(self.parent, 'test_to_log'):
            # Mirror into the per-test log, tagging lines with the test prefix.
            self.parent.test_to_log.log += re.sub('\r\n\[', '\r\n%s: [' % self.parent.test_prefix, to_log)
    def flush(self):
        self.out.flush()
| [
"msm-oss@mcclintock.net"
] | msm-oss@mcclintock.net |
# Plot the measured branching ratio versus applied power, with error bars.
import numpy as np
import matplotlib
from matplotlib import pyplot

# Measured branching ratios, the power settings (dBm) they were taken at,
# and the statistical uncertainty of each point.
BR = np.array([0.9357, 0.9357, 0.9357, 0.9356, 0.9356, 0.9357])
power = np.array([-20.01, -20.0, -19.99, -15.0, -15.01, -11.0])
error = np.array([0.0001, 0.0001, 0.0001, 0.0001, 0.0002, 0.0002])

pyplot.errorbar(power, BR, yerr=error)
pyplot.title('Branching Ratio')
pyplot.show()
"haeffnerlab@gmail.com"
] | haeffnerlab@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.