seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
127045110 | # 这样处理是错的:处理[3, 4]的时候, left=3, right=4, mid=3, 最后传入的时候list=[3, 4],
# 因为它是按分开后的数据重新计数, 此时list的索引是从0开始的
# def merge_sort(list, left, right):
# if len(list) <= 1:
# return list
# mid = int((left + right)/2)
# left_list = merge_sort(list[: mid+1], left, mid)
# right_list = merge_sort(list[mid+1:], mid+1, right)
# return merge(left_list, right_list)
def merge_sort(items):
    """Sort *items* ascending with top-down merge sort; returns a new list.

    Renamed the parameter from ``list`` to avoid shadowing the builtin.
    """
    if len(items) <= 1:
        return items
    # // is integer (floor) division; / would yield a float index
    mid = len(items) // 2
    left_half = merge_sort(items[:mid])
    right_half = merge_sort(items[mid:])
    return merge(left_half, right_half)


def merge(left_list, right_list):
    """Merge two sorted lists into one sorted list.

    Stable: on ties the left element is taken first. Uses index cursors
    instead of pop(0), which is O(n) per call and made the merge quadratic;
    this version also leaves the input lists unmodified.
    """
    merged = []
    i = j = 0
    while i < len(left_list) and j < len(right_list):
        if left_list[i] > right_list[j]:
            merged.append(right_list[j])
            j += 1
        else:
            merged.append(left_list[i])
            i += 1
    # One side is exhausted; the remainder of the other is already sorted.
    merged.extend(left_list[i:])
    merged.extend(right_list[j:])
    return merged
# Demo: sort a sample list (renamed from "list" so the builtin is not shadowed).
data = [2, 1, 5, 3, 4]
res = merge_sort(data)
print(res)
| null | dir_sort/merge_sort.py | merge_sort.py | py | 1,224 | python | en | code | null | code-starcoder2 | 51 |
166823333 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 10:51:54 2018
@author: matthewszhang
"""
import time
import os
import os.path as osp
import numpy as np
from baselines import logger
from collections import deque
from baselines.feudal.models import I2AModel
from baselines.feudal.runners import I2ARunner
PATH="tmp/build/graph"
def package_environment(states, actions, rewards):
    """Build environment-model training data from per-episode rollouts.

    For each episode, pairs every state with its successor: (s_t, a_t, r_t, s_{t+1}).
    Returns four parallel lists: states, actions, rewards, next-states.

    Fixes two defects in the original: the loop iterated the 3-tuple
    ``(states, actions, rewards)`` instead of zipping the episodes together,
    and a stray trailing expression referenced an undefined name ``arr``
    (raising NameError); the function also never returned its result.
    """
    train_states = []
    train_actions = []
    train_rewards = []
    train_nstates = []
    for (state, action, reward) in zip(states, actions, rewards):
        train_states.append(state[:-1])
        train_nstates.append(state[1:])
        train_actions.append(action[:-1])
        train_rewards.append(reward[:-1])
    return train_states, train_actions, train_rewards, train_nstates
def pack(arr):
    """Stack a list of per-episode arrays into one array.

    Equal-length rows are vstacked; a single row is flattened to 1-D;
    ragged rows fall back to hstack. The original called the nonexistent
    ``np.flatten`` (its AttributeError was swallowed by a bare ``except``
    and silently diverted to hstack) — use ``ravel`` and catch only the
    stacking failure.
    """
    try:
        stacked = np.vstack(arr)
    except ValueError:
        # Rows of unequal length cannot be vstacked; concatenate instead.
        return np.hstack(arr)
    if stacked.shape[0] == 1:
        return stacked.ravel()
    return stacked
def constfn(val):
    """Return a schedule function that ignores its argument and always yields *val*."""
    def schedule(_):
        return val
    return schedule
def sbi(arr, dones):
    """Split *arr* into per-episode chunks at the indices where *dones* == 1.

    A trailing unfinished episode (last flag not set) is kept as a final chunk.
    """
    episodes = []
    start = 0
    n = dones.shape[0]
    for idx in range(n):
        if dones[idx] == 1:
            episodes.append(arr[start:idx + 1])
            start = idx + 1
        elif idx == n - 1:
            # Rollout ended mid-episode: keep the partial tail.
            episodes.append(arr[start:])
    return episodes
def mcret(actions, rews, dones, vals, lam=0.95, gam=0.99):
    """Compute GAE(lambda) advantages and lambda-returns.

    Args:
        actions: unused; kept for call-site compatibility.
        rews: per-step rewards, shape (nsteps, ...).
        dones: per-step terminal flags aligned with rews.
        vals: per-step value estimates aligned with rews.
        lam: GAE smoothing factor lambda.
        gam: discount factor gamma.

    Returns:
        (returns, advantages), each shaped like rews.

    Changes vs. original: removed the dead ``nextvalues = vals[-1:,]``
    initialisation (the first loop iteration always overwrote it) and
    hoisted ``mb_returns`` out of the loop (it was recomputed every step).
    """
    mb_advs = np.zeros_like(rews)
    lastgaelam = 0
    nsteps = rews.shape[0]
    for t in reversed(range(nsteps)):
        if t == nsteps - 1:
            # Assume last step is terminal -> won't be too significant unless tstep is large.
            nextnonterminal = 0
            nextvalues = 0
        else:
            nextnonterminal = 1.0 - dones[t+1]
            nextvalues = vals[t+1]
        delta = rews[t] + gam * nextvalues * nextnonterminal - vals[t]
        mb_advs[t] = lastgaelam = delta + gam * lam * nextnonterminal * lastgaelam
    mb_returns = mb_advs + vals
    return mb_returns, mb_advs
def learn(*, policy, env, tsteps, nsteps, encoef, lr, cliphigh, clipinc, vcoef,
          mgn, gmax, ginc, lam, nhier, nmb, noe, ngmin, nginc, bmin, bmax, nhist,
          recurrent, val, max_len=100, save_interval=0, log_interval=1, load_path=None):
    """PPO-style training loop for the I2A model.

    Alternates runner rollouts with minibatched gradient updates, logging and
    checkpointing at fixed intervals. ``lr`` and ``cliphigh``/``clipinc`` may
    be floats (constant schedules) or callables of the remaining-progress
    fraction. ``tsteps`` is the total timestep budget; ``nsteps`` the rollout
    length per update; ``nmb`` the number of minibatches; ``noe`` the number
    of optimisation epochs per update.
    """
    if isinstance(lr, float): lr = constfn(lr)
    else: assert callable(lr)
    if isinstance(cliphigh, float):
        # One clip value per hierarchy level, scaled geometrically by clipinc.
        arr = np.asarray([cliphigh*(clipinc**i) for i in range(nhier)], dtype=np.float32)
        cliprange = constfn(arr)
    else:
        def cr(t):
            arr = [cliphigh(t)*(clipinc(t)**i) for i in range(nhier)]
            return np.asarray(arr, dtype=np.float32)
        cliprange = cr
    nenvs = env.num_envs
    ob_space = env.observation_space
    ac_space = env.action_space
    nbatch = (nenvs * nsteps)
    nbatch_train = nbatch // nmb
    # NOTE(review): klcoef, tl and nh are not defined anywhere in this function or
    # the visible module — calling make_model() raises NameError as written.
    # Confirm the intended values (likely meant to be keyword parameters of learn).
    make_model = lambda : I2AModel(policy, ob_space, ac_space, max_grad=mgn,
                                   encoef=encoef, vcoef=vcoef, klcoef=klcoef, aggregator='concat',
                                   traj_len = tl, nh=nh)
    if save_interval and logger.get_dir():
        # Persist a model factory so checkpoints can be reconstructed later.
        import cloudpickle
        with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
            fh.write(cloudpickle.dumps(make_model))
    model = make_model()
    if load_path is not None:
        model.load(load_path)
    runner = I2ARunner(env=env, model=model, nsteps=nsteps)
    epinfobuf = deque(maxlen=100)
    tfirststart = time.time()
    nupdates = tsteps//nbatch
    if not val:
        # No learned value function: maintain an EMA of rewards per hierarchy level.
        vre = np.zeros((nhier), dtype=np.float32)
        val_temp = 0.9
    for update in range(1, nupdates+1):
        tstart = time.time()
        frac = 1.0 - (update - 1.0) / nupdates  # remaining-progress fraction in (0, 1]
        lrnow = lr(frac)
        cliprangenow = cliprange(frac)
        obs, rewards, actions, dones, epinfos = runner.run()
        epinfobuf.extend(epinfos)
        mblossvals = []
        # Split the flat rollout into per-episode chunks at the done flags.
        obs, actions, rewards, dones = (sbi(arr, dones) for arr in
                                        (obs, actions, rewards, dones))
        # NOTE(review): package_environment returns None in the original code
        # and env_train_set is never used afterwards — confirm intent.
        env_train_set = package_environment(obs, actions, rewards)
        if not recurrent:
            nlps, vfs = model.info(obs, actions)
            obs, actions, rewards, dones, nlps, vfs = \
                map(pack,(obs,actions,rewards,dones,nlps,vfs))
            if not val:
                vre = vre * val_temp + np.mean(rewards, axis=0) * (1-val_temp)
                vfs = np.reshape(np.repeat(vre, nsteps), [nsteps, nhier])
            rewards, advs = mcret(actions, rewards, dones, vfs, lam=lam, gam=model.gam)
            actions = actions.flatten() #safety
            inds = np.arange(nbatch)
            for _ in range(noe):
                # One pass over the data per epoch, in shuffled minibatches.
                np.random.shuffle(inds)
                for start in range(0, nbatch, nbatch_train):
                    end = start + nbatch_train
                    mbinds = inds[start:end]
                    slices = (arr[mbinds] for arr in (obs, actions, nlps, advs, rewards, vfs))
                    mblossvals.append(model.train(lrnow, cliprangenow, *slices))
        else: # recurrent version
            pass
        lossvals = np.mean(mblossvals, axis=0)
        tnow = time.time()
        fps = int(nbatch / (tnow - tstart))
        if update % log_interval == 0 or update == 1:
            logger.logkv("serial_timesteps", update*nsteps)
            logger.logkv("nupdates", update)
            logger.logkv("total_timesteps", update*nbatch)
            logger.logkv("fps", fps)
            logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
            logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
            logger.logkv('time_elapsed', tnow - tfirststart)
            for (lossval, lossname) in zip(lossvals, model.loss_names):
                logger.logkv(lossname, lossval)
            logger.dumpkvs()
        if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():
            checkdir = osp.join(logger.get_dir(), 'checkpoints')
            os.makedirs(checkdir, exist_ok=True)
            savepath = osp.join(checkdir, '%.5i'%update)
            print('Saving to', savepath)
            model.save(savepath)
    env.close()
def safemean(xs):
    """Mean of *xs*, or NaN when the sequence is empty (avoids a RuntimeWarning)."""
    return np.mean(xs) if len(xs) else np.nan
| null | baselines/feudal/i2a.py | i2a.py | py | 6,286 | python | en | code | null | code-starcoder2 | 51 |
51125941 | #!/usr/bin/env python
"""
Add seizure names to LR results
Input: LR_results, name = "obs_LRs.{species}.txt"
Seizure file matching sample names to seizures
"""
import argparse
def run(input_file, seizure_file):
    """Append seizure names to a tab-separated LR-results file.

    Reads sample rows from *input_file* (first two columns are sample names),
    maps each sample to its seizure via *seizure_file* (columns: seizure, sample),
    and writes ``<input>.seizures.txt`` with two extra columns appended.

    Fix: ``dict.iteritems()`` is Python-2-only; ``items()`` works on both.
    """
    with open(input_file, 'r') as infile:
        header = infile.readline().strip().split('\t')
        file_head = header
        samps = {}
        for i, line in enumerate(infile):
            line = line.strip().split('\t')
            samps[i] = line
    with open(seizure_file, 'r') as infile:
        header = infile.readline()
        seizures = {}
        for line in infile:
            line = line.strip().split('\t')
            # Column 1 is the sample name, column 0 the seizure name.
            seizures[line[1]] = line[0]
    output_file = input_file.replace('.txt', '.seizures.txt')
    with open(output_file, 'w') as outfile:
        outfile.write('\t'.join(file_head) + '\tseizure1\tseizure2\n')
        for k, v in samps.items():
            s1 = v[0]
            s2 = v[1]
            seizure1 = seizures[s1]
            seizure2 = seizures[s2]
            outfile.write('\t'.join(v) + '\t' + seizure1 + '\t' + seizure2 + '\n')
if __name__ == '__main__':
    # CLI entry point: parse the two input paths and run the merge.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--input_file',
                        help='Name of input LR_file')
    parser.add_argument('--seizure_file',
                        help='Name of seizure master file')
    args = parser.parse_args()
    run(args.input_file, args.seizure_file)
| null | data_analysis/post_processing/1_add_seizures.py | 1_add_seizures.py | py | 1,504 | python | en | code | null | code-starcoder2 | 51 |
432095531 | #!/usr/bin/env python
import os
import sys
import time
import signal
import argparse
import project_root
from os import path
from subprocess import Popen, call
from helpers.helpers import get_open_udp_port
def run(args):
    """Launch worker.py on every parameter-server and worker host via SSH.

    Spawned Popen handles are appended to args['ps_procs'] / args['worker_procs']
    (own process groups, so cleanup() can kill whole trees). Blocks on the
    parameter servers, which never exit on their own.
    """
    # run worker.py on ps and worker hosts
    for job_name in ['ps', 'worker']:
        host_list = args[job_name + '_list']
        procs = args[job_name + '_procs']
        for i in range(len(host_list)):  # fix: xrange is Python-2-only; range works on both
            ssh_cmd = ['ssh', host_list[i]]
            cmd = ['python', args['worker_src'],
                   '--ps-hosts', args['ps_hosts'],
                   '--worker-hosts', args['worker_hosts'],
                   '--job-name', job_name,
                   '--task-index', str(i)]
            if args['dagger']:
                cmd.append('--dagger')
            if args['driver'] is not None:
                cmd += ['--driver', args['driver']]
            cmd = ssh_cmd + cmd
            sys.stderr.write('$ %s\n' % ' '.join(cmd))
            # setsid gives each child its own process group for group-wide SIGTERM.
            procs.append(Popen(cmd, preexec_fn=os.setsid))
    # ps will block forever
    for ps_proc in args['ps_procs']:
        ps_proc.communicate()
def cleanup(args):
    """Terminate every spawned process group, then pkill leftovers on each host."""
    for proc in args['ps_procs'] + args['worker_procs']:
        try:
            os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
        except OSError as e:
            # Process may already be gone; report and continue.
            sys.stderr.write('%s\n' % e)
    hosts = set(args['ps_list'] + args['worker_list'])
    pkill_script = path.join(args['rlcc_dir'], 'helpers', 'pkill.py')
    for host in hosts:
        kill_cmd = ['ssh', host, 'python', pkill_script, args['rlcc_dir']]
        sys.stderr.write('$ %s\n' % ' '.join(kill_cmd))
        call(kill_cmd)
    sys.stderr.write('\nAll cleaned up.\n')
def construct_args(prog_args):
    """Translate parsed CLI options into the plain-dict config used by run()/cleanup()."""
    args = {
        # file paths
        'rlcc_dir': prog_args.rlcc_dir,
        # hostnames and processes
        'ps_hosts': prog_args.ps_hosts,
        'worker_hosts': prog_args.worker_hosts,
        'ps_list': prog_args.ps_hosts.split(','),
        'worker_list': prog_args.worker_hosts.split(','),
        'username': prog_args.username,
        'ps_procs': [],
        'worker_procs': [],
        'dagger': prog_args.dagger,
        'driver': prog_args.driver,
    }
    args['worker_src'] = path.join(args['rlcc_dir'], 'a3c', 'worker.py')
    # Rewrite host:port entries into user@host SSH targets (port dropped).
    for key in ('ps_list', 'worker_list'):
        for i, host in enumerate(args[key]):
            args[key][i] = args['username'] + '@' + host.split(':')[0]
    return args
def main():
    """Parse CLI options, fan workers out over SSH, and always clean up on exit."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--ps-hosts', required=True, metavar='[HOSTNAME:PORT, ...]',
        help='comma-separated list of hostname:port of parameter servers')
    parser.add_argument(
        '--worker-hosts', required=True, metavar='[HOSTNAME:PORT, ...]',
        help='comma-separated list of hostname:port of workers')
    parser.add_argument(
        '--username', default='ubuntu',
        help='username used in ssh connection (default: ubuntu)')
    parser.add_argument(
        '--rlcc-dir', metavar='DIR', default='/home/ubuntu/RLCC',
        help='absolute path to RLCC/ (default: /home/ubuntu/RLCC)')
    parser.add_argument('--dagger', action='store_true',
                        help='run Dagger rather than A3C')
    parser.add_argument('--driver', help='hostname of the driver')
    prog_args = parser.parse_args()
    args = construct_args(prog_args)
    # run worker.py on ps and worker hosts
    try:
        run(args)
    except KeyboardInterrupt:
        pass
    finally:
        # Tear down remote processes even on Ctrl-C.
        cleanup(args)
if __name__ == '__main__':
    main()
| null | a3c/train.py | train.py | py | 3,739 | python | en | code | null | code-starcoder2 | 51 |
433096351 | from django.conf.urls import url
from . import views
urlpatterns = [
    # Landing page
    url(r'^$', views.Index.as_view(), name='index'),
    # Club listing / creation / detail / editing (clubs addressed by slug)
    url(r'^clubs/$', views.ClubList.as_view(), name='club_list'),
    url(r'^clubs/add/$', views.ClubCreate.as_view(), name='club_create'),
    url(r'^clubs/(?P<club_slug>[\w-]+)/$', views.ClubDetail.as_view(), name='club_detail'),
    url(r'^clubs/(?P<club_slug>[\w-]+)/edit/$', views.ClubUpdate.as_view(), name='club_update'),
    url(r'^clubs/(?P<club_slug>[\w-]+)/archers/add/$', views.ClubArcherCreate.as_view(), name='club_archer_create'),
    url(r'^clubs/(?P<club_slug>[\w-]+)/archived/$', views.ArchiveArcherList.as_view(), name='archive_archer_list'),
    # County creation and archer management (archers addressed by primary key)
    url(r'^counties/add/$', views.CountyCreate.as_view(), name='county_create'),
    url(r'^archer/add/$', views.ArcherCreate.as_view(), name='archer_create'),
    url(r'^archer/(?P<pk>\d+)/$', views.ArcherDetail.as_view(), name='archer_detail'),
    url(r'^archer/(?P<pk>\d+)/edit/$', views.ArcherUpdate.as_view(), name='archer_update'),
    url(r'^archer/(?P<pk>\d+)/archive/$', views.ArcherArchive.as_view(), name='archer_archive'),
]
| null | core/urls.py | urls.py | py | 1,124 | python | en | code | null | code-starcoder2 | 51 |
389482296 | import notifications
from django.conf.urls import patterns, include, url
from django.contrib import admin
from app.views import ProjectViewSet, TaskViewSet, UserViewSet, ChatRoomViewSet, ChatViewSet
from rest_framework import routers
from django.conf import settings
admin.autodiscover()
# DRF router: auto-generates list/detail routes for each registered viewset.
router = routers.DefaultRouter()
router.register(r'project', ProjectViewSet)
router.register(r'task', TaskViewSet)
router.register(r'users', UserViewSet)
router.register(r'chat-room', ChatRoomViewSet)
#router.register(r'chat', ChatViewSet)
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'mybeez.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url('^inbox/notifications/', include(notifications.urls)),
    url(r'', include('user_sessions.urls', 'user_sessions')),
    url(r'^api/', include(router.urls)),
    url(r'^$', 'app.views.home', name='home'),
    url(r'^login$', 'app.views.loginAngular', name='loginAngular'),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^new_project$', 'app.views.new_project', name='new_project'),
    url(r'^profile$', 'app.views.profile', name='profile'),
    url(r'^new_task$', 'app.views.new_task', name='new_task'),
    url(r'^node_api$', 'app.views.node_api', name='node_api'),
    url(r'^task_post$', 'app.views.task_post', name='task_post'),
    url(r'^save_message$', 'app.views.save_message', name='save_message'),
    url(r'^chat$', 'app.views.ChatViewSet', name='chat'),
    url(r'^project$', 'app.views.get_project', name='get_project'),
    url(r'^room_check$', 'app.views.room_check', name='room_check'),
    url(r'^logout$', 'django.contrib.auth.views.logout', {'next_page': '/'})
)
# Serve uploaded media through Django only while DEBUG is on (dev convenience;
# production should serve MEDIA_ROOT from the web server instead).
if settings.DEBUG:
    urlpatterns += patterns('',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT,
        }),
    )
| null | mybeez/urls.py | urls.py | py | 2,033 | python | en | code | null | code-starcoder2 | 51 |
def get_candies(ratings):
    """Minimum total candies so each child gets >= 1 and any child rated
    higher than an adjacent child gets more candy than that neighbour.

    Strategy: give everyone 1, find local minima (<= both neighbours; the
    borders compare against a large sentinel), then walk outward from each
    minimum raising counts along strictly increasing runs.
    """
    if len(ratings) == 1:
        return 1
    count = len(ratings)
    candies = [1] * count
    sentinel = 10**6
    minima = [i for i, r in enumerate(ratings)
              if r <= (ratings[i - 1] if i > 0 else sentinel)
              and r <= (ratings[i + 1] if i < count - 1 else sentinel)]
    for m in minima:
        # Climb leftward while ratings strictly increase.
        j = m
        while j > 0 and ratings[j - 1] > ratings[j]:
            candies[j - 1] = max(candies[j - 1], candies[j] + 1)
            j -= 1
        # Climb rightward while ratings strictly increase.
        j = m
        while j < count - 1 and ratings[j + 1] > ratings[j]:
            candies[j + 1] = max(candies[j + 1], candies[j] + 1)
            j += 1
    return sum(candies)
# Read n, then n ratings (one per line), and print the minimum candy total.
n = int(input())
ratings = list()
for i in range(n):
    ratings.append(int(input()))
print(get_candies(ratings))
| null | algorithms/dynamic/candies.py | candies.py | py | 844 | python | en | code | null | code-starcoder2 | 51 |
618714890 | # Import utilities
import datetime
from enum import Enum
class Person:
    """A family member. Attributes (name, last_name, age, sex, season) are
    assigned externally; note that ``age`` is stored as a *string*."""

    # Define gender enum
    class Sex(Enum):
        MALE = 1
        FEMALE = 2

    def print_data(self):
        """Print a one-line summary: name, age and sex."""
        summary = (self.name.title() + " " + self.last_name.title() + ", "
                   + self.age + " years old, " + str(self.sex.name).lower() + ".")
        print(summary)

    def print_future_age(self):
        """Return (as a string) this person's age in the year 2050,
        using datetime so the current year never needs hard-coding."""
        current_year = int(datetime.datetime.now().year)
        return str(2050 - (current_year - int(self.age)))

    def preference(self):
        """Print a remark based on this person's favorite season."""
        season = self.season.lower()
        if season == "spring":
            print(self.name.title() + " likes flowers. Watch out for allergies!")
        elif season == "summer":
            print(self.name.title() + " likes swimming, sun tanning, and Super Paradise!")
        elif season == "autumn":
            print(self.name.title() + " likes rain. What a nostalgic personality...")
        elif season == "winter":
            print(self.name.title() + " likes snow. Let's go snowboarding!")
        else:
            print(" - Sorry, I cannot work anymore. Please restart me.")
| null | Python version/Person.py | Person.py | py | 1,421 | python | en | code | null | code-starcoder2 | 51 |
510382768 | """
print:
(a), (b), (c), (d), (e), ...... (z)
(a,b), (a,c), (a,d), ... (y,z)
...
(a,b,c,d, ...,x,y,z)
"""
import string
def calcPerm(s, temp, num, total, fro, to, array):
    """Recursively append every size-*total* combination of s[fro..to]
    (in index order) to *array*, formatted as "(a,b,...)".

    *temp* is a scratch buffer of length *total*; *num* is the slot being filled.
    """
    if num > total:
        return
    if num == total:
        array.append(f"({','.join(temp)})")
        return
    for idx in range(fro, to + 1):
        temp[num] = s[idx]
        # Recurse with the next slot, drawing only from later characters.
        calcPerm(s, temp, num + 1, total, idx + 1, to, array)
def main():
    """For each size 1..26, print all combinations of that many lowercase letters."""
    array = []
    s = string.ascii_letters[:26]  # 'a'..'z' (lowercase half of ascii_letters)
    for i in range(1, 27):
        calcPerm(s, [None] * i, 0, i, 0, len(s)-1, array)
        # NOTE(review): indentation was lost in extraction — print/reset are
        # placed inside the loop (one output line per size), which matches the
        # per-iteration reset of `array`; confirm against the original file.
        print(",".join(array))
        array = []
if __name__ == "__main__":
    main()
| null | learning-algorithm-book/1/1-3.py | 1-3.py | py | 651 | python | en | code | null | code-starcoder2 | 51 |
def fibonacci(n):
    """Print the first *n* Fibonacci numbers on one space-separated line.

    Fix: the original always printed both 0 and 1 before looping, so it
    produced wrong output for n <= 1 (and printed terms even for n <= 0).
    Behaviour for n >= 2 is unchanged.
    """
    if n <= 0:
        return
    a = 0
    b = 1
    print("The fibonacci series is:")
    print(a, end=" ")
    if n == 1:
        return
    print(b, end=" ")
    for i in range(n - 2):
        # Advance the pair: tuple assignment replaces the temp-variable shuffle.
        a, b = b, a + b
        print(b, end=" ")
fibonacci(10)
| null | Fibonacci.py | Fibonacci.py | py | 251 | python | en | code | null | code-starcoder2 | 51 |
275249893 | import sys
from model import SOS, EOS, PAD
from utils import tokenize
MIN_LENGTH = 3   # drop sentence pairs with fewer tokens than this
MAX_LENGTH = 50  # ...or more tokens than this
def load_data():
    """Read tab-separated source/target sentence pairs from sys.argv[1].

    Builds integer vocabularies for both sides (PAD/EOS/SOS reserved as
    indices 0/1/2), filters pairs by token-count bounds, and returns
    (data, vocab_src, vocab_tgt) where each data entry is a pair of
    index-string sequences, sorted by descending source length.
    """
    data = []
    vocab_src = {PAD: 0, EOS: 1, SOS: 2}
    vocab_tgt = {PAD: 0, EOS: 1, SOS: 2}
    fo = open(sys.argv[1])
    for line in fo:
        src, tgt = line.split("\t")
        tokens_src = tokenize(src, "word")
        tokens_tgt = tokenize(tgt, "word")
        if len(tokens_src) < MIN_LENGTH or len(tokens_src) > MAX_LENGTH:
            continue
        if len(tokens_tgt) < MIN_LENGTH or len(tokens_tgt) > MAX_LENGTH:
            continue
        seq_src = []
        seq_tgt = []
        for word in tokens_src:
            # Assign the next free index to unseen words.
            if word not in vocab_src:
                vocab_src[word] = len(vocab_src)
            seq_src.append(str(vocab_src[word]))
        for word in tokens_tgt:
            if word not in vocab_tgt:
                vocab_tgt[word] = len(vocab_tgt)
            seq_tgt.append(str(vocab_tgt[word]))
        data.append((seq_src, seq_tgt))
    data.sort(key = lambda x: len(x[0]), reverse = True) # sort by source sequence length
    fo.close()
    return data, vocab_src, vocab_tgt
def save_data(data):
    """Write index-sequence pairs to sys.argv[1] + ".csv", one tab-separated pair per line."""
    with open(sys.argv[1] + ".csv", "w") as out:
        for src_seq, tgt_seq in data:
            out.write(" ".join(src_seq) + "\t" + " ".join(tgt_seq) + "\n")
def save_vocab(vocab, ext):
    """Write the vocabulary to sys.argv[1] + ".vocab.<ext>", one word per line in index order."""
    with open(sys.argv[1] + ".vocab." + ext, "w") as out:
        for word, _ in sorted(vocab.items(), key=lambda kv: kv[1]):
            out.write("%s\n" % word)
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit("Usage: %s training_data" % sys.argv[0])
data, vocab_src, vocab_tgt= load_data()
save_data(data)
save_vocab(vocab_src, "src")
save_vocab(vocab_tgt, "tgt")
| null | prepare.py | prepare.py | py | 1,734 | python | en | code | null | code-starcoder2 | 51 |
591538766 | #coding=utf-8
# from selenium import webdriver
# import time
# browser=webdriver.Chrome()
# browser.get("http://www.baidu.com")
# time.sleep(5)
# browser.quit()
# from selenium import webdriver
# driver=webdriver.Chrome()
# driver.get("http://www.baidu.com")
# print("浏览器最大化")
# driver.maximize_window()#浏览器最大化
# driver.quit()
# from selenium import webdriver
# driver=webdriver.Chrome()
# driver.get("http://www.baidu.com")
# #参数数字为像素点
# print("设置浏览器宽480、高800显示")
# driver.set_window_size(480,800)#设置浏览器的宽和高
# driver.quit()
from selenium import webdriver
import time
# Demo: drive Chrome through Baidu pages and exercise back/forward navigation.
driver=webdriver.Chrome()
first_url='http://www.baidu.com'
print("now access %s"%first_url)
driver.get(first_url)  # open the Baidu home page
time.sleep(5)
second_url='http://news.baidu.com'
print("now access %s"%second_url)
driver.get(second_url)  # open the news page
time.sleep(5)
print("back to %s"%first_url)
driver.back()  # browser "back" button
time.sleep(5)
print("forward to %s"%second_url)
driver.forward()  # browser "forward" button
time.sleep(5)
driver.quit() | null | Webtest/practice.py | practice.py | py | 1,087 | python | en | code | null | code-starcoder2 | 51 |
425628398 | import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
EPOCHS=2000
# XOR truth table: inputs and expected outputs.
training_data = np.array([[0,0],[0,1],[1,0],[1,1]], "float32")
target_data = np.array([[0],[1],[1],[0]], "float32")
# Tiny MLP: one 16-unit ReLU hidden layer, sigmoid output for binary prediction.
model = Sequential()
model.add(Dense(16, input_shape=(2,), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
# Predictions before training (should be near-random).
print("-----------not train")
print(model.predict(training_data))
history = model.fit(training_data, target_data,
                    epochs=EPOCHS, verbose=0)
# Prediction after training, on the last input only ([1, 1] -> expect ~0).
print("----------- train")
print(model.predict(training_data[3:]))
# model.evaluate(training_data, target_data, steps=2) | null | day16/mykeras02.py | mykeras02.py | py | 693 | python | en | code | null | code-starcoder2 | 51 |
class Node:
    """A binary-tree node holding an integer payload and two child links.

    Fix: data/left/right used to be class-level attributes acting as
    pseudo-declarations; they are now proper per-instance attributes.
    """

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def buildBTRec():
    """Build a binary tree from preorder integer input (one per line);
    -1 denotes an absent child. Returns the root Node or None."""
    value = int(input())
    if value == -1:
        return None
    node = Node(value)
    node.left = buildBTRec()
    node.right = buildBTRec()
    return node
def preorder(root):
    """Print node values root-left-right, each followed by ", "."""
    if root is None:
        return
    print(root.data, end=", ")
    preorder(root.left)
    preorder(root.right)


def inorder(root):
    """Print node values left-root-right, each followed by ", "."""
    if root is None:
        return
    inorder(root.left)
    print(root.data, end=", ")
    inorder(root.right)


def postorder(root):
    """Print node values left-right-root, each followed by ", "."""
    if root is None:
        return
    postorder(root.left)
    postorder(root.right)
    print(root.data, end=", ")
def countNodes(root):
    """Total number of nodes in the (sub)tree rooted at *root* (0 for None)."""
    if root is None:
        return 0
    return 1 + countNodes(root.left) + countNodes(root.right)
def height(root):
    """Number of nodes on the longest root-to-leaf path (0 for an empty tree)."""
    if root is None:
        return 0
    return 1 + max(height(root.left), height(root.right))
total_tilt = 0


def tilt(root):
    """Return the sum of node values under *root*; as a side effect,
    accumulate |left_sum - right_sum| for every node into the
    module-level ``total_tilt``."""
    global total_tilt
    if root is None:
        return 0
    left_total = tilt(root.left)
    right_total = tilt(root.right)
    total_tilt += abs(left_total - right_total)
    return left_total + right_total + root.data
# Demo driver: read a tree in preorder (-1 = absent child), then print the
# three traversals, node count, height, and total tilt.
root = buildBTRec()
print()
preorder(root)
print()
inorder(root)
print()
postorder(root)
print()
print(countNodes(root))
print(height(root))
tilt(root)
print(total_tilt)
# Sample input for the tree 1(2(3,-),4(5(-,6),7)):
"""
1
2
3
-1
-1
-1
4
5
-1
6
-1
-1
7
-1
-1
"""
| null | Basic_Data_Structures_Python/Lecture 18/BinaryTree.py | BinaryTree.py | py | 1,339 | python | en | code | null | code-starcoder2 | 51 |
# Daemon processes
# A daemon process terminates as soon as the main process's code finishes running.
# A normal (non-daemon) child process keeps the main process waiting until it completes.
# A daemon process cannot start child processes of its own.
import time
from multiprocessing import Process
def cal_time():
    """Heartbeat worker: print a message every second, forever.

    Runs until the process is killed — here, when the daemonized process's
    parent finishes and the daemon is terminated with it.
    """
    while True:
        time.sleep(1)
        print("过去了1s")
if __name__ == '__main__':
    p = Process(target=cal_time)
    p.daemon = True  # must be set before the process is started
    p.start()
    # When this loop (the main process's code) finishes, daemon p is killed.
    for i in range(100):
        time.sleep(0.1)
        print('*' * i)
| null | process/process_daemon.py | process_daemon.py | py | 517 | python | en | code | null | code-starcoder2 | 51 |
571550198 | import unittest
from torch.distributions import Normal, Exponential, Independent, LogNormal
from pyfilter.filters import UKF, APF
from pyfilter.timeseries import AffineProcess, LinearGaussianObservations
from pyfilter.utils import concater
from pyfilter.normalization import normalize
import torch
from pyfilter.inference.sequential import NESSMC2, NESS, SMC2FW, SMC2
def f(x, alpha, sigma):
    """Hidden-state drift: scale the state by alpha (sigma unused)."""
    return x * alpha


def g(x, alpha, sigma):
    """Hidden-state diffusion: constant sigma."""
    return sigma


def fo(x, alpha, sigma):
    """Observation drift: scale the state by alpha (sigma unused)."""
    return x * alpha


def go(x, alpha, sigma):
    """Observation diffusion: constant sigma."""
    return sigma
def fmvn(x, alpha, sigma):
    # 2-D drift: scale the first state component by alpha, pass the second through.
    x1 = alpha * x[..., 0]
    x2 = x[..., 1]
    return concater(x1, x2)
def gmvn(x, alpha, sigma):
    # 2-D diffusion: the same sigma applied to both components.
    return concater(sigma, sigma)
class MyTestCase(unittest.TestCase):
    def test_Inference(self):
        """End-to-end smoke test: fit several sequential inference algorithms
        on simulated 1-D and 2-D linear-Gaussian models, then check that the
        posterior KDE at the true parameter value beats the prior density."""
        # ===== Distributions ===== #
        dist = Normal(0., 1.)
        mvn = Independent(Normal(torch.zeros(2), torch.ones(2)), 1)
        # ===== Define model ===== #
        linear = AffineProcess((f, g), (0.99, 0.25), dist, dist)
        model = LinearGaussianObservations(linear, scale=0.1)
        mv_linear = AffineProcess((fmvn, gmvn), (0.5, 0.25), mvn, mvn)
        mvnmodel = LinearGaussianObservations(mv_linear, torch.eye(2), scale=0.1)
        # ===== Test for multiple models ===== #
        priors = Exponential(1.), LogNormal(0., 1.)  # priors over (alpha, sigma)
        hidden1d = AffineProcess((f, g), priors, dist, dist)
        oned = LinearGaussianObservations(hidden1d, 1., scale=0.1)
        hidden2d = AffineProcess((fmvn, gmvn), priors, mvn, mvn)
        twod = LinearGaussianObservations(hidden2d, torch.eye(2), scale=0.1 * torch.ones(2))
        particles = 1000
        # ====== Run inference ===== #
        for trumod, model in [(model, oned), (mvnmodel, twod)]:
            # Simulate observations from the true model.
            x, y = trumod.sample_path(1000)
            algs = [
                (NESS, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
                (NESS, {'particles': particles, 'filter_': UKF(model.copy())}),
                (SMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
                (SMC2FW, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
                (NESSMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)})
            ]
            for alg, props in algs:
                alg = alg(**props)
                state = alg.fit(y)
                w = normalize(state.w)
                zipped = zip(
                    trumod.hidden.theta + trumod.observable.theta,  # True parameter values
                    alg.filter.ssm.hidden.theta + alg.filter.ssm.observable.theta  # Inferred
                )
                for trup, p in zipped:
                    if not p.trainable:
                        continue
                    # Posterior KDE (in the unconstrained space) should put more
                    # mass at the true value than the prior does.
                    kde = p.get_kde(weights=w)
                    transed = p.bijection.inv(trup)
                    densval = kde.logpdf(transed.numpy().reshape(-1, 1))
                    priorval = p.distr.log_prob(trup)
                    assert (densval > priorval.numpy()).all()
if __name__ == '__main__':
    unittest.main()
| null | test/inference.py | inference.py | py | 3,115 | python | en | code | null | code-starcoder2 | 51 |
326575857 | """
This is an attempt at a simple royale test. I just wanna try a simple thing that doesn't get overengineered lol. That won't happen.
On a scale of 1-10 how scared are you (in this battle royale) when someone else is nearby? Then use that to calculate fear levels which change which tasks are more likely?
Only likely to explore when you don't know see anyone.
"""
import random
# Interaction radii (in grid cells): NEARBY is close enough to act on,
# VISIBLE is line-of-sight range.
NEARBY_DISTANCE = 2
VISIBLE_DISTANCE = 3
# Scratch action-definition text; not parsed anywhere in the visible code.
data = """
a [Swing Sword at [p]]
"""
# Module-level registries; unused in the visible code.
actions = []
items = []
class Action:
    """Base class for everything an agent can do; subclasses override the hooks."""

    def __init__(self, string, requirements):
        self.string = string
        self.requirements = requirements

    def can_do_action(self, person, surroundings):
        """Return whether the action is currently possible (subclass hook)."""
        pass

    def do_action(self, person, surroundings):
        """Carry out the action (subclass hook)."""
        pass
class PickupAction (Action):
    """Pick up a random portable object within reach."""
    def __init__(self):
        pass

    def can_do_action(self, person, world):
        # Possible only when at least one portable object is nearby.
        portable = [obj for obj in world.get_nearby_objects(person.location) if obj.portable]
        return len(portable) > 0

    def do_action(self, person, world):
        portable = [obj for obj in world.get_nearby_objects(person.location) if obj.portable]
        chosen = random.choice(portable)
        person.inventory += [chosen]
        world.remove_object_from_world(chosen)
        print(str(person) + " picked up " + str(chosen))
class DropAction (Action):
    """Drop a randomly chosen portable item from the actor's inventory onto the ground."""
    def __init__(self):
        pass

    def can_do_action(self, person, world):
        # Possible only while the actor carries at least one portable item.
        droppableObjects = [x for x in person.inventory if x.portable]
        return len(droppableObjects) > 0

    def do_action(self, person, world):
        droppableObjects = [x for x in person.inventory if x.portable]
        o = random.choice(droppableObjects)
        # Place the item where the actor is standing.
        # NOTE(review): location is index-assigned here but is a Vector2Int
        # elsewhere — assumes Vector2Int supports item access; confirm.
        o.location[0] = person.location[0]
        o.location[1] = person.location[1]
        # Fix: the original appended the item back to the inventory
        # (person.inventory += [o]) instead of removing it.
        person.inventory.remove(o)
        world.add_object_to_world(o)
        print(str(person) + " dropped " + str(o))
class AttackAction (Action):
    """Deal a fixed amount of damage to a random living person within reach."""
    def __init__(self, amount_of_damage):
        self.amount_of_damage = amount_of_damage

    def can_do_action(self, person, world):
        victims = [x for x in world.get_nearby_people(person.location) if x != person and x.health > 0]
        return len(victims) > 0

    def do_action(self, person, world):
        victims = [x for x in world.get_nearby_people(person.location) if x != person and x.health > 0]
        victim = random.choice(victims)
        victim.health -= self.amount_of_damage
        print(str(person) + " attacked " + str(victim) + " for " + str(self.amount_of_damage) + " damage")
class FleeAction (Action):
    """Run away from every visible living person (random direction if they cancel out)."""
    def __init__(self):
        pass

    def can_do_action(self, person, world):
        threats = [x for x in world.get_visible_people(person.location) if x != person and x.health > 0]
        return len(threats) > 0

    def do_action(self, person, world):
        threats = [x for x in world.get_visible_people(person.location) if x != person and x.health > 0]
        # Vector pointing away from everyone visible.
        away = Vector2Int.sum_positions([person.location - t.location for t in threats])
        if away.x == 0 and away.y == 0:
            # Threats cancel out: pick a random in-bounds direction instead.
            while True:
                candidate = person.location + Vector2Int.get_random_direction()
                if world.is_inside_bounds(candidate):
                    person.location = candidate
                    break
            print(str(person) + " ran in a random direction")
        else:
            # Step one cell along the escape direction (clamped to unit components).
            person.location = person.location + away.clamp_to_ones()
            world.clamp_to_bounds(person.location)
            print(str(person) + " ran away from people nearby")
class ChaseAction (Action):
    """Move one step toward a randomly chosen visible living person."""
    def __init__(self):
        pass

    def can_do_action(self, person, world):
        # Note: filters by position (not identity), so the actor is excluded by location.
        targets = [x for x in world.get_visible_people(person.location)
                   if x.location != person.location and x.health > 0]
        return len(targets) > 0

    def do_action(self, person, world):
        targets = [x for x in world.get_visible_people(person.location) if x != person and x.health > 0]
        quarry = random.choice(targets)
        # One unit step toward the target.
        step = (quarry.location - person.location).clamp_to_ones()
        person.location = person.location + step
        print(str(person) + " chases after " + str(quarry))
class ExploreAction (Action):
    """Wander one random in-bounds step when nothing of interest is visible."""
    def __init__(self):
        pass

    def can_do_action(self, person, world):
        # Explore only when no living person (other than the actor) or object is visible.
        living = [x for x in world.get_visible_people(person.location) if x != person and x.health > 0]
        return len(living) <= 0 and len(world.get_visible_objects(person.location)) == 0

    def do_action(self, person, world):
        while True:
            candidate = person.location + Vector2Int.get_random_direction()
            if world.is_inside_bounds(candidate):
                person.location = candidate
                break
        print(str(person) + " explored in a random direction")
class CannibalismAction (Action):
    """Eat a nearby uneaten corpse for +2 health."""
    def __init__(self):
        pass

    def can_do_action(self, person, world):
        corpses = [x for x in world.get_nearby_people(person.location)
                   if x != person and x.health == 0 and not x.has_been_eaten]
        return len(corpses) > 0

    def do_action(self, person, world):
        corpses = [x for x in world.get_nearby_people(person.location)
                   if x != person and x.health == 0 and not x.has_been_eaten]
        meal = random.choice(corpses)
        person.health += 2
        meal.has_been_eaten = True
        print(str(person) + " ate " + str(meal))
class MoveTowardsRandomPersonByChance (Action):
    """Drift toward a random other person when alone, so that games eventually end."""
    def __init__(self):
        pass

    def can_do_action(self, person, world):
        # "Alone": at most the actor themselves visible, and nothing interesting around.
        return (len(world.get_visible_people(person.location)) <= 1
                and len(world.get_visible_objects(person.location)) == 0)

    def do_action(self, person, world):
        others = [x for x in world.people if x != person]
        target = random.choice(others)
        # One unit step toward the chosen person.
        step = (target.location - person.location).clamp_to_ones()
        person.location = person.location + step
        print(str(person) + " randomly moved towards " + str(target))
class Item:
    """A world object that can (by default) be picked up and carried."""
    def __init__(self, string):
        self.string = string  # fix: the constructor argument was silently dropped
        self.portable = True
        self.location = Vector2Int(0, 0)
        self.actions = []
class Person:
    """An agent in the world: has health, an inventory, and a position;
    each turn it performs one random action from those currently possible."""
    def __init__(self, name):
        self.name = name
        self.health = 100
        self.inventory = []  # full of Items
        self.location = Vector2Int(0, 0)
        # Points of interest (smoke, last-seen locations, objects);
        # intended to decay slowly over time.
        self.points_to_investigate = []
        self.has_been_eaten = False
        self.display_letter = "#"

    def get_actions(self, world):
        """Collect every action currently possible: per-item actions first,
        then the world's default actions everyone can attempt."""
        actions = []
        # now check all the object actions
        for o in self.inventory:
            for a in o.actions:
                if a.can_do_action(self, world):
                    actions += [a]
        # now check all the default actions that everyone can do!
        for a in world.defaultPeopleActions:
            if a.can_do_action(self, world):
                actions += [a]
        return actions

    def take_turn(self, world):
        """Perform one randomly chosen available action.
        Returns False when no action was possible, True otherwise."""
        actions = self.get_actions(world)
        if len(actions) == 0:
            return False
        a = random.choice(actions)
        a.do_action(self, world)
        return True

    def __repr__(self):  # fix: was misspelled "__repl__", so repr() used the default object repr
        return str(self.name) + " at " + str(self.health) + " health at " + str(self.location)

    __repl__ = __repr__  # backward-compat alias for the old misspelled name

    def __str__(self):
        return self.__repr__()
class World:
    """Grid world: holds people, NPCs and objects, and runs the turn loop."""
    def __init__(self):
        self.npcs = []
        self.people = []
        self.objects = []
        # Inclusive bounds: coordinates 0..dimensions[0] x 0..dimensions[1] are valid.
        self.dimensions = [11, 11]
        self.defaultPeopleActions = []
        # Pool of display letters still free for the map; consumed one per person.
        self.unique_identifiers = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    def get_nearby_objects(self, location):
        # NEARBY_DISTANCE / VISIBLE_DISTANCE are module-level constants defined
        # earlier in the file (not visible in this chunk).
        return self.get_objects_within_range(location, NEARBY_DISTANCE)
    def get_nearby_people(self, location):
        return self.get_people_within_range(location, NEARBY_DISTANCE)
    def get_visible_objects(self, location):
        return self.get_objects_within_range(location, VISIBLE_DISTANCE)
    def get_visible_people(self, location):
        return self.get_people_within_range(location, VISIBLE_DISTANCE)
    def is_inside_bounds(self, position):
        return position.x >= 0 and position.y >= 0 and position.x <= self.dimensions[0] and position.y <= self.dimensions[1]
    def clamp_to_bounds(self, position):
        # Mutates `position` in place.
        position.x = min(self.dimensions[0], max(position.x, 0))
        position.y = min(self.dimensions[1], max(position.y, 0))
    def get_objects_within_range(self, location, distance):
        # Manhattan-distance filter (see Vector2Int.get_distance).
        nearby = []
        for o in self.objects:
            if location.get_distance(o.location) <= distance:
                nearby += [o]
        return nearby
    def get_people_within_range(self, location, distance):
        # Includes both NPCs and players; does NOT exclude whoever is at `location`.
        nearby = []
        for o in self.npcs:
            if location.get_distance(o.location) <= distance:
                nearby += [o]
        for o in self.people:
            if location.get_distance(o.location) <= distance:
                nearby += [o]
        return nearby
    def get_random_coords(self):
        # NOTE(review): randint starts at 1, so row/column 0 is never chosen even
        # though it is inside bounds — confirm whether that is intended.
        return Vector2Int(random.randint(1, self.dimensions[0]), random.randint(1, self.dimensions[1]))
    def get_center_coords(self):
        c = Vector2Int(int(self.dimensions[0]/2), int(self.dimensions[1]/2))
        return c
    def add_object_to_world(self, o):
        self.objects += [o]
    def remove_object_from_world(self, o):
        self.objects.remove(o)
    def get_all_positions(self):
        # Build a dict mapping (x, y) -> list of People standing there. Note that
        # despite the original comment, only self.people is scanned — world
        # objects are not included.
        pos = {}
        for p in self.people:
            loc = p.location.to_tuple()
            if loc not in pos:
                pos[loc] = []
            pos[loc] += [p]
        return pos
    def print_world(self):
        # ASCII rendering of the grid. Each tile is drawn x_scale characters wide
        # and y_scale rows tall so the map looks roughly square in a terminal.
        y_scale = 2
        x_scale = 2
        positions = self.get_all_positions()
        # Legend: which letter represents which person.
        for p in self.people:
            print(str(p) + " => " + str(p.display_letter))
        # Top border.
        for i in range(y_scale):
            print("+"*x_scale + "-" *(self.dimensions[0]+1)*x_scale + "+"*x_scale)
        for y in range(self.dimensions[1]+1):
            for i in range(y_scale):
                print("|"*x_scale, end = "")
                for x in range(self.dimensions[0]+1):
                    pos = (x, y)
                    for j in range(x_scale):
                        if pos in positions:
                            # Consume one queued person per drawn character so
                            # stacked people all become visible across the tile.
                            t = positions[pos][0]
                            positions[pos] = positions[pos][1:] # remove the first item from the list now!
                            if len(positions[pos]) == 0:
                                del positions[pos] # no more people left on this tile
                            print(t.display_letter, end="")
                        else:
                            # Checkerboard background for empty tiles.
                            if x % 2 == y % 2:
                                print(" ", end="")
                            else:
                                print(".", end = "")
                print("|"*x_scale) # new line
        # Bottom border.
        for i in range(y_scale):
            print("+"*x_scale + "-" *(self.dimensions[0]+1)*x_scale + "+"*x_scale)
    def randomize_position(self, obj):
        obj.location = self.get_random_coords()
    def randomize_all_object_positions(self):
        for o in self.objects:
            self.randomize_position(o)
    def initialize_people_positions(self):
        # Everyone starts the match on the same centre tile.
        for p in self.people:
            p.location = self.get_center_coords()
    def get_all_players_as_string(self):
        x = [str(p) for p in self.people]
        return ", ".join(x)
    def assign_all_players_unique_display_letters(self):
        random.shuffle(self.people)
        for p in self.people:
            self.assign_player_unique_display_letter(p)
    def assign_player_unique_display_letter(self, o):
        # Prefer the first still-unused letter of the person's own name (upper
        # case first, then lower); otherwise fall back to the next free letter.
        preferredName = o.name
        while len(preferredName) > 0:
            if (preferredName[0].upper() in self.unique_identifiers):
                o.display_letter = preferredName[0].upper()
                self.unique_identifiers = self.unique_identifiers.replace(o.display_letter, "")
                return
            elif (preferredName[0].lower() in self.unique_identifiers):
                o.display_letter = preferredName[0].lower()
                self.unique_identifiers = self.unique_identifiers.replace(o.display_letter, "")
                return
            else:
                preferredName = preferredName[1:]
        # There are no letters in the name that haven't been taken, so just assign
        # the next unused letter from the pool.
        o.display_letter = self.unique_identifiers[0]
        self.unique_identifiers = self.unique_identifiers[1:]
    def take_turn(self):
        """Run one simulation turn; return False once the match has ended."""
        # Shuffle the acting order of all the characters each turn.
        random.shuffle(self.npcs)
        random.shuffle(self.people)
        # First evaluate all the environmental effects / NPCs.
        something_did_something = False
        for n in self.npcs:
            if n.health <= 0:
                continue # dead things can't do things!
            something_did_something |= n.take_turn(self)
        # Then evaluate the people.
        for p in self.people:
            if p.health <= 0:
                continue # dead things can't do things!
            something_did_something |= p.take_turn(self)
        # Check if we have a winner.
        alive_players = []
        for p in self.people:
            if p.health > 0:
                alive_players += [p]
        if len(alive_players) == 0:
            # then we have a tie!
            print("\nWe have a tie!")
            print(self.get_all_players_as_string())
            return False # simulation ended
        elif len(alive_players) == 1:
            print("\nWe have a winner! " + str(alive_players[0]))
            print(self.get_all_players_as_string())
            return False # simulation ended
        else:
            return something_did_something # more turns
def parse_objects(mega_string, world):
    """Split *mega_string* into lines and hand each non-trivial line to parse_line."""
    for raw_line in mega_string.split("\n"):
        line = raw_line.strip()
        # Skip blanks and one-character leftovers.
        if len(line) <= 1:
            continue
        parse_line(line, world)
def parse_line(line, world):
    """Parse one definition line into world content. Not implemented yet (stub)."""
    pass
class Vector2Int:
    """Integer 2D vector used for grid positions and movement directions."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def get_distance(locA, locB):
        """Manhattan distance between two vectors (called as ``a.get_distance(b)``,
        so ``locA`` receives the instance)."""
        d = abs(locA[0] - locB[0])
        d += abs(locA[1] - locB[1])
        return d
    def to_tuple(self):
        return (self.x, self.y)
    @staticmethod
    def sum_positions(positions):
        """Component-wise sum of an iterable of vectors."""
        d = Vector2Int(0, 0)
        for p in positions:
            d.x += p.x
            d.y += p.y
        return d
    @staticmethod
    def get_random_direction():
        """Random non-zero step: each component drawn from {-1, 0, 1}."""
        x = random.randint(-1, 1)
        y = random.randint(-1, 1)
        while x == 0 and y == 0:
            x = random.randint(-1, 1)
            y = random.randint(-1, 1)
        return Vector2Int(x, y)
    def __getitem__(self, key):
        if key == 0:
            return self.x
        elif key == 1:
            return self.y
    def __setitem__(self, key, value):
        if key == 0:
            self.x = value
            return self.x
        elif key == 1:
            self.y = value
            return self.y
    def __add__(self, other):
        if type(other) == Vector2Int:
            return Vector2Int(self.x + other.x, self.y + other.y)
    def __sub__(self, other):
        if type(other) == Vector2Int:
            return Vector2Int(self.x - other.x, self.y - other.y)
    def __neg__(self):
        return Vector2Int(-self.x, -self.y)
    def __abs__(self):
        return Vector2Int(abs(self.x), abs(self.y))
    def __eq__(self, other):
        if isinstance(other, Vector2Int):
            return self.x == other.x and self.y == other.y
        return False  # just assume it's not the same then
    def __hash__(self):
        # Defining __eq__ alone disables hashing; pair it with a value hash so
        # vectors can live in sets/dict keys.
        return hash((self.x, self.y))
    def block_magnitude(self):
        """Manhattan (taxicab) length of the vector."""
        return abs(self.x) + abs(self.y)
    def __str__(self):
        return "(" + str(self.x) + ", " + str(self.y) + ")"
    def __repr__(self):
        # BUG FIX: was misspelled __repl__, so repr() fell back to the default.
        return str(self)
    def clamp_to_ones(self):
        """Clamp each component into [-1, 1], i.e. reduce the vector to one grid step.

        BUG FIX: the old implementation used round(), which leaves values like 5
        unchanged — callers such as MoveTowardsRandomPersonByChance expect a
        single-step direction, not a teleport.
        """
        new_x = max(-1, min(1, self.x))
        new_y = max(-1, min(1, self.y))
        return Vector2Int(new_x, new_y)
if __name__ == "__main__":
    # Run a demo battle-royale simulation.
    w = World()
    # `data` is presumably a module-level blob of object definitions declared
    # earlier in the file (not visible here); parsing is currently a stub.
    parse_objects(data, w)
    w.defaultPeopleActions += [PickupAction(), DropAction(), AttackAction(1), FleeAction(), ChaseAction(), ExploreAction(), MoveTowardsRandomPersonByChance(), CannibalismAction()]
    w.people += [Person("Jordan")]
    w.people += [Person("Schuyler")]
    w.people += [Person("Jenna")]
    w.people += [Person("Joe")]
    # get ready to start the match
    w.initialize_people_positions()
    w.randomize_all_object_positions()
    w.assign_all_players_unique_display_letters()
    took_turn = True
    t = 0
    # Keep simulating turns until the world reports the match is over
    # (take_turn returns False on a win/tie or when nobody could act).
    while took_turn:
        print("\n\n\nTurn " + str(t))
        print(w.get_all_players_as_string())
        w.print_world()
        print()
        took_turn = w.take_turn()
        t += 1
    w.print_world()
    # the simulation is over!
| null | source/simpleroyaletest.py | simpleroyaletest.py | py | 15,682 | python | en | code | null | code-starcoder2 | 51 |
# Price calculator: reads a product price and a payment-condition code, then
# applies the matching discount/surcharge. (User-facing strings are Portuguese.)
valor = float(input('Entre com o valor do produto='))
codigo = int(input('Entre com o codigo do produto='))
if codigo == 1:
    # Code 1: 10% discount.
    desconto = valor * 0.1
    valor_final = valor - desconto
    print('Seu produto custara = {}'.format(valor_final))
elif codigo == 2:
    # Code 2: 5% discount.
    desconto = valor * 0.05
    valor_final = valor - desconto
    print('Seu produto custara = {}'.format(valor_final))
elif codigo == 3:
    # Code 3: price split into 2 interest-free installments.
    parcela = valor / 2
    print('Seu produto custara 2 parcelas de = {}'.format(parcela))
elif codigo == 4:
    # Code 4: 10% surcharge, paid in 3 installments.
    acrescimo = valor * 0.1
    valor_final = valor + acrescimo
    valor_dividido = valor_final / 3
    print('Seu produto tera acrescimo de 10% custara = {}'.format(valor_final))
    print('3 parcelas de = {}'.format(valor_dividido))
else:
    print('Codigo invalido')
| null | Algoritmos - Python/Exercicios Python/exercicio11_2.8.py | exercicio11_2.8.py | py | 784 | python | en | code | null | code-starcoder2 | 51 |
230453446 | #!/usr/bin/python3
"""List all State objects from db"""
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from model_state import Base, State
def first_state():
    """Print the first State row (by default ordering) from the database.

    Despite the module docstring, this fetches only the first object, printing
    "<id>: <name>" or "Nothing" if the table is empty.

    Command-line arguments:
        argv[1]: mysql username
        argv[2]: mysql password
        argv[3]: database name
    """
    engine = create_engine("mysql+mysqldb://{}:{}@localhost/{}"
                           .format(sys.argv[1], sys.argv[2], sys.argv[3]),
                           pool_pre_ping=True)
    Base.metadata.create_all(engine)
    session = Session(engine)
    var = session.query(State).first()
    if var:
        print("{}: {}".format(var.__dict__['id'], var.__dict__['name']))
    else:
        print("Nothing")
    session.close()
if __name__ == "__main__":
    first_state()
| null | 0x0F-python-object_relational_mapping/8-model_state_fetch_first.py | 8-model_state_fetch_first.py | py | 813 | python | en | code | null | code-starcoder2 | 51 |
166077480 | import curses
from output import OutputModule
from input import InputModule
from domains.Student import *
from domains.Course import *
from domains.Mark import *
class MainModule:
    # NOTE(review): everything below executes once, when this class statement is
    # evaluated (i.e. at import), NOT when MainModule is instantiated. If a
    # reusable entry point is wanted this should live in a function — confirm.
    # main
    # Register `s` students (student_num/add_student come from the star imports).
    s = int(student_num())
    l = 1
    while l <= s:
        l += 1
        add_student()
    show_list_student()
    # Register `c` courses.
    c = int(number_course())
    p = 1
    while p <= c:
        p += 1
        add_course()
    show_list_course()
    create_mark()
    # Ask (up to once per course) whether to display marks; stops after the
    # first "yes" answer.
    for i in range(0, len(Course)):
        print("Show mark? 1. YES 2. NO")
        ol = int(input("You Choose: "))
        if ol == 1:
            print("--STUDENT MARK--")
            show_mark()
            break
    # GPA
    mark_gpa()
    mark_cal()
| null | pw4/main.py | main.py | py | 685 | python | en | code | null | code-starcoder2 | 51 |
511095428 | # coding=utf-8
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
WIDGET
Base class for widgets.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2020 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
import pygame
import pygame_menu.baseimage as _baseimage
import pygame_menu.font as _fonts
import pygame_menu.locals as _locals
from pygame_menu.widgets.core.selection import Selection
from pygame_menu.sound import Sound
from pygame_menu.utils import make_surface, assert_alignment, assert_color, assert_position, assert_vector2
from uuid import uuid4
import time
class Widget(object):
"""
Widget abstract class.
:param title: Widget title
:type title: str
:param widget_id: Widget identifier
:type widget_id: str
:param onchange: Callback when changing the selector
:type onchange: function, None
:param onreturn: Callback when pressing return button
:type onreturn: callable, None
:param args: Optional arguments for callbacks
:param kwargs: Optional keyword-arguments for callbacks
"""
def __init__(self,
title='',
widget_id='',
onchange=None,
onreturn=None,
args=None,
kwargs=None
):
assert isinstance(title, str)
assert isinstance(widget_id, str)
if onchange:
assert callable(onchange), 'onchange must be callable or None'
if onreturn:
assert callable(onreturn), 'onreturn must be callable or None'
# Store id, if None or empty create new ID based on UUID
if widget_id is None or len(widget_id) == 0:
widget_id = uuid4()
self._attributes = {} # Stores widget attributes
self._alignment = _locals.ALIGN_CENTER
self._background_color = None
self._background_inflate = (0, 0)
self._events = [] # type: list
self._id = str(widget_id)
self._margin = (0.0, 0.0) # type: tuple
self._max_width = None # type: (int,float)
self._rect = pygame.Rect(0.0, 0.0, 0.0, 0.0) # type: (pygame.Rect,None)
self._selected_rect = None # type: (pygame.rect.Rect,None)
self._selection_time = 0 # type: float
self._title = title
self._args = args or [] # type: list
self._kwargs = kwargs or {} # type: dict
self._on_change = onchange # type: callable
self._on_return = onreturn # type: callable
# Surface of the widget
self._surface = None # type: (pygame.Surface,None)
# Menu reference
self._menu = None
# If this is True then the widget forces the Menu to update because the
# widget render has changed
self._menu_surface_needs_update = False
# Modified in set_font() method
self._font = None # type: (pygame.font.Font,None)
self._font_antialias = True # type: bool
self._font_background_color = None # type: (tuple, None)
self._font_color = (0, 0, 0) # type: tuple
self._font_name = '' # type: str
self._font_selected_color = (255, 255, 255) # type: tuple
self._font_size = 0 # type: int
# Text shadow
self._shadow = False # type: bool
self._shadow_color = (0, 0, 0) # type: tuple
self._shadow_offset = 2.0 # type: float
self._shadow_position = _locals.POSITION_NORTHWEST
self._shadow_tuple = None # (x px offset, y px offset)
self._create_shadow_tuple()
# Rendering, this variable may be used by render() method
# If the hash of the variables change respect to the last render hash
# (hash computed using self._hash_variables() method)
# then the widget should render and update the hash
self._last_render_hash = 0 # type: int
# Stores the last render surface size, updated by _check_render_size_changed()
self._last_render_surface_size = (0, 0)
self._selection_effect = None # type: Selection
# Public attributes
self.active = False # Widget requests focus
self.is_selectable = True # Some widgets cannot be selected like labels
self.joystick_enabled = True
self.mouse_enabled = True
self.selected = False
self.selection_effect_enabled = True # Some widgets cannot have selection effect
self.sound = Sound() # type: Sound
def set_attribute(self, key, value):
    """
    Store *value* under *key* in the widget's attribute table.

    :param key: Key of the attribute
    :type key: str
    :param value: Value of the attribute
    :type value: Any
    :return: None
    """
    assert isinstance(key, str)
    self._attributes[key] = value
def get_attribute(self, key, default):
"""
Get attribute value.
:param key: Key of the attribute
:type key: str
:param default: Value if does not exists
:type default: Any
:return: Attribute data
:rtype: Any
"""
assert isinstance(key, str)
if key not in self._attributes.keys():
return default
return self._attributes[key]
@staticmethod
def _hash_variables(*args):
    """
    Compute a single hash from a series of variables (hash of the args tuple).

    :param args: Variables to compute hash
    :type args: Object
    :return: Hash data
    :rtype: int
    """
    return hash(args)
def _render_hash_changed(self, *args):
"""
This method checks if the widget must render because the inner variables changed.
This method should include all the variables.
If the render changed,
:param args: Variables to check the hash
:type args: Object
:return: Hash data
:rtype: int
"""
_hash = self._hash_variables(*args)
if _hash != self._last_render_hash:
self._last_render_hash = _hash
return True
return False
def set_title(self, title): # lgtm [py/inheritance/incorrect-overridden-signature]
"""
Update the widget title.
:param title: New title
:type title: str
:return: None
"""
self._title = str(title)
self._apply_font()
self._render()
self._check_render_size_changed()
def get_title(self):
"""
Return the widget title.
:return: Widget title
:rtype: str
"""
return self._title
def set_background_color(self, color, inflate=(0, 0)):
"""
Set widget background color.
:param color: Widget background color
:type color: tuple, list, :py:class:`pygame_menu.baseimage.BaseImage`, None
:param inflate: Inflate background in x,y
:type inflate: tuple, list
:return: None
"""
if color is not None:
if isinstance(color, _baseimage.BaseImage):
assert color.get_drawing_mode() == _baseimage.IMAGE_MODE_FILL, \
'currently widget only support IMAGE_MODE_FILL drawing mode'
else:
assert_color(color)
assert_vector2(inflate)
assert inflate[0] >= 0 and inflate[1] >= 0, \
'widget background inflate must be equal or greater than zero in both axis'
self._background_color = color
self._background_inflate = inflate
def _fill_background_color(self, surface):
"""
Fill a surface with the widget background color.
:param surface: Surface to fill
:type surface: :py:class:`pygame.Surface`
:return: None
"""
if self._background_color is None:
return
if isinstance(self._background_color, _baseimage.BaseImage):
self._background_color.draw(
surface=surface,
area=self._rect.inflate(*self._background_inflate),
position=(self._rect.x - self._background_inflate[0] / 2,
self._rect.y - self._background_inflate[1] / 2)
)
else:
surface.fill(self._background_color, self._rect.inflate(*self._background_inflate))
def get_selection_effect(self):
"""
:return: Selection effect
:rtype: :py:class:`pygame_menu.widgets.core.Selection`
"""
return self._selection_effect
def set_selection_effect(self, selection):
"""
Set the selection effect handler.
:param selection: Selection effect class
:type selection: :py:class:`pygame_menu.widgets.core.Selection`
:return: None
"""
assert isinstance(selection, Selection)
self._selection_effect = selection
def apply(self, *args):
"""
Run ``on_return`` callback when return event. A callback function
receives the following arguments:
.. code-block:: python
callback_func( value, *args, *widget._args, **widget._kwargs )
with:
- ``value`` (if something is returned by ``get_value()``)
- ``args`` given to this method
- ``args`` of the widget
- ``kwargs`` of the widget
:param args: Extra arguments passed to the callback
:return: None
"""
if self._on_return:
args = list(args) + list(self._args)
try:
args.insert(0, self.get_value())
except ValueError:
pass
return self._on_return(*args, **self._kwargs)
def change(self, *args):
"""
Run ``on_change`` callback after change event is triggered. A callback function
receives the following arguments:
.. code-block:: python
callback_func( value, *args, *widget._args, **widget._kwargs )
with:
- ``value`` (if something is returned by ``get_value()``)
- ``args`` given to this method
- ``args`` of the widget
- ``kwargs`` of the widget
:param args: Extra arguments passed to the callback
:return: None
"""
if self._on_change:
args = list(args) + list(self._args)
try:
args.insert(0, self.get_value())
except ValueError:
pass
return self._on_change(*args, **self._kwargs)
def draw(self, surface):
"""
Draw the widget shape.
:param surface: Surface to draw
:type surface: :py:class:`pygame.Surface`
:return: None
"""
raise NotImplementedError('override is mandatory')
def draw_selection(self, surface):
"""
Draw selection effect on widget.
:param surface: Surface to draw
:type surface: :py:class:`pygame.Surface`
:return: None
"""
if not self.is_selectable or self._selection_effect is None or not self.selection_effect_enabled:
return
self._selection_effect.draw(surface, self)
def set_max_width(self, width):
"""
Set widget max width (column support) if force_fit_text is enabled.
:param width: Width in px, None if max width is disabled
:type width: int, float, None
:return: None
"""
if width is not None:
assert isinstance(width, (int, float))
self._max_width = width
def get_margin(self):
"""
:return: Widget margin
:rtype: tuple
"""
return self._margin
def set_margin(self, x, y):
"""
Set Widget margin.
:param x: Margin on x axis
:type x: int, float
:param y: Margin on y axis
:type y: int, float
:return: None
"""
assert isinstance(x, (int, float))
assert isinstance(y, (int, float))
self._margin = (x, y)
def get_rect(self):
"""
Return the Rect object, this forces the widget rendering
:return: Widget rect
:rtype: :py:class:`pygame.Rect`
"""
self._render()
return self._rect.copy()
def get_value(self):
"""
Return the value. If exception ``ValueError`` is raised,
no value will be passed to the callbacks.
:return: Value
:rtype: Object
"""
raise ValueError('{}({}) does not accept value'.format(self.__class__.__name__,
self.get_id()))
def get_id(self):
"""
Returns the widget ID.
:return: ID
:rtype: str
"""
return self._id
def _render(self):
"""
Render the widget surface.
This method shall update the attribute ``_surface`` with a pygame.Surface
representing the outer borders of the widget.
:return: None
"""
raise NotImplementedError('override is mandatory')
def _font_render_string(self, text, color=(0, 0, 0), use_background_color=True):
"""
Render text.
:param text: Text to render
:type text: str
:param color: Text color
:type color: tuple
:param use_background_color: Use default background color
:type use_background_color: bool
:return: Text surface
:rtype: :py:class:`pygame.Surface`
"""
assert isinstance(text, str)
assert isinstance(color, tuple)
assert isinstance(use_background_color, bool)
bgcolor = self._font_background_color
# Background color must be opaque, otherwise the results are quite bad
if isinstance(bgcolor, (tuple, list)) and len(bgcolor) == 4 and bgcolor[3] != 255:
bgcolor = None
# Disable
if not use_background_color:
bgcolor = None
return self._font.render(text, self._font_antialias, color, bgcolor)
def _check_render_size_changed(self):
"""
Check the size changed after rendering.
This method should be used only on widgets that can change in size, or if the size
is changed during execution time (like set_title).
The update status (needs update if render size changed) is returned by
Widget.surface_needs_update() method.
:return: Boolean, if True the size changed
:rtype: bool
"""
if self._rect.size != self._last_render_surface_size:
self._last_render_surface_size = self._rect.size
self._menu_surface_needs_update = True
return True
return False
def _render_string(self, string, color):
"""
Render text and turn it into a surface.
:param string: Text to render
:type string: str
:param color: Text color
:type color: tuple
:return: Text surface
:rtype: :py:class:`pygame.Surface`
"""
text = self._font_render_string(string, color)
# Create surface
surface = make_surface(width=text.get_width(),
height=text.get_height(),
alpha=True)
# Draw shadow first
if self._shadow:
text_bg = self._font_render_string(string, self._shadow_color)
surface.blit(text_bg, self._shadow_tuple)
surface.blit(text, (0, 0))
new_width = surface.get_size()[0]
new_height = surface.get_size()[1]
if self._max_width is not None and new_width > self._max_width:
surface = pygame.transform.smoothscale(surface, (self._max_width, new_height))
return surface
def surface_needs_update(self):
"""
Checks if the widget width/height has changed because events. If so, return true and
set the status of the widget (menu widget position needs update) as false. This method
is used by .update() from Menu class.
:return: True if the widget position has changed by events after the rendering.
:rtype: bool
"""
if self._menu_surface_needs_update:
self._menu_surface_needs_update = False
return True
return False
def set_font(self, font, font_size, color, selected_color, background_color, antialias=True):
"""
Set the text font.
:param font: Name or list of names for font (see pygame.font.match_font for precise format)
:type font: str, list
:param font_size: Size of font in pixels
:type font_size: int
:param color: Text color
:type color: tuple
:param selected_color: Text color when widget is selected
:type selected_color: tuple
:param background_color: Font background color
:type background_color: tuple
:param antialias: Determines if antialias is applied to font (uses more processing power)
:type antialias: bool
:return: None
"""
assert isinstance(font, str)
assert isinstance(font_size, int)
assert isinstance(color, tuple)
assert isinstance(selected_color, tuple)
assert isinstance(background_color, (tuple, type(None)))
assert isinstance(antialias, bool)
self._font = _fonts.get_font(font, font_size)
self._font_antialias = antialias
self._font_background_color = background_color
self._font_color = color
self._font_name = font
self._font_selected_color = selected_color
self._font_size = font_size
self._apply_font()
def get_font_info(self):
"""
Return a dict with the information of the widget font.
:return: Dict, keys: size (int), name (str), color (tuple), selected_color (tuple), antialias (bool)
:rtype: dict
"""
return {
'size': self._font_size,
'name': self._font_name,
'color': self._font_color,
'selected_color': self._font_selected_color,
'antialias': self._font_antialias,
}
def set_menu(self, menu):
"""
Set menu reference.
:param menu: Menu object
:type menu: :py:class:`pygame_menu.Menu`
:return: None
"""
self._menu = menu
def get_menu(self):
"""
Return menu reference (if exists).
:return: Menu reference
:rtype: :py:class:`pygame_menu.Menu`
"""
return self._menu
def _apply_font(self):
"""
Function triggered after a font is applied to the widget.
:return: None
"""
raise NotImplementedError('override is mandatory')
def set_position(self, posx, posy):
"""
Set the position.
:param posx: X position
:type posx: int, float
:param posy: Y position
:type posy: int, float
:return: None
"""
self._rect.x = posx
self._rect.y = posy
def set_alignment(self, align):
"""
Set the alignment of the widget.
:param align: Widget align, see locals
:type align: str
:return: None
"""
assert_alignment(align)
self._alignment = align
def get_alignment(self):
"""
Returns widget alignment.
:return: Widget align, see locals
:rtype: str
"""
return self._alignment
def set_selected(self, selected=True):
"""
Mark the widget as selected.
:param selected: Set item as selected
:type selected: bool
:return: None
"""
self.selected = selected
self.active = False
if selected:
self._focus()
self._selection_time = time.time()
else:
self._blur()
self._events = [] # Remove events
self._render()
def get_selected_time(self):
    """
    Return the time the widget has been selected, in milliseconds.
    If the widget is not currently selected, return 0.

    :return: Time in ms
    :rtype: float
    """
    if not self.selected:
        return 0
    return (time.time() - self._selection_time) * 1000
def _focus(self):
"""
Function that is executed when the widget receives a focus (is selected).
:return: None
"""
pass
def _blur(self):
"""
Function that is executed when the widget loses the focus.
:return: None
"""
pass
def set_shadow(self, enabled=True, color=None, position=None, offset=None):
"""
Show text shadow.
:param enabled: Shadow is enabled or not
:type enabled: bool
:param color: Shadow color
:type color: list, None
:param position: Shadow position
:type position: str, None
:param offset: Shadow offset
:type offset: int, float, None
:return: None
"""
self._shadow = enabled
if color is not None:
assert_color(color)
self._shadow_color = color
if position is not None:
assert_position(position)
self._shadow_position = position
if offset is not None:
assert isinstance(offset, (int, float))
if offset <= 0:
raise ValueError('shadow offset must be greater than zero')
self._shadow_offset = offset
# Create shadow tuple position
self._create_shadow_tuple()
def set_sound(self, sound):
"""
Set sound engine to the widget.
:param sound: Sound object
:type sound: :py:class:`pygame_menu.sound.Sound`
:return: None
"""
self.sound = sound
def _create_shadow_tuple(self):
"""
Create shadow position tuple.
:return: None
"""
x = 0
y = 0
if self._shadow_position == _locals.POSITION_NORTHWEST:
x = -1
y = -1
elif self._shadow_position == _locals.POSITION_NORTH:
y = -1
elif self._shadow_position == _locals.POSITION_NORTHEAST:
x = 1
y = -1
elif self._shadow_position == _locals.POSITION_EAST:
x = 1
elif self._shadow_position == _locals.POSITION_SOUTHEAST:
x = 1
y = 1
elif self._shadow_position == _locals.POSITION_SOUTH:
y = 1
elif self._shadow_position == _locals.POSITION_SOUTHWEST:
x = -1
y = 1
elif self._shadow_position == _locals.POSITION_WEST:
x = -1
self._shadow_tuple = (x * self._shadow_offset, y * self._shadow_offset)
def set_controls(self, joystick=True, mouse=True):
"""
Enable interfaces to control the widget.
:param joystick: Use joystick
:type joystick: bool
:param mouse: Use mouse
:type mouse: bool
:return: None
"""
self.joystick_enabled = joystick
self.mouse_enabled = mouse
def set_value(self, value):
"""
Set the value.
.. warning:: This method does not fire the callbacks as it is
called programmatically. This behavior is deliberately
chosen to avoid infinite loops.
:param value: Value to be set on the widget
:type value: Object
:return: None
"""
raise ValueError('{}({}) does not accept value'.format(self.__class__.__name__,
self.get_id()))
def update(self, events):
"""
Update internal variable according to the given events list
and fire the callbacks.
:param events: List of pygame events
:type events: list[:py:class:`pygame.event.Event`]
:return: True if updated
:rtype: bool
"""
raise NotImplementedError('override is mandatory')
def _add_event(self, event):
"""
Add a custom event to the widget for the next update().
:param event: Custom event
:type event: :py:class:`pygame.event.Event`
"""
self._events.append(event)
def _merge_events(self, events):
"""
Append widget events to events list.
:param events: Event list
:type events: list[:py:class:`pygame.event.Event`]
:return: Augmented event list
:rtype: list[:py:class:`pygame.event.Event`]
"""
if len(self._events) == 0:
return events
copy_events = []
for e in events:
copy_events.append(e)
for e in self._events:
copy_events.append(e)
self._events = []
return copy_events
| null | tetris/venv/Lib/site-packages/pygame_menu/widgets/core/widget.py | widget.py | py | 26,029 | python | en | code | null | code-starcoder2 | 51 |
279812286 | import math
num =2      # candidate number currently being tested for primality
result =0   # will hold the 10001st prime once found
wow=1       # 1-indexed count of primes found so far
def find(n):
    """Return True if n is prime (trial division up to sqrt(n))."""
    # FIX: previously 0 and 1 (and negatives) were reported as prime; harmless
    # for the loop below (which starts at 2) but wrong for any other caller.
    if n < 2:
        return False
    # math.isqrt avoids float rounding for large n.
    for i in range(2, math.isqrt(n) + 1):
        if n % i == 0:
            return False
    return True
# Scan upwards from num, counting primes until the 10001st is reached
# (Project Euler problem 7).
while True:
    if find(num)==True:
        if wow==10001:
            result=num
            break
        wow=wow+1
    num=num+1
print(result)
| null | ProjectEuler/p7.py | p7.py | py | 319 | python | en | code | null | code-starcoder2 | 51 |
122682778 | import collections
class Graph:
    """Undirected graph (adjacency lists) with a biconnectivity test based on
    the classic articulation-point DFS (discovery times + low-link values)."""
    def __init__(self, v):
        self.nv = v
        self.graph = collections.defaultdict(list)
        self.count = 0
    def checkbc(self):
        """Return True iff the graph is connected and has no articulation point."""
        n = self.nv
        visited = [False] * n
        discovery = [float('inf')] * n
        low = [float('inf')] * n
        parent = [-1] * n
        # A found articulation point means the graph is not biconnected.
        if self.biconfunc(0, visited, parent, low, discovery):
            return False
        # Unvisited vertices mean the graph is disconnected.
        for was_seen in visited:
            if not was_seen:
                return False
        return True
    def biconfunc(self, u, visited, parent, low, d):
        """DFS from u; return True as soon as an articulation point is found."""
        child_count = 0
        visited[u] = True
        d[u] = self.count
        low[u] = self.count
        self.count += 1
        for w in self.graph[u]:
            if visited[w]:
                # Back edge (ignoring the tree edge to the parent).
                if w != parent[u]:
                    low[u] = min(low[u], d[w])
                continue
            parent[w] = u
            child_count += 1
            if self.biconfunc(w, visited, parent, low, d):
                return True
            low[u] = min(low[u], low[w])
            # Root is an articulation point if it has 2+ DFS children.
            if parent[u] == -1 and child_count > 1:
                return True
            # Non-root u is an articulation point if no back edge from w's
            # subtree reaches above u.
            if parent[u] != -1 and low[w] >= d[u]:
                return True
        return False
# Interactive driver: read a graph from stdin and report biconnectivity.
n=int(input("Enter the no of vertices"))
g=Graph(n)
e=int(input("Enter the no of edges"))
print("Enter the vertex pairs to which edges must be constructed")
for i in range(e):
    # Vertices are entered 1-based; the Graph stores them 0-based.
    a,b=map(int,input().split())
    g.graph[a-1].append(b-1)
    g.graph[b-1].append(a-1)
# Map the boolean result to a human-readable verdict.
dic={True:"It is biconnected",False:"Not biconnected"}
print(dic[g.checkbc()])
| null | bicon.py | bicon.py | py | 1,514 | python | en | code | null | code-starcoder2 | 51 |
369843511 | # -*- coding: utf-8 -*-
import unittest
from pageobjects.baidu_homepage import Baidupage
from framework.browser_engine import Browser_open
# Selenium UI tests against the Baidu home page, driven through the
# Baidupage page object. Each test prints a pass/fail message (in Chinese)
# instead of letting the assertion propagate.
class Test_search(unittest.TestCase):
    def setUp(self):
        # Open a fresh browser session before every test.
        b = Browser_open()
        self.driver = b.browseropen()
    def tearDown(self):
        # Always close the browser, even when a test fails.
        self.driver.quit()
    def testsearch(self):
        # Search for 'selenium' and verify the term appears in the page title.
        baidu = Baidupage(self.driver)
        baidu.inputtext('selenium')
        baidu.click_search()
        baidu.sleep(2)
        baidu.screen()  # capture a screenshot for the report
        try:
            assert 'selenium' in baidu.title()
            print('测试通过')
        except Exception as e:
            # NOTE(review): swallowing the assertion means the unittest
            # runner always reports success; consider letting it raise.
            print('测试不通过', format(e))
    def testxinwen(self):
        # Follow the "news" link and verify the title changes accordingly.
        baidu = Baidupage(self.driver)
        baidu.click_newslink()
        baidu.sleep(2)
        baidu.screen()
        try:
            assert '新闻' in baidu.title()
            print('测试通过')
        except Exception as e:
            print('测试不通过', format(e))
| null | selenium/src/testsuits/test_search.py | test_search.py | py | 1,004 | python | en | code | null | code-starcoder2 | 51 |
436405346 | from flask_restplus import Api, Resource, fields
from werkzeug.contrib.fixers import ProxyFix
from flask import Flask, url_for, jsonify
from elasticsearch import Elasticsearch
import json
### Setup elastic search connection (host name resolves inside the compose network)
es_host = {"host": "elasticsearch1", "port": 9200}
es = Elasticsearch([es_host], retry_on_timeout=True, maxsize=25)
app = Flask(__name__)
# Flask-RESTPlus API wrapper: provides the Swagger UI and /v1 prefix.
api = Api(app,
          version='1.0',
          title='Swagger Test Page for Elasticsearch \"Geoname Data\" Search Templates',
          description='Test Page for \"Geoname Data\" Searches',
          prefix="/v1",
          contact="john@swarmee.net",
          contact_url="www.swarmee.net"
          )
# Respect X-Forwarded-* headers when running behind a reverse proxy.
app.wsgi_app = ProxyFix(app.wsgi_app)
ns = api.namespace('city', description='Simple Endpoints to Test Elastic API operations')
# Swagger request models for the POST bodies below.
query1 = api.model('query1', {
    'typeAheadText': fields.String(default='Syd', required=True, description='Type Ahead Text'),
    'typeAheadTemplate': fields.String(default='typeAhead', required=True, description='Template for Type Ahead'),
})
query2 = api.model('query2', {
    'nearGeoNameId': fields.String(default='2293507', required=True, description='Search For Cities Near This GeoNameId'),
    'nearGeoNameIdDistance': fields.String(default='100km', required=True, description='Distance From City to Include in results')
})
@ns.route('/typeAhead')
class typeAhead(Resource):
    # Runs the stored search template named by 'typeAheadTemplate' and
    # returns only the suggestion text/_id fields.
    @ns.expect(query1)
    def post(self):
        typeAheadText = api.payload['typeAheadText']
        typeAheadTemplate = api.payload['typeAheadTemplate']
        abc = {'id': typeAheadTemplate ,'params': {'typeAheadText': typeAheadText}}
        # filter_path trims the ES response to the suggestion options only.
        resp = es.search_template(index="city", body=abc, filter_path=['suggest.*suggestion.options.text','suggest.*suggestion.options._id'])
        return jsonify(resp)
@ns.route('/typeAhead/Full')
class typeAheadFull(Resource):
    # Same stored-template search as /typeAhead, but returns the raw,
    # unfiltered Elasticsearch response.
    @ns.expect(query1)
    def post(self):
        typeAheadText = api.payload['typeAheadText']
        typeAheadTemplate = api.payload['typeAheadTemplate']
        abc = {'id': typeAheadTemplate ,'params': {'typeAheadText': typeAheadText}}
        resp = es.search_template(index="city", body=abc)
        ## resp['matches'] = resp.pop('hits')
        ## print(resp)
        return jsonify(resp)
#### General search of geoname data using search term
# NOTE(review): this route and the one below both register
# '/search/<param>' on the same namespace -- they collide, so only one of
# them can actually be served. One of the paths should be renamed.
@ns.route('/search/<searchTerms>')
class productSearch(Resource):
    def get(self, searchTerms):
        # NOTE(review): the search term is interpolated straight into the
        # JSON query string -- quotes/braces in the URL segment can break or
        # alter the query. Prefer passing a dict body to es.search().
        simpleSearchResponse = es.search(index="city", body="{\"query\": {\"simple_query_string\": {\"query\": \"%s\"}}}" % searchTerms)
        return jsonify(simpleSearchResponse)
#### Search geoname data by geonameId
@ns.route('/search/<geonameId>')
class geonameIdSearch(Resource):
    def get(self, geonameId):
        # Exact _id match lookup; same interpolation caveat as above.
        geonameIdSearchResponse = es.search(index="city", body="{\"query\": {\"match\": {\"_id\": \"%s\"}}}" % geonameId)
        return jsonify(geonameIdSearchResponse)
#### finds records near specific geo point - based on supplied distance ####
@ns.route('/search/NearGeoNameId')
class nearGeonameId(Resource):
    # Looks up the lat/lon of the given geoname id, then runs the stored
    # 'nearGeoNameId' template to find cities within the given distance.
    @ns.expect(query2)
    def post(self):
        nearGeoNameId = api.payload['nearGeoNameId']
        nearGeoNameIdDistance = api.payload['nearGeoNameIdDistance']
        nearGeonameIdSearchResponse = es.search(index="city", body="{\"query\": {\"match\": {\"_id\": \"%s\"}}}" % nearGeoNameId, filter_path=['hits.hits._source.location.*'])
        # NOTE(review): the loop overwrites lat/lon/resp3 on every hit, so
        # only the last hit's result is returned; and if there are no hits
        # at all, resp3 is never bound and the return raises NameError.
        for row in nearGeonameIdSearchResponse["hits"]["hits"]:
            getLatLon = row["_source"]["location"]
            lon = getLatLon['lon']
            lat = getLatLon['lat']
            abc = {'id': 'nearGeoNameId' ,'params': {'lon': lon, 'lat': lat, 'distance' : nearGeoNameIdDistance }}
            resp3 = es.search_template(index="city", body=abc, filter_path=['hits.total', 'hits.hits._source.asciiName', 'hits.hits._source.location', 'hits.hits._source.geonameId'])
        # finalResponse = []
        # for row in resp3["hits"]["hits"]:
        #     finalResponse.append(row["_source"])
        return jsonify(resp3)
#### counts the number of city records stored in elastic ####
@ns.route('/count')
class geoname(Resource):
    def get(self):
        # The '-' prefixed filter_path entries strip metadata from the reply.
        resp = es.count(index="city", filter_path=['-took','-timed_out','-_shards'])
        return resp
#### provides indication if the elastic backend is healthy ####
@ns.route('/backEndHealth')
class backEndHealth(Resource):
    def get(self):
        # Returns only the cluster 'status' (green/yellow/red).
        resp = es.cluster.health(filter_path=['status'])
        return resp
if __name__ == '__main__':
    # Development entry point only; run behind a WSGI server in production.
    app.run(debug=True)
| null | elastic-stack-geonames-cities/geonames-cities-api/geonames-cities-api-using-payload.py | geonames-cities-api-using-payload.py | py | 4,546 | python | en | code | null | code-starcoder2 | 51 |
622131904 | import numpy
import math
execfile(os.path.join(os.path.dirname(__file__), 'rotations.py'))
import numpy
# Convert the pose quaternion (w, x, y, z order) to roll/pitch/yaw radians.
# quat_to_euler is provided by the execfile'd rotations.py above.
def rpyFunction(msg):
    return quat_to_euler([msg.pose.rotation.w, msg.pose.rotation.x, msg.pose.rotation.y, msg.pose.rotation.z])
# The three signal functions below return (utime, angle-in-degrees) pairs.
# Their one-word docstrings are left as-is: presumably the signal-scope
# tool uses them as plot labels -- TODO confirm before editing them.
def rollFunction(msg):
    '''roll'''
    return msg.utime, rpyFunction(msg)[0]* 180.0/math.pi
def pitchFunction(msg):
    '''pitch'''
    return msg.utime, rpyFunction(msg)[1]* 180.0/math.pi
def yawFunction(msg):
    '''yaw'''
    return msg.utime, rpyFunction(msg)[2]* 180.0/math.pi
# Plot 1: orientation (roll/pitch/yaw in degrees) for estimated state
# and the committed plan, on a +/-180 degree axis.
addPlot(timeWindow=15, yLimits=[-180, 180])
addSignalFunction('EST_ROBOT_STATE', rollFunction)
addSignalFunction('EST_ROBOT_STATE', pitchFunction)
addSignalFunction('EST_ROBOT_STATE', yawFunction)
addSignalFunction('COMMITTED_ROBOT_PLAN_STATES', rollFunction)
addSignalFunction('COMMITTED_ROBOT_PLAN_STATES', pitchFunction)
addSignalFunction('COMMITTED_ROBOT_PLAN_STATES', yawFunction)
# Plot 2: pelvis translation (x, y, z) for estimate vs. committed plan.
addPlot(timeWindow=15, yLimits=[-1, 1])
addSignal('EST_ROBOT_STATE', msg.utime, msg.pose.translation.x)
addSignal('EST_ROBOT_STATE', msg.utime, msg.pose.translation.y)
addSignal('EST_ROBOT_STATE', msg.utime, msg.pose.translation.z)
addSignal('COMMITTED_ROBOT_PLAN_STATES', msg.utime, msg.pose.translation.x)
addSignal('COMMITTED_ROBOT_PLAN_STATES', msg.utime, msg.pose.translation.y)
addSignal('COMMITTED_ROBOT_PLAN_STATES', msg.utime, msg.pose.translation.z)
# Plot 3: vertical foot forces from the state estimate and the raw
# FORCE_TORQUE sensor channels (sensor 0 = left, 1 = right).
addPlot(timeWindow=15, yLimits=[-1, 1])
addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.l_foot_force_z)
addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.r_foot_force_z)
addSignal('FORCE_TORQUE', msg.utime, msg.sensors[0].force[2],label="left")
addSignal('FORCE_TORQUE', msg.utime, msg.sensors[1].force[2],label="right")
#addPlot(timeWindow=15, yLimits=[-1, 1])
#addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.l_foot_torque_x)
#addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.l_foot_torque_y)
#addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.r_foot_torque_x)
#addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.r_foot_torque_y)
| null | software/config/signal_scope/val/forcetorque.py | forcetorque.py | py | 2,059 | python | en | code | null | code-starcoder2 | 51 |
506532697 | import os
from dotenv import load_dotenv
import pymongo
import datetime
from bson.objectid import ObjectId
from flask import Flask, request, render_template, redirect, url_for, session, flash
from flask_login import LoginManager, UserMixin, current_user, login_user, logout_user, login_required
import bcrypt
from functools import wraps
app = Flask(__name__)
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY')
## necessary for python-dotenv: resolve the .env file relative to this module ##
APP_ROOT = os.path.join(os.path.dirname(__file__), '..')   # refers to application_top
dotenv_path = os.path.join(APP_ROOT, '.env')
load_dotenv(dotenv_path)
# Mongo connection string comes from the environment (.env file).
mongo = os.getenv('MONGO')
client = pymongo.MongoClient(mongo)
db = client['bucket_list'] # Mongo database
users = db['users'] # Mongo collection
roles = db['roles'] # Mongo collection
categories = db['categories'] # Mongo collection
bucketList = db['bucketList'] # Mongo collection
status = db['status'] # Mongo collection of share statuses
# flask-login setup: unauthenticated users are redirected to the 'login' view.
login = LoginManager()
login.init_app(app)
login.login_view = 'login'
@login.user_loader
def load_user(username):
    """flask-login hook: rebuild the session's User from its username."""
    u = users.find_one({"username": username})
    if not u:
        return None
    return User(username=u['username'], role=u['role'], id=u['_id'])
class User:
    """Minimal flask-login user object backed by a Mongo user document."""

    def __init__(self, id, username, role):
        self._id = id
        self.username = username
        self.role = role

    @staticmethod
    def is_authenticated():
        """flask-login hook: real users are always authenticated."""
        return True

    @staticmethod
    def is_active():
        """flask-login hook: accounts are never deactivated."""
        return True

    @staticmethod
    def is_anonymous():
        """flask-login hook: this class never represents an anonymous user."""
        return False

    def get_id(self):
        """flask-login hook: the username doubles as the session id."""
        return self.username
### custom wrap to determine role access ###
def roles_required(*role_names):
    """Decorator factory: restrict a view to users whose role is listed.

    Unauthenticated users and users with a non-matching role are
    redirected to the login page.
    """
    def decorator(original_route):
        @wraps(original_route)
        def decorated_route(*args, **kwargs):
            if not current_user.is_authenticated:
                # NOTE(review): these print() calls are debug output and
                # should become proper logging (or be removed).
                print('The user is not authenticated.')
                return redirect(url_for('login'))
            print(current_user.role)
            print(role_names)
            if not current_user.role in role_names:
                print('The user does not have this role.')
                return redirect(url_for('login'))
            else:
                print('The user is in this role.')
                return original_route(*args, **kwargs)
        return decorated_route
    return decorator
@app.route('/', methods=['GET', 'POST'])
def index():
    """Public landing page."""
    return render_template('index.html')
@app.route('/register')
def register():
    """Render the self-registration form."""
    return render_template('register.html')
@app.route('/add-user', methods=['GET', 'POST'])
def add_user():
    """Create an account from the public registration form.

    Rejects duplicate e-mails/usernames, inserts the user document and
    then renders the login page.
    """
    if request.method == 'POST':
        form = request.form
        # Uniqueness checks on e-mail and username.
        email = users.find_one({"email": request.form['email']})
        if email:
            flash('This email is already registered.', 'warning')
            return 'This email has already been registered. back page on your browser to change email.'
        username = users.find_one({"username": request.form['username']})
        if username:
            flash('This username is already registered.', 'warning')
            return 'This username has already been registered. back page on your browser to change username.'
        # NOTE(review): the password is stored in plain text even though
        # bcrypt is imported, and the role is taken straight from the form
        # (a visitor could self-register as admin). Both should be fixed.
        new_user = {
            'first_name': form['first_name'],
            'last_name': form['last_name'],
            'username' : form['username'],
            'email': form['email'],
            'password': form['password'],
            'role': form['role'],
            'date_added': datetime.datetime.now(),
            'date_modified': datetime.datetime.now()
        }
        users.insert_one(new_user)
        flash(new_user['username'] + ' user has been added.', 'success')
    return render_template('login.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user against the Mongo ``users`` collection.

    GET renders the form. POST checks the credentials: on success the
    user is logged in and redirected to a same-site ``next`` target (or
    the activity list); on failure an error is flashed and the form is
    rendered again.
    """
    from urllib.parse import urlparse  # stdlib; replaces the undefined url_parse
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    if request.method == 'POST':
        user = users.find_one({"username": request.form['username']})
        # NOTE(review): passwords are compared in plain text even though
        # bcrypt is imported; the stored value should be a hash.
        if user and user['password'] == request.form['password']:
            user_obj = User(username=user['username'], role=user['role'], id=user['_id'])
            login_user(user_obj)
            flash("Logged in successfully!", category='success')
            # Bug fix: url_parse() was undefined (NameError whenever a
            # "next" target was supplied). Use urllib.parse.urlparse and
            # only honour same-site targets to avoid open redirects.
            next_page = request.args.get('next')
            if not next_page or urlparse(next_page).netloc != '':
                next_page = url_for('view_activities')
            return redirect(next_page)
        # Bug fix: a failed login previously flashed "Logged in
        # successfully!" and redirected anyway (and the failure message
        # was flashed on plain GETs). Report the failure and re-render.
        flash("Wrong username or password!", category='danger')
    return render_template('login.html')
@app.route('/logout', methods=['GET', 'POST'])
def logout():
    """End the current session and return to the login page."""
    logout_user()
    flash('You have successfully logged out.', 'success')
    return redirect(url_for('login'))
@app.route('/my-account/<user_id>', methods=['GET', 'POST'])
@login_required
@roles_required('user', 'contributor', 'admin')
def my_account(user_id):
    """Show the account page for the given user id."""
    # NOTE(review): user_id comes from the URL, so any logged-in user can
    # view any account (IDOR); it should be checked against current_user.
    edit_account = users.find_one({'_id': ObjectId(user_id)})
    if edit_account:
        return render_template('my-account.html', user=edit_account)
    flash('User not found.', 'warning')
    return redirect(url_for('index'))
@app.route('/update-myaccount/<user_id>', methods=['GET', 'POST'])
@login_required
@roles_required('contributor', 'admin')
def update_myaccount(user_id):
    """Persist edits made on the account page.

    On POST the whole user document (including role and plain-text
    password) is overwritten from the form, then the user list is shown.
    """
    if request.method == 'POST':
        form = request.form
        password = request.form['password']
        # NOTE(review): same IDOR / plain-text password / role-escalation
        # concerns as my_account() and add_user() apply here.
        users.update({'_id': ObjectId(user_id)},
                     {
            'first_name': form['first_name'],
            'last_name': form['last_name'],
            'username' : form['username'],
            'email': form['email'],
            'password': password,
            'role': form['role'],
            'date_added': form['date_added'],
            'date_modified': datetime.datetime.now()
        })
        update_user = users.find_one({'_id': ObjectId(user_id)})
        flash(update_user['username'] + ' has been updated.', 'success')
        return redirect(url_for('admin_users'))
    return render_template('user-admin.html', all_roles=roles.find(), all_users=users.find())
@app.route('/about', methods=['GET', 'POST'])
def about():
    """Public about page."""
    return render_template('about.html')
########## Admin functionality -- User management ##########
@app.route('/admin/users', methods=['GET', 'POST'])
@login_required
@roles_required('admin')
def admin_users():
    """Admin overview of all users and roles."""
    return render_template('user-admin.html', all_roles=roles.find(), all_users=users.find())
@app.route('/admin/add-user', methods=['GET', 'POST'])
@login_required
@roles_required('admin')
def admin_add_user():
    """Admin-side user creation (duplicate e-mail check only)."""
    if request.method == 'POST':
        form = request.form
        # NOTE(review): password is stored in plain text (bcrypt unused).
        password = request.form['password']
        email = users.find_one({"email": request.form['email']})
        if email:
            flash('This email is already registered.', 'warning')
            return 'This email has already been registered.'
        new_user = {
            'first_name': form['first_name'],
            'last_name': form['last_name'],
            'username' : form['username'],
            'email': form['email'],
            'password': password,
            'role': form['role'],
            'date_added': datetime.datetime.now(),
            'date_modified': datetime.datetime.now()
        }
        users.insert_one(new_user)
        flash(new_user['username'] + ' user has been added.', 'success')
        return redirect(url_for('admin_users'))
    return render_template('user-admin.html', all_roles=roles.find(), all_users=users.find())
@app.route('/admin/delete-user/<user_id>', methods=['GET', 'POST'])
@login_required
@roles_required('admin')
def admin_delete_user(user_id):
    """Delete a user document by its ObjectId."""
    delete_user = users.find_one({'_id': ObjectId(user_id)})
    if delete_user:
        users.delete_one(delete_user)
        flash(delete_user['username'] + ' has been deleted.', 'warning')
        return redirect(url_for('admin_users'))
    flash('User not found.', 'warning')
    return redirect(url_for('admin_users'))
@app.route('/admin/edit-user/<user_id>', methods=['GET', 'POST'])
@login_required
@roles_required('admin')
def admin_edit_user(user_id):
    """Render the edit form for one user."""
    edit_user = users.find_one({'_id': ObjectId(user_id)})
    if edit_user:
        return render_template('edit-user.html', user=edit_user, all_roles=roles.find())
    flash('User not found.', 'warning')
    return redirect(url_for('admin_users'))
@app.route('/admin/update-user/<user_id>', methods=['GET', 'POST'])
@login_required
@roles_required('admin')
def admin_update_user(user_id):
    """Overwrite a user document with the submitted form values."""
    if request.method == 'POST':
        form = request.form
        password = request.form['password']
        # NOTE(review): pymongo's Collection.update() is deprecated in
        # favour of update_one()/replace_one().
        users.update({'_id': ObjectId(user_id)},
                     {
            'first_name': form['first_name'],
            'last_name': form['last_name'],
            'username' : form['username'],
            'email': form['email'],
            'password': password,
            'role': form['role'],
            'date_added': form['date_added'],
            'date_modified': datetime.datetime.now()
        })
        update_user = users.find_one({'_id': ObjectId(user_id)})
        flash(update_user['username'] + ' has been updated.', 'success')
        return redirect(url_for('admin_users'))
    return render_template('user-admin.html', all_roles=roles.find(), all_users=users.find())
@app.route('/activities/add-share-status', methods=['POST'])
@login_required
@roles_required('admin')
def add_share_status():
    """Create a new share status (admin only).

    Rejects duplicates, inserts the status into the ``status`` collection
    and returns to the activity admin page.
    """
    if request.method == 'POST':
        form = request.form
        # Bug fix: the duplicate lookup queried the `users` collection;
        # share statuses live in `status`.
        share_status = status.find_one({"share_status": form['new_share_status']})
        if share_status:
            flash('This status is already registered.', 'warning')
            # Bug fix: url_for() takes an endpoint name (not a path) and
            # must be wrapped in redirect() to produce a response.
            return redirect(url_for('admin_users'))
        new_share_status = {
            # Bug fix: read the same form field that the duplicate check
            # used ('share_status' was a different, likely missing, key).
            'share_status': form['new_share_status'],
        }
        status.insert_one(new_share_status)
        # Bug fix: `new_status` was undefined and raised NameError here.
        flash(new_share_status['share_status'] + ' has been added.', 'success')
        return redirect(url_for('admin_activities'))
    return render_template('activity-admin.html', all_status=status.find())
@app.route('/activities/delete_share_status/<share_status_id>', methods=['GET'])
@login_required
@roles_required('admin')
def delete_share_status(share_status_id):
    """Delete a share status by its ObjectId (admin only)."""
    # Bug fix: the route parameter is share_status_id (`category_id` was
    # undefined and raised NameError). Also avoid shadowing the view name
    # with the local variable.
    doomed = status.find_one({'_id': ObjectId(share_status_id)})
    if doomed:
        status.delete_one(doomed)
        flash(doomed['share_status'] + ' has been deleted.', 'danger')
        return redirect(url_for('admin_activities'))
    # Bug fix: the not-found message said 'activity' (copy-paste slip).
    flash('share status not found.', 'warning')
    return redirect(url_for('admin_activities'))
########## categories ##########
@app.route('/admin/categories', methods=['GET', 'POST'])
@login_required
@roles_required('admin')
def admin_categories():
    """Admin overview of all activity categories."""
    return render_template('admin-categories.html', all_categories=categories.find())
@app.route('/add-category', methods=[ 'GET','POST'])
@login_required
@roles_required('admin')
def add_category():
    """Insert a new category from the admin form."""
    if request.method == 'POST':
        form = request.form
        new_category = {
            'category_name' : form['category_name']
        }
        categories.insert_one(new_category)
        flash('New category has been added.', 'success')
    return render_template('admin-categories.html', all_categories=categories.find())
@app.route('/categories/edit-category/<category_id>', methods=['GET', 'POST'])
@login_required
@roles_required('admin')
def edit_category(category_id):
    """Render the edit form for one category."""
    edit_category = categories.find_one({'_id': ObjectId(category_id)})
    if edit_category:
        return render_template('edit-category.html', category=edit_category, all_categories=categories.find(), all_status=status.find())
    flash('category not found.', 'danger')
    return redirect(url_for('admin_categories'))
@app.route('/categories/update-category/<category_id>', methods=['POST'])
@login_required
@roles_required('admin')
def update_category(category_id):
    """Overwrite a category document with the submitted name."""
    if request.method == 'POST':
        form = request.form
        # NOTE(review): Collection.update() is deprecated in pymongo;
        # prefer update_one()/replace_one().
        categories.update({'_id': ObjectId(category_id)},
                          {
            'category_name' : form['category_name']
        })
        update_category = categories.find_one({'_id': ObjectId(category_id)})
        flash(update_category['category_name'] + ' has been updated.', 'success')
        return redirect(url_for('admin_categories'))
    return render_template('edit-category.html', all_categories=categories.find())
@app.route('/categories/delete-category/<category_id>', methods=['POST'])
@login_required
@roles_required('admin')
def delete_category(category_id):
    """Delete a category document by its ObjectId."""
    delete_category = categories.find_one({'_id': ObjectId(category_id)})
    if delete_category:
        categories.delete_one(delete_category)
        flash(delete_category['category_name'] + ' has been deleted.', 'danger')
        return redirect(url_for('admin_categories'))
    # NOTE(review): message says 'activity' but this view handles
    # categories (copy-paste slip).
    flash('activity not found.', 'warning')
    return redirect(url_for('admin_categories'))
########## activities ##########
@app.route('/activities', methods=['GET', 'POST'])
def view_activities():
    """Public list of every bucket-list activity."""
    return render_template('activities.html', all_bucketList=bucketList.find())
@app.route('/jump', methods=['GET', 'POST'])
def view_jump():
    """Placeholder endpoint; returns a static string."""
    return "jump"
@app.route('/search-results', methods=['GET', 'POST'])
def view_search_results():
    """Render the search-results page.

    Bug fix: ``search_string`` was referenced without ever being defined,
    raising NameError on every request; read it from the query string
    (empty string when absent) instead.
    """
    search_string = request.args.get('search_string', '')
    return render_template('search-results.html', search_string=search_string, all_bucketList=bucketList.find())
@app.route('/search', methods=['GET', 'POST'])
def search():
    """Handle a search form submission and render the result page."""
    # NOTE(review): a plain GET falls off the end of the function and
    # returns None, which Flask turns into a 500; the route should either
    # be POST-only or render a default page for GET.
    if request.method == 'POST':
        form = request.form
        search_string = request.form['search_string']
        return render_template('search-results.html', search_string=search_string, all_bucketList=bucketList.find())
@app.route('/activities/my-bucket-list', methods=['GET', 'POST'])
@login_required
@roles_required('admin', 'contributor')
def view_my_activities():
    """Per-user bucket-list page (template filters by user)."""
    return render_template('my-bucket-list.html', all_bucketList=bucketList.find())
@app.route('/activities/activities', methods=['GET', 'POST'])
@login_required
@roles_required('admin')
def admin_activities():
    """Admin overview of activities, categories and share statuses."""
    return render_template('activity-admin.html', all_categories=categories.find(), all_bucketList=bucketList.find(), all_status=status.find())
@app.route('/activities/new-activity', methods=['GET', 'POST'])
@login_required
@roles_required('admin', 'contributor')
def activity_page():
    """Render the blank new-activity form."""
    return render_template('new-activity.html', all_categories=categories.find(), all_bucketList=bucketList.find(), all_status=status.find())
@app.route('/activities/add-activity', methods=['POST'])
@login_required
@roles_required('admin', 'contributor')
def add_activity():
    """Insert a new bucket-list activity from the submitted form."""
    if request.method == 'POST':
        form = request.form
        new_activity = {
            'activity_name' : form['activity_name'],
            'category' : form['category'],
            'description' : form['description'],
            'share_status' : form['share_status'],
            'estimated_cost' : form['estimated_cost'],
            'address' : form['address'],
            'city' : form['city'],
            'state' : form['state'],
            'country' : form['country'],
            'expected_date' : form['expected_date'],
            'username' : form['username'],
            'date_added': datetime.datetime.now(),
            'date_modified': datetime.datetime.now()
        }
        bucketList.insert_one(new_activity)
        flash('New activity has been added.', 'success')
        return redirect(url_for('view_my_activities'))
    return render_template('new-activity.html', all_categories=categories.find())
@app.route('/activities/edit-activity/<activity_id>', methods=['GET', 'POST'])
@login_required
@roles_required('admin', 'contributor')
def edit_activity(activity_id):
    """Render the edit form for one activity."""
    edit_activity = bucketList.find_one({'_id': ObjectId(activity_id)})
    if edit_activity:
        return render_template('edit-activity.html', activity=edit_activity, all_categories=categories.find(), all_status=status.find())
    flash('activity not found.', 'danger')
    return redirect(url_for('admin_activities'))
@app.route('/activities/update-activity/<activity_id>', methods=['POST'])
@login_required
@roles_required('admin', 'contributor')
def update_activity(activity_id):
    """Overwrite an activity document with the submitted form values."""
    if request.method == 'POST':
        form = request.form
        # NOTE(review): Collection.update() is deprecated in pymongo;
        # prefer update_one()/replace_one().
        bucketList.update({'_id': ObjectId(activity_id)},
                          {
            'activity_name' : form['activity_name'],
            'category' : form['category'],
            'description' : form['description'],
            'share_status' : form['share_status'],
            'estimated_cost' : form['estimated_cost'],
            'address' : form['address'],
            'city' : form['city'],
            'state' : form['state'],
            'country' : form['country'],
            'expected_date' : form['expected_date'],
            'username' : form['username'],
            'date_added': form['date_added'],
            'date_modified': datetime.datetime.now()
        })
        update_activity = bucketList.find_one({'_id': ObjectId(activity_id)})
        flash(update_activity['activity_name'] + ' has been updated.', 'success')
        return redirect(url_for('view_activities'))
    return render_template('edit-activity.html', all_categories=categories.find())
@app.route('/activities/delete-activity/<activity_id>', methods=['POST'])
@login_required
@roles_required('admin', 'contributor' )
def delete_activity(activity_id):
    """Delete an activity document by its ObjectId."""
    delete_activity = bucketList.find_one({'_id': ObjectId(activity_id)})
    if delete_activity:
        bucketList.delete_one(delete_activity)
        flash(delete_activity['activity_name'] + ' has been deleted.', 'danger')
        return redirect(url_for('view_activities'))
    flash('activity not found.', 'warning')
    return redirect(url_for('view_activities'))
# authenticated users can print a activity
@app.route('/activities/print-activity/<activity_id>', methods=['GET', 'POST'])
def print_activity(activity_id):
    """Render the printable view of one activity."""
    print_activity = bucketList.find_one({'_id': ObjectId(activity_id)})
    if print_activity:
        return render_template('print-activity.html', activity=print_activity)
    flash('activity not found.', 'danger')
    return redirect(url_for('view_activities'))
if __name__ == "__main__":
    # Development entry point only; use a WSGI server in production.
    app.run(debug=True)
| null | app.py | app.py | py | 18,677 | python | en | code | null | code-starcoder2 | 51 |
472460838 | # -*- coding: utf-8 -*-
"""
Deck of character cards.
"""
import csv
from deck import Deck
class CharacterCard:
    """A single card with a category, a name and two effect strings."""

    def __init__(self, cdict):
        # Pull the card fields out of a csv.DictReader row.
        self.category = cdict['category']
        self.name = cdict['name']
        self.effect1 = cdict['effect1']
        self.effect2 = cdict['effect2']

    def __str__(self):
        """Render the card as 'name effect1 effect2'."""
        return ' '.join((self.name, self.effect1, self.effect2))
class CharacterDeck(Deck):
    """Deck of CharacterCards loaded from a CSV file.

    Rows with category 'Trait' are recorded in trait_names but excluded
    from the deck itself; unique character names are collected as well.
    """
    def __init__(self, csv_file):
        character_cards = []
        self.character_names = []
        self.trait_names = []
        with open(csv_file) as csvfile:
            dreader = csv.DictReader(csvfile)
            for rowd in dreader:
                next_card = CharacterCard(rowd)
                # Record each distinct character / trait name once.
                if next_card.category == "Character" \
                and next_card.name not in self.character_names:
                    self.character_names.append(next_card.name)
                if next_card.category == 'Trait' \
                and next_card.name not in self.trait_names:
                    self.trait_names.append(next_card.name)
                # Ignoring traits for now
                if next_card.category == 'Trait':
                    continue
                character_cards.append(next_card)
        super().__init__(character_cards)
    # Retrieve all of the possible characters
    def characters(self):
        return self.character_names
    # Use only the listed characters, with given commitment and discord
    # levels, in the deck
    # NOTE(review): the com1/dis1/com2/dis2 parameters are currently
    # unused -- only the two character names take effect.
    def filter_characters(self, ch1, com1, dis1, ch2, com2, dis2):
        trimmed_characters = []
        for cc in self.cards:
            if cc.category == "Character":
                if cc.name == ch1 or cc.name == ch2:
                    trimmed_characters.append(cc)
            else:
                trimmed_characters.append(cc)
        self.character_names = [ch1, ch2]
        self.cards = trimmed_characters
if __name__ == "__main__":
cdeck = CharacterDeck('../../csv/character-cards.csv')
chars = cdeck.characters()
print("Characters", chars)
cdeck.filter_characters(chars[0], 0, 0, chars[1], 0, 0)
chars = cdeck.characters()
print("Characters", chars)
cc = cdeck.draw()
print('draw', cc)
cdeck.discard(cc)
cc = cdeck.draw()
print('draw', cc)
cdeck.discard(cc)
cdeck.shuffle()
cc = cdeck.draw()
print('draw', cc)
cdeck.discard(cc) | null | src/ui/character_deck.py | character_deck.py | py | 2,719 | python | en | code | null | code-starcoder2 | 51 |
632017191 | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import *
from scipy.interpolate import *
from scipy.optimize import *
# integration: numerically integrate f over [0, 1] with scipy's quad
def f(x):
    """Integrand 3x^2 + 1 (exact integral over [0, 1] is 2)."""
    return 3.0 * x * x + 1.0


I, err = quad(f, 0, 1)
print("I= ", I, "err: ", err)
# Sparse samples of cos(x^2/8) plus a dense "perfect" reference curve.
x_given=np.linspace(0,10,10)
y_given=np.cos(x_given**2.0/8.0)
xx=np.linspace(0,10,1000)
yy=np.cos(xx**2.0/8.0)
# plt.plot(x_given,y_given,'o',label='given data')
# plt.plot(xx,yy,':',label='perfect')
# plt.plot('x')
# plt.ylabel('y')
# plt.legend(loc='best')
# interpolation: linear vs. cubic-spline reconstructions of the samples
x_i=np.linspace(0,10,1000)
# -----Linear interpolation
f_linear=interp1d(x_given,y_given)
y_il=f_linear(x_i)
f_spline=interp1d(x_given,y_given,kind='cubic')
y_is=f_spline(x_i)
# plt.plot(x_given,y_given,'o')
# plt.plot(x_i,y_il,'-')
# plt.plot(x_i,y_is,'--')
# plt.plot(xx,yy,':')
# plt.legend(['data','linear','spline','perfect'],loc='best')
# plt.show()
# Polynomial least-squares fit (degree 3) to a small data set.
x_gv=np.array([0.,1.,2.,3.,4.,5.])
y_gv=np.array([0,0.8,0.9,0.1,-0.8,-1.0])
x_p=np.linspace(-2,6.0,100)
p3=np.polyfit(x_gv,y_gv,3)
y_p=np.polyval(p3,x_p)
plt.plot(x_gv,y_gv,'o')
plt.plot(x_p,y_p,'-')
plt.legend(['data','polyfit'],loc='best')
plt.ylim(-2,2)
print(p3)
plt.show()
# General curve fits: recover (a, b, c) of a*exp(-b*x)+c from noisy data.
def f(x,a,b,c):
    return a*np.exp(-b*x)+c
x_g=np.linspace(0,4,50)
y_g=f(x_g,2.5,1.3,0.5)+0.2*np.random.normal(size=len(x_g))
params,extras=curve_fit(f,x_g,y_g)
# NOTE(review): the first label in this format string should read "a=%g"
# (it prints "c" twice).
print("c=%g, b=%g,c=%g" %(params[0],params[1],params[2]))
plt.plot(x_g,y_g,'o')
plt.plot(x_g,f(x_g,params[0],params[1],params[2]))
plt.legend(['data','fit'],loc='best')
plt.show()
| null | Bases/Section1.py | Section1.py | py | 1,605 | python | en | code | null | code-starcoder2 | 51 |
458971686 | from products.models import Product
from users.models import User
from .models import OrderItem, Order
from rest_framework import generics, status, permissions, pagination
from core.permissions import *
from rest_framework.response import Response
from .serializers import *
from django_filters.rest_framework import DjangoFilterBackend
from cart.models import Cart
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.shortcuts import render
from django.conf import settings
import stripe
import os
# ?Pagination Class
class Pagination(pagination.PageNumberPagination):
    """Page-number pagination: 100 items per page, selected via ?p=N."""
    page_size = 100
    page_query_param = 'p'
# ?Views
class UpdateOrder(generics.UpdateAPIView):
    """Staff-only endpoint to change an order's status."""
    queryset = Order.objects.all()
    serializer_class = UpdateOrderStatus
    permission_classes = [IsStaff]
# *admin
class ListOrders(generics.ListAPIView):
    """Staff-only paginated list of all orders, filterable by status."""
    queryset = Order.objects.all()
    serializer_class = OrderSerializerListAdmin
    permission_classes = [IsStaff]
    filter_backends = [DjangoFilterBackend]
    filterset_fields = ['status']
    pagination_class = Pagination
# *General
class Checkout(generics.CreateAPIView):
    """Charge the user's cart via Stripe and turn it into an Order.

    Flow: sum the cart, create+confirm a Stripe PaymentIntent, then (on
    success) persist the Order and its OrderItems, adjust product stock
    and popularity, reconcile other carts against the new stock, empty
    the cart and e-mail the back office.
    """
    permission_classes = [permissions.IsAuthenticated]
    def post(self, request, *args, **kwargs):
        try:
            products = Cart.objects.all().filter(user=request.user.id)
            # Order total in MXN; deal items use their special price.
            total = 0
            for i in products:
                product = Product.objects.all().filter(id=i.product_id).first()
                if product.deal:
                    total += product.special_price * i.quantity
                else:
                    total += product.price * i.quantity
            stripe.api_key = settings.STR_KEY
            # Stripe amounts are in cents, hence the * 100.
            charge = stripe.PaymentIntent.create(
                amount=int(total * 100),
                currency='MXN',
                description=f'Order from the user {request.user.id}',
                payment_method=request.data.get('id', None),
                confirm=True
            )
            # NOTE(review): if the charge status is anything other than
            # 'succeeded' this view returns None, which becomes a 500.
            if(charge['status'] == 'succeeded'):
                user = request.user
                direction = f"{user.calle} #{user.exterior_number} {f'#{user.interior_number}' if user.interior_number else ''}, {user.colonia} {user.postalcode} - {user.estado}"
                order = {
                    "total": total,
                    "user": request.user.id,
                    "direction": direction
                }
                serializer = OrderSerializer(data=order)
                if serializer.is_valid():
                    serializer.save()
                # One OrderItem per cart line.
                # NOTE(review): `total` is reused here as the per-item
                # total, clobbering the order total computed above (the
                # e-mail below therefore reports the last item's total).
                for i in products:
                    total = 0
                    product = Product.objects.all().filter(id=i.product_id).first()
                    if product.deal:
                        total = product.special_price * i.quantity
                    else:
                        total = product.price * i.quantity
                    data = {
                        "total": total,
                        "order": serializer.data['id'],
                        "quantity": i.quantity,
                        "product": product.id
                    }
                    Product.objects.filter(id=product.id).update(
                        popularity=product.popularity + 20 * i.quantity, stock=product.stock - i.quantity)
                    # ?validating that other carts are in order related to the stock of the product
                    # NOTE(review): this iterates over every cart row, not
                    # just rows holding this product -- presumably it should
                    # filter by product_id. TODO confirm before changing.
                    cartProducts = Cart.objects.all()
                    for c in cartProducts:
                        p = Product.objects.filter(id=product.id).first()
                        if p.stock == 0:
                            c.delete()
                        if c.quantity > p.stock:
                            Cart.objects.filter(id=c.id).update(
                                quantity=p.stock)
                    if product.stock > 0:
                        orderItem = OrderItemSerializer(data=data)
                        if orderItem.is_valid():
                            orderItem.save()
                products.delete()
                #? Sending Email
                context = {
                    "UserName": request.user.nombre,
                    "orderId": serializer.data['id'],
                    "total": total,
                    "url": settings.URL_FOR_EMAIL_ORDER,
                }
                template = get_template('orderAlert.html')
                content = template.render(context)
                try:
                    email = EmailMultiAlternatives(
                        'Nueva Orden',
                        'Nueva Orden',
                        settings.EMAIL_HOST_USER,
                        [settings.EMAIL_HOST_USER]
                    )
                    email.attach_alternative(content, 'text/html')
                    email.send()
                except Exception as e:
                    # E-mail failure must not fail the already-charged order.
                    print(e)
                return Response(status=status.HTTP_200_OK)
        except Exception as e:
            # Any failure (including Stripe card errors) is reported as a
            # generic payment failure.
            print(e)
            return Response(data={'error': 'El pago falló, porfavor revisa que tengas suficientes fondos'}, status=status.HTTP_400_BAD_REQUEST)
class ListUserOrders(generics.ListAPIView):
    """List the authenticated user's own orders."""
    permission_classes = [IsOwner, permissions.IsAuthenticated]
    def get(self, request, *args, **kwargs):
        orders = Order.objects.all().filter(user=request.user.id)
        serializer = OrderSerializerList(orders, many=True)
        return Response(data=serializer.data, status=status.HTTP_200_OK)
class RetrieveOrder(generics.RetrieveAPIView):
    """Retrieve a single order; only its owner may see it."""
    # Bug fix: the attribute must be spelled `permission_classes` --
    # the misspelled `permission_class` was silently ignored by DRF,
    # leaving this view open to unauthenticated requests.
    permission_classes = [IsOwner, permissions.IsAuthenticated]

    def get(self, request, *args, **kwargs):
        order_id = self.kwargs['pk']
        order = Order.objects.all().filter(id=order_id).first()
        # Bug fix: a missing order previously crashed with AttributeError
        # on `order.user`; return 404 instead.
        if order is None:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if order.user == request.user:
            serializer = OrderSerializerList(order)
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(status=status.HTTP_401_UNAUTHORIZED)
class SolicitarFactura(generics.CreateAPIView):
    """Request an invoice (factura) for one of the requesting user's orders.

    Validates ownership and one-invoice-per-order, e-mails the back office
    (best effort), then flags the order as invoiced.
    """

    def post(self, request, *args, **kwargs):
        # BUGFIX: ``request.data['id']`` raised KeyError (HTTP 500) when the key
        # was absent, and a falsy id fell off the end of the function returning
        # None (also HTTP 500). Use .get() and fail fast instead.
        id = request.data.get('id')
        if not id:
            return Response({'detail':'No encontrado'}, status = status.HTTP_404_NOT_FOUND)
        order = Order.objects.filter(id=id).first()
        if order is None:
            return Response({'detail':'No encontrado'}, status = status.HTTP_404_NOT_FOUND)
        if order.factura:
            return Response({"status": "Esta orden ya a solicitado una factura, si no la haz recibido, porfavor contactate al correo de atención al cliente"}, status=status.HTTP_400_BAD_REQUEST)
        if order.user != request.user:
            return Response({"status": "No tienes permiso para solicitar Factura sobre esta orden"}, status=status.HTTP_401_UNAUTHORIZED)
        data = request.data
        user = request.user
        context = {
            "orderId": data['id'],
            "UserName": data['nombre'],
            "total": order.total,
            "email": data['email'],
            "direccion": f"{user.calle} #{user.exterior_number} {f'#{user.interior_number}' if user.interior_number else ''}, {user.colonia} {user.postalcode} - {user.estado}",
            "rfc": data['rfc'],
            "nombre": data['nombre'],
            "userEmail": user.email
        }
        template = get_template('correo.html')
        content = template.render(context)
        try:
            email = EmailMultiAlternatives(
                f'El usuario {request.user.nombre} solicito una factura sobre el pedido #{order.id}',
                'Factura',
                settings.EMAIL_HOST_USER,
                ['raulemilianomirandagtz@gmail.com'],
            )
            # BUGFIX: attach/send were previously OUTSIDE the try block, so a
            # failed constructor left ``email`` unbound and the next line raised
            # NameError (HTTP 500). E-mailing is best-effort: the invoice
            # request is still recorded below even if the mail fails.
            email.attach_alternative(content, 'text/html')
            email.send()
        except Exception as e:
            print(e)
        Order.objects.filter(id=id).update(factura=True)
        return Response({"status": "La solicitud de factura se realizó correctamente"}, status=status.HTTP_200_OK)
| null | core/orders/views.py | views.py | py | 8,279 | python | en | code | null | code-starcoder2 | 51 |
# encoding: UTF-8
from __future__ import absolute_import, unicode_literals
from celery import shared_task
from dapps.celeryCommon import RetryableError, Retryable, getMappedAs
from dapps.sinaMaster.worker import thePROG
import dapps.sinaCrawler.tasks_Dayend as CTDayend
import crawler.crawlSina as sina
import crawler.producesSina as prod
from MarketData import *
import HistoryData as hist
import h5tar, h5py, pickle, bz2
from urllib.parse import quote, unquote
import sys, os, re, glob, stat, shutil, fnmatch
from datetime import datetime, timedelta
SYMBOL_LIST_HEADERSEQ="symbol,name,mktcap,nmc,turnoverratio,open,high,low,close,volume,amount,ticktime,changepercent"
EOL = "\r\n"
SINA_USERS_ROOT = '/mnt/data/hpwkspace/users'
MAPPED_USER, MAPPED_HOME = getMappedAs(homeDir = '/mnt/s') # master certainly take the local volume /mnt/s
if MAPPED_USER in [ None, 'nobody'] : MAPPED_USER = 'hpx'
SUBDIR_Reqs = 'reqs'
DIR_ARCHED_HOME = os.path.join(MAPPED_HOME, 'archived', 'sina')
WORKDIR_CACHE = '/tmp/sina_cache'
try:
os.mkdir(WORKDIR_CACHE)
except:
WORKDIR_CACHE = '/tmp'
'''
IDXs_to_COLLECT=[ # http://vip.stock.finance.sina.com.cn/mkt/#dpzs
'SH000001', # 上证指数
'SZ399001', # 深证成指
'SZ399005', # 中小板指
'SZ399006', # 创业板指
'SH000011', # 基金指数
]
ETFs_to_COLLECT=[ # asof 2020-12-08 top actives: http://vip.stock.finance.sina.com.cn/fund_center/index.html#jjhqetf
'SH510300','SH512880','SH510050','SH510900','SH518880','SZ159919','SH510500','SZ159934','SZ159949','SH512000',
'SH511660','SZ159920','SZ159995','SH588000','SH510330','SZ159915','SH515030','SH512760','SH512800','SZ159937',
'SH512660','SH512480','SH512690','SH515700','SH515050','SH515380','SH518800','SH512400','SZ159922','SH588080',
'SH512500','SZ159001','SH588050','SZ159003','SH510310','SH515000','SH513050','SH588090','SZ159992','SH510880',
'SH513090','SH512290','SZ159928','SZ159901','SZ159806','SH511260','SH512010','SH515220','SZ159952','SH511810',
'SH512710','SH510850','SH510510','SH512900','SZ159966','SH512170','SZ159994','SH511010','SH510180','SZ159996',
'SZ159801','SZ159967','SH510230','SH515210','SZ159993','SH515880','SZ159997','SH513100','SZ159807','SH512070',
'SZ159941','SH515330','SH511380','SH515260','SH512200','SH513500','SZ159905','SH512720','SZ159820','SH512980',
'SH515650','SH515800','SH515560','SH511690','SH515770','SH510760','SH515750','SZ159819','SZ159948','SH512100',
'SH512670','SZ159813','SH512700','SZ159977','SH510710','SH510630','SZ159939','SH510580','SH510350','SZ159968',
'SZ159902','SH512680','SH512910','SZ159998','SH513300','SZ159816','SH512090','SH510100','SZ159972','SH512160',
'SZ159980','SH515530','SH512580','SH515630','SZ159938','SZ159811','SZ159985','SH515390','SZ159929','SH515580',
'SH515070','SH510800','SH510600','SH511180','SH515980','SZ159808','SH512510','SH510390','SH510150','SH512730'
]
'''
IDXs_to_COLLECT = prod.listAllIndexs()
ETFs_to_COLLECT = prod.listAllETFs()
SYMBOLS_WithNoMF = IDXs_to_COLLECT + ETFs_to_COLLECT
TASK_TIMEOUT_DownloadToday = timedelta(minutes=60)
BATCHSIZE_DownloadToday = 500
TODAY_YYMMDD = None
@shared_task
def add(x, y):
    """Demo celery task: pause 30 s, then return the sum of x and y."""
    sleep(30)
    total = x + y
    return total
@shared_task
def mul(x, y):
    """Demo celery task: pause 30 s, then return the product of x and y."""
    sleep(30)
    product = x * y
    return product
@shared_task
def xsum(numbers):
    """Demo celery task: return the sum of an iterable of numbers."""
    total = sum(numbers)
    return total
import math
__totalAmt1W =1
__dtDummyOpen = datetime.strptime('20000101T09:30:00', '%Y%m%dT%H:%M:%S')
def R_activity(item):
    """Heuristic "activity" score for one symbol overview dict.

    Combines a day-normalized turnover ratio (capped at 8) with a
    market-amount term (capped at 7); used as a sort key to rank symbols.
    ``item`` is a Sina overview row with at least 'turnoverratio' and
    'amount'; 'ticktime' ("HH:MM:SS") is optional.  Reads module globals
    ``__dtDummyOpen`` and ``__totalAmt1W`` (the latter is refreshed by
    listAllSymbols()).
    """
    # earlier scoring formula kept for reference:
    # ret = item['amount'] / __totalAmt1W
    # if ret >0.0:
    #     ret = 10* math.sqrt(math.sqrt(ret))
    # if item['turnoverratio'] >0.2 :
    #     ret += math.sqrt(math.sqrt(item['turnoverratio']))
    # else: ret /=2
    # DailizeRatio_tr projects intraday turnover to a full 5.5h trading day
    # based on how far into the session 'ticktime' is.
    DailizeRatio_tr =1
    if 'ticktime' in item and isinstance(item['ticktime'], str) :
        DailizeRatio_tr = datetime.strptime('20000101T' + item['ticktime'], '%Y%m%dT%H:%M:%S').replace(year=__dtDummyOpen.year,month=__dtDummyOpen.month,day=__dtDummyOpen.day) - __dtDummyOpen
        DailizeRatio_tr = 5.5*60*60 / DailizeRatio_tr.seconds if DailizeRatio_tr.seconds >0 else 1
    ret = min(8, item['turnoverratio'] * DailizeRatio_tr)
    if (item['amount'] * DailizeRatio_tr) <1.0e8 : ret =0 # zero out thin names; NOTE threshold 1.0e8 is 100M, not the 50M the old comment claimed
    ret += min(7, math.sqrt(item['amount'] / __totalAmt1W))  # /10
    return ret
def _writeCsv(f, sybmolLst, columeline=None) :
    """Write symbol dicts to file object *f* as CSV.

    *columeline* supplies both the header line and the field order; it
    defaults to SYMBOL_LIST_HEADERSEQ.  Each row pulls those keys from the
    dict and stringifies them.
    """
    header = columeline if columeline else SYMBOL_LIST_HEADERSEQ
    f.write(header + EOL)
    fields = header.split(',')
    for rec in sybmolLst:
        f.write(','.join(str(rec[fld]) for fld in fields) + EOL)
def __rmfile(fn) :
    """Best-effort delete of *fn*; a missing file or permission error is ignored.

    IMPROVED: the original bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; ``except Exception`` keeps the intended
    ignore-everything-ordinary behavior without masking interpreter exits.
    """
    try :
        os.remove(fn)
    except Exception:
        pass
def _topN(topNum =500, lstResult=None):
    """Return up to *topNum* leading entries of *lstResult*, skipping the
    ChiNext (SZ3xx) and STAR-market (SH68x) boards.

    When *lstResult* is None the full symbol list is fetched first; when
    *topNum* <= 0 an automatic size is chosen (about the top 2%, rounded
    down to a multiple of 10, capped at 500).
    """
    if lstResult is None:
        lstResult = listAllSymbols()
    # drop ChiNext / STAR-market boards (original note: 跳过创业、科创板)
    picked = [it for it in lstResult
              if 'SZ3' not in it['symbol'] and 'SH68' not in it['symbol']]
    if topNum <= 0:
        topNum = min(500, int(len(picked) / 50) * 10)
    return picked[:topNum]
# ===================================================
@shared_task(bind=True, base=Retryable)
def listAllSymbols(self):
    '''Return the full SH+SZ symbol overview list, sorted by activity.

    Serves from the day's pickled cache under hpx_publish when it looks
    complete and was written after market close (weekend, or >=16:00);
    otherwise re-fetches from Sina via prod.listAllSymbols(), scores with
    R_activity() (non-ST names first, then ST names), refreshes the
    module-global __totalAmt1W, and rewrites both the pickle cache and a
    CSV archive.  Raises RetryableError when the fetch comes back empty.
    '''
    lstSHZ = []
    fnCachedLst = os.path.join(MAPPED_HOME, 'hpx_publish', 'lstSHZ_%s.pkl.bz2' % datetime.now().strftime('%Y%m%d'))
    # Try today's cache first; any stat/unpickle problem silently falls
    # through to a fresh fetch below.
    try :
        st = os.stat(fnCachedLst)
        ctime = datetime.fromtimestamp(st.st_ctime)
        # only trust a cache written on a weekend or after 16:00 (post-close)
        if st.st_size >1000 and (ctime.isoweekday() >5 or ctime.hour >=16):
            with bz2.open(fnCachedLst, 'rb') as f:
                lstSHZ = pickle.load(f)
    except Exception as ex:
        pass
    # A-share market has well over 2000 listings; fewer means cache miss/stale.
    if len(lstSHZ) <=2000:
        lstSH, lstSZ = prod.listAllSymbols(thePROG)
        if len(lstSH) <= 0 or len(lstSZ) <= 0:
            # NOTE: message says SH twice; the second should read SZ
            raise RetryableError(456, "empty SH[%s] or empty SH[%s] fetched" %(len(lstSH), len(lstSZ)))
        lstSHZ = {} # temporarily via dict
        # dedupe by symbol (SZ rows override SH rows on collision)
        for i in lstSH + lstSZ:
            lstSHZ[i['symbol']] =i
        lstSHZ = list(lstSHZ.values())
        # total market amount in units of 10K, feeding R_activity()'s scale
        totalAmt1W=0
        for i in lstSHZ:
            totalAmt1W += i['amount'] /10000.0
        if totalAmt1W >1.0: # for R_activity()
            global __totalAmt1W
            __totalAmt1W = totalAmt1W
        # rank non-ST and ST names separately, most active first
        noneST = list(filter(lambda x: not 'ST' in x['name'], lstSHZ))
        noneST.sort(key=R_activity)
        noneST.reverse()
        STs = list(filter(lambda x: 'ST' in x['name'], lstSHZ))
        STs.sort(key=R_activity)
        STs.reverse()
        # best-effort side archive of the ~800 most active non-ST names
        try:
            topActs = _topN(800, noneST)
            fnTopActs = os.path.join(MAPPED_HOME, 'hpx_archived', 'sina', 'topAct%d_%s.csv.bz2' % (len(topActs), datetime.now().strftime('%Y%m%dT%H%M')))
            with bz2.open(fnTopActs, 'wt', encoding='utf-8') as f:
                _writeCsv(f, topActs)
        except :
            pass
        lstSHZ = noneST + STs
        # clear stale per-day caches before writing today's
        for fn in glob.glob(os.path.join(MAPPED_HOME, 'hpx_publish') + "/lstSHZ_*.pkl.bz2") :
            try :
                os.remove(fn)
            except Exception as ex:
                pass
        try:
            with bz2.open(fnCachedLst, 'wb') as f:
                f.write(pickle.dumps(lstSHZ))
        except :
            thePROG.warn('listAllSymbols() failed to write %s' % fnCachedLst)
        # human-readable CSV copy in the archive tree (best effort)
        try:
            lstArch = os.path.join(MAPPED_HOME, 'hpx_archived', 'sina', 'lstSHZ_%s.csv.bz2' % datetime.now().strftime('%Y%m%d'))
            with bz2.open(lstArch, 'wt', encoding='utf-8') as f:
                _writeCsv(f, lstSHZ)
        except :
            thePROG.warn('listAllSymbols() failed to write %s' % lstArch)
    return lstSHZ
# csvNoneST = SYMBOL_LIST_HEADERSEQ + EOL
# for i in noneST:
# csvNoneST += ','.join([str(i[k]) for k in SYMBOL_LIST_HEADERSEQ.split(',')]) +EOL
# csvSTs = SYMBOL_LIST_HEADERSEQ + EOL
# for i in STs:
# csvSTs += ','.join([str(i[k]) for k in SYMBOL_LIST_HEADERSEQ.split(',')]) +EOL
# return csvNoneST, csvSTs
# ===================================================
@shared_task(bind=True, base=Retryable, max_retries=5)
def commitToday(self, dictArgs) : # urgly at the parameter list
    '''Archive one crawler's end-of-day output for a symbol into the master
    archive (DIR_ARCHED_HOME) and mark its download request as committed.

    dictArgs keys: login, asofYYMMDD, symbol, fnJsons, fnSnapshot, fnTcsv,
    lastDays.  Files are read from the crawler user's hpx_publish directory
    and packed into per-event .h5t archives; source files are deleted after
    a successful pack.

    in order to chain:
    import celery
    import dapps.sinaMaster.tasks as mt
    import dapps.sinaCrawler.tasks_Dayend as ct
    s3 = celery.chain(ct.downloadToday.s('SZ000005'), mt.commitToday.s())
    s3().get()
    '''
    # a cancelled/failed predecessor in the celery chain hands us None
    if dictArgs is None:
        thePROG.warn('commitToday() None dictArgs, prev-req might be cancelled')
        return
    if not isinstance(dictArgs, dict) or len(dictArgs) <=0:
        thePROG.error('commitToday() invalid dictArgs: %s' % str(dictArgs))
        return
    # defaults, then overlay whatever the crawler supplied
    login, asofYYMMDD = 'hpx01', datetime.now().strftime('%Y%m%d')
    login = dictArgs.get('login', login)
    asofYYMMDD = dictArgs.get('asofYYMMDD', asofYYMMDD)
    symbol = dictArgs.get('symbol', None)
    fnJsons = dictArgs.get('fnJsons', [])
    fnSnapshot = dictArgs.get('fnSnapshot', None)
    fnTcsv = dictArgs.get('fnTcsv', None)
    lastDays = dictArgs.get('lastDays', [])
    ''' sample value:
    fnJsons = ['SZ000002_KL1d20201202.json', 'SZ000002_MF1d20201202.json', 'SZ000002_KL5m20201202.json', 'SZ000002_MF1m20201202.json']
    fnSnapshot = 'SZ000002_sns.h5';
    ~{HOME}
    |-- archived -> ../archived
    `-- hpx_template -> /home/wkspaces/hpx_template
    2021-01-03 10:05:03,683: DEBUG/ForkPoolWorker-1] commitToday() archived /mnt/data/hpwkspace/users/hpx/hpx_publish/SZ300422_day20201228.tcsv by[hpx] into /mnt/data/hpwkspace/users/master/archived/sina/SinaMDay_20201228.h5t
    '''
    if not symbol:
        thePROG.error('commitToday() invalid dictArgs: %s' % str(dictArgs))
        return
    # strip 'user@host' / 'user:pass' decorations down to the bare login
    if '@' in login : login = login[:login.index('@')]
    if ':' in login : login = login[:login.index(':')]
    pubDir = os.path.join(SINA_USERS_ROOT, login, 'hpx_publish')
    # pubDir = '/mnt/s/hpx_publish' # test hardcode
    # DIR_ARCHED_HOME = '/tmp/arch_test' # test hardcode
    try:
        os.mkdir(os.path.join(DIR_ARCHED_HOME, 'snapshots'))
        # BUG: ``dirReqs`` is not defined until much later in this function, so
        # this line always raises NameError, which the bare except swallows --
        # the chmod never happens.  It presumably meant to chmod the
        # 'snapshots' directory just created above; confirm before fixing.
        os.chmod(dirReqs, stat.S_IRWXU | stat.S_IRWXG |stat.S_IROTH )
    except: pass
    if TODAY_YYMMDD and asofYYMMDD < TODAY_YYMMDD:
        # this symbol must be frozen today
        # stale submission from a previous day: discard the files instead of archiving
        thePROG.warn('commitToday() archiving %s_%s sounds not open, dictArgs: %s, cleaning %s' % (symbol, asofYYMMDD, str(dictArgs), pubDir))
        for fn in fnJsons + [fnTcsv, fnSnapshot]:
            if fn is None: continue
            srcpath = os.path.join(pubDir, fn)
            __rmfile(srcpath)
        asofYYMMDD = TODAY_YYMMDD # to clear the req of today
    else:
        thePROG.debug('commitToday() archiving %s_%s dictArgs: %s from %s to %s' % (symbol, asofYYMMDD, str(dictArgs), pubDir, DIR_ARCHED_HOME))
        # step 1. zip the JSON files
        # each JSON is routed to Sina<evt>_<date>.h5t by the event token in
        # its filename; source is deleted only after a successful pack
        for fn in fnJsons:
            srcpath = os.path.join(pubDir, fn)
            m = re.match(r'%s_([A-Za-z0-9]*)%s.json' %(symbol, asofYYMMDD), os.path.basename(srcpath))
            if not m : continue
            evtShort = m.group(1)
            try :
                destpath = os.path.join(DIR_ARCHED_HOME, 'Sina%s_%s.h5t' % (evtShort, asofYYMMDD) )
                if evtShort in ['Sum'] :
                    destpath = os.path.join(DIR_ARCHED_HOME, 'SinaMDay_%s.h5t' % asofYYMMDD )
                if h5tar.tar_utf8(destpath, srcpath, baseNameAsKey=True) :
                    thePROG.debug('commitToday() archived %s into %s' %(srcpath, destpath))
                    __rmfile(srcpath)
                else:
                    thePROG.error('commitToday() failed to archived %s into %s' %(srcpath, destpath))
            except Exception as ex:
                thePROG.logexception(ex, 'commitToday() archiving[%s->%s] error' % (srcpath, destpath))
        # step 2. zip the Tcsv file
        srcpath = os.path.join(pubDir, fnTcsv)
        destpath = os.path.join(DIR_ARCHED_HOME, 'SinaMDay_%s.h5t' % asofYYMMDD )
        if h5tar.tar_utf8(destpath, srcpath, baseNameAsKey=True) :
            thePROG.debug('commitToday() archived %s by[%s] into %s' %(srcpath, login, destpath))
            __rmfile(srcpath)
        else:
            thePROG.error('commitToday() failed to archived %s by[%s] into %s' %(srcpath, login, destpath))
        # step 3. append the snapshots
        # snapshot merging is currently disabled (kept below for reference);
        # the snapshot file is simply deleted
        if fnSnapshot and len(fnSnapshot)>0:
            srcpath = os.path.join(pubDir, fnSnapshot)
            '''
            destpath = os.path.join(DIR_ARCHED_HOME, 'SNS_%s.h5' % asofYYMMDD)
            try :
                gns = []
                with h5py.File(destpath, 'a') as h5w:
                    # step 3.1, copy the new SNS into the dest h5f
                    with h5py.File(srcpath, 'r') as h5r:
                        for gn in h5r.keys():
                            if not symbol in gn: continue
                            g = h5r[gn]
                            if not 'desc' in g.attrs.keys() or not 'pickled market state' in g.attrs['desc'] : continue
                            gdesc = g.attrs['desc']
                            if gn in h5w.keys(): del h5w[gn]
                            # Note that this is not a copy of the dataset! Like hard links in a UNIX file system, objects in an HDF5 file can be stored in multiple groups
                            # So, h5w[gn] = g doesn't work because across different files
                            # go = h5w.create_group(gn)
                            h5r.copy(g.name, h5w) # note the destGroup is the parent where the group want to copy under-to
                            go = h5w[gn]
                            gns.append(gn)
                    thePROG.debug('commitToday() added snapshot[%s] of %s into %s' % (','.join(gns), srcpath, destpath))
            except Exception as ex:
                thePROG.logexception(ex, 'commitToday() snapshot[%s->%s] error' % (srcpath, destpath))
            '''
            __rmfile(srcpath)
    # step 4, delete the request file and record
    dirReqs = os.path.join(DIR_ARCHED_HOME, SUBDIR_Reqs)
    # fnReq = os.path.join(dirReqs, '%s_%s.tcsv.bz2' % (asofYYMMDD, symbol))
    # __rmfile(fnReq)
    # thePROG.debug('commitToday() removed %s' % fnReq)
    # mark this symbol committed in the shared request registry and remove
    # its task-file so the batch refresher won't reissue it
    dictDownloadReqs = _loadDownloadReqs(dirReqs)
    if asofYYMMDD in dictDownloadReqs.keys():
        dictToday = dictDownloadReqs[asofYYMMDD]
        if symbol in dictToday.keys():
            reqNode = dictToday[symbol]
            stampNow = datetime.now()
            taskId, stampIssued, tn = reqNode['taskId'], reqNode['stampIssued'], reqNode['taskFn']
            reqNode['stampCommitted'] = stampNow
            __rmfile(tn)
            thePROG.info('commitToday() dictDownloadReqs[%s][%s] task[%s] took %s by[%s], deleted %s' % (asofYYMMDD, symbol, taskId, stampNow - stampIssued, login, tn))
            nleft = len(dictToday)
            c = sum([1 if not v['stampCommitted'] else 0 for v in dictToday.values() ])
            thePROG.debug('commitToday() dictDownloadReqs[%s] has %d/%d onging' % (asofYYMMDD, c, nleft))
    _saveDownloadReqs(dirReqs)
# ===================================================
# RETRY_DOWNLOAD_INTERVAL = timedelta(hours=1)
RETRY_DOWNLOAD_INTERVAL = timedelta(minutes=30)
# ===================================================
@shared_task(bind=True, ignore_result=True, expires=60)
def schChkRes_Crawlers(self, asofYYMMDD =None): # asofYYMMDD ='20201231'):
    '''Periodic beat task: refresh the downloadToday batch and ping crawlers.

    Optionally pins the module-global TODAY_YYMMDD (otherwise derived as
    "now minus 9 hours", i.e. still yesterday until 09:00 local), then tops
    up/expires tasks via __refreshBatch_DownloadToday() and logs which
    crawler workers answer a celery ping.
    '''
    global MAPPED_HOME, TODAY_YYMMDD
    if asofYYMMDD:
        TODAY_YYMMDD = asofYYMMDD
    stampNow = datetime.now()
    if not TODAY_YYMMDD:
        # trade-day rolls over at 09:00 local time
        TODAY_YYMMDD = (stampNow-timedelta(hours=9)).strftime('%Y%m%d')
    dirReqs = os.path.join(DIR_ARCHED_HOME, SUBDIR_Reqs)
    thePROG.debug('schChkRes_Crawlers() refreshing tasks of downloadTodays[%s]' % TODAY_YYMMDD)
    __refreshBatch_DownloadToday(dirReqs, TODAY_YYMMDD)
    # local import avoids a circular dependency at module load time
    from dapps.sinaCrawler.worker import worker as crawler
    crawlers = crawler.control.ping(timeout=2.0, queue='crawler')
    crawlers = [ list(c.keys())[0] for c in crawlers ]
    thePROG.info('schChkRes_Crawlers() found %d crawlers: %s' % (len(crawlers), ','.join(crawlers) ) )
    '''
    cacheFiles = [ 'SinaMF1m_%s.h5t' %i for i in yymmddToCache]
    for c in crawlers:
        q = c.split('@')[0]
        if not q or len(q) <=0: continue
        r = CTDayend.fetchArchivedFiles.apply_async(args=[cacheFiles], queue=q)
        thePROG.info('schDo_pitchArchiedFiles() called crawler[%s].fetchArchivedFiles: %s' % (q, ','.join(cacheFiles)))
    '''
# ===================================================
__dictDownloadReqs = None
def _loadDownloadReqs(dirReqs) :
    """Return the module-wide download-request registry, lazily un-pickling
    it from ``dirReqs/dictDownloadReqs.pkl.bz2`` on first use.

    A missing or unreadable file resets the registry to an empty dict and
    the stale file is removed best-effort.
    """
    global __dictDownloadReqs
    if __dictDownloadReqs:
        return __dictDownloadReqs
    fn = os.path.join(dirReqs, 'dictDownloadReqs.pkl.bz2')
    try:
        with bz2.open(fn, 'rb') as f:
            __dictDownloadReqs = pickle.load(f)
    except Exception:
        # unreadable cache: start fresh and drop the broken file
        __dictDownloadReqs = {}
        __rmfile(fn)
    return __dictDownloadReqs
def _saveDownloadReqs(dirReqs):
    """Pickle the module-wide download-request registry back to
    ``dirReqs/dictDownloadReqs.pkl.bz2`` (best effort, no-op when empty)."""
    global __dictDownloadReqs
    if not __dictDownloadReqs:
        return
    fn = os.path.join(dirReqs, 'dictDownloadReqs.pkl.bz2')
    try:
        payload = pickle.dumps(__dictDownloadReqs)
        with bz2.open(fn, 'wb') as f:
            f.write(payload)
    except Exception:
        # persistence is advisory; losing it only costs a re-issue of tasks
        pass
# ===================================================
def __refreshBatch_DownloadToday(dirReqs, TODAY_YYMMDD):
    '''Maintain the rolling batch of downloadToday->commitToday task chains.

    Pass 1 walks the registry for TODAY_YYMMDD: drops committed/invalid
    entries, stamps tasks that became ready, revokes+drops tasks stuck
    beyond TASK_TIMEOUT_DownloadToday, and collects still-pending task
    files.  Pass 2 tops the batch up to BATCHSIZE_DownloadToday by firing
    new chains from T<date>.*.pkl.bz2 request files under *dirReqs*.
    Persists the registry when anything changed.
    '''
    dictDownloadReqs = _loadDownloadReqs(dirReqs)
    if not dictDownloadReqs or not TODAY_YYMMDD or not TODAY_YYMMDD in dictDownloadReqs.keys():
        thePROG.debug('__refreshBatch_DownloadToday() no active downloadToday[%s]' %TODAY_YYMMDD)
        return
    dictToday = dictDownloadReqs[TODAY_YYMMDD]
    thePROG.debug('__refreshBatch_DownloadToday() %d actives in downloadToday[%s]' %(len(dictToday), TODAY_YYMMDD))
    todels, bDirty = [], False
    reqsPending = []
    stampNow = datetime.now()
    # pass 1: reconcile the registry against live celery task state
    for k, v in dictToday.items():
        if not v or not 'task' in v.keys() or not v['task']:
            todels.append(k)
            continue
        task = v['task']
        try :
            timelive = stampNow - v['stampIssued']
            if v['stampCommitted']:
                # commitToday() already flagged this one done
                todels.append(k)
                thePROG.info('__refreshBatch_DownloadToday() downloadToday[%s]%s committed, duration %s, removed from dictToday' %(k, task.id, v['stampCommitted']-v['stampIssued']))
                continue
            if not v['stampReady'] and task.ready():
                v['stampReady'] = stampNow
                thePROG.debug('__refreshBatch_DownloadToday() downloadToday[%s]%s:%s succ[%s], took %s' %(k, task.id, task.state, task.successful(), timelive))
                continue
            if timelive > TASK_TIMEOUT_DownloadToday and task.state in ['PENDING', 'REVOKED']:
                todels.append(k)
                thePROG.warn('__refreshBatch_DownloadToday() downloadToday[%s]%s:%s took %s timeout, revoking[%s] and retry' %(k, task.id, task.state, timelive, task.parent.id))
                task.parent.revoke() # we only revoke the first in the chain here, always let commitToday go if its prev steps have been completed
                continue
            reqsPending.append(v['taskFn'])
        except Exception as ex:
            thePROG.logexception(ex, '__refreshBatch_DownloadToday() checking task of %s' % (k))
    if len(todels) >0:
        bDirty = True
        thePROG.info('__refreshBatch_DownloadToday() clearing %s keys: %s' % (len(todels), ','.join(todels)))
        for k in todels:
            del dictToday[k]
    # pass 2: top up the in-flight batch
    cTasksToAdd = BATCHSIZE_DownloadToday - len(reqsPending)
    if cTasksToAdd <=0:
        thePROG.debug('__refreshBatch_DownloadToday() %d pendings[%s ~ %s] hit max %d, no more add-in' % (len(reqsPending), reqsPending[0], reqsPending[-1], BATCHSIZE_DownloadToday))
        return
    # request files sort by name; anything at/below the max pending name that
    # is not currently in flight is a candidate retry
    Tname_batchStart = os.path.basename(max(reqsPending)) if len(reqsPending) >0 else ''
    allfiles = hist.listAllFiles(dirReqs, depthAllowed=1)
    taskfiles, potentialRetries = [], []
    for fn in allfiles:
        bn = os.path.basename(fn)
        if not fnmatch.fnmatch(bn, 'T%s.*.pkl.bz2' % TODAY_YYMMDD) :
            continue
        if bn <= Tname_batchStart and (len(potentialRetries) + len(taskfiles)) < cTasksToAdd and not fn in reqsPending:
            potentialRetries.append(fn)
            continue
        taskfiles.append(fn)
    taskfiles.sort()
    potentialRetries.sort()
    newissued = []
    prefix2cut = DIR_ARCHED_HOME +'/'
    prefixlen = len(prefix2cut)
    for tn in taskfiles + potentialRetries:
        bn = os.path.basename(tn)
        # request filenames look like T<YYYYMMDD>.<seq>.<symbol>.pkl.bz2
        symbol = bn.split('.')[2]
        exclMF = symbol in SYMBOLS_WithNoMF
        # crawler wants the path relative to DIR_ARCHED_HOME
        fnTask = tn[prefixlen:] if prefix2cut == tn[: prefixlen] else tn
        wflow = CTDayend.downloadToday.s(symbol, fnPrevTcsv = fnTask, excludeMoneyFlow=exclMF) | commitToday.s()
        task = wflow()
        dictToday[symbol] = {
            'symbol': symbol,
            'taskFn': tn,
            'task': task,
            'taskId': task.id,
            'stampIssued': datetime.now(),
            'stampReady': None,
            'stampCommitted': None
        }
        newissued.append(symbol)
        if len(newissued) >= cTasksToAdd: break
    thePROG.info('__refreshBatch_DownloadToday() fired %d/%d new requests: %s' % (len(newissued), len(taskfiles), ','.join(newissued)))
    if len(newissued) >0 :
        bDirty = True
    elif len(dictToday) <=0:
        # nothing in flight and nothing new: the whole day is done
        del dictDownloadReqs[TODAY_YYMMDD]
        bDirty = True
        thePROG.info('__refreshBatch_DownloadToday() all DownloadReqs[%s] done, removed' % (TODAY_YYMMDD))
    if bDirty:
        _saveDownloadReqs(dirReqs)
# ===================================================
__lastYYMMDDs, __dtLatestQueried=[], None
@shared_task(bind=True, base=Retryable)
def latestOpenDays(self, nLastDays =7):
    '''Return up to *nLastDays* most recent market-open dates ('YYYYMMDD',
    newest first), caching the query result module-wide for one hour.

    Also refreshes the module-global TODAY_YYMMDD from the newest open day
    (falling back to "now minus 9 hours" when the query returns nothing).
    '''
    global __lastYYMMDDs, __dtLatestQueried
    needQuery, stampNow = False, datetime.now()
    # re-query when the cache is empty, too short, or older than an hour
    if not __dtLatestQueried or not __lastYYMMDDs or len(__lastYYMMDDs) < nLastDays:
        needQuery = True
    elif (stampNow - __dtLatestQueried) > timedelta(hours=1):
        needQuery = True
    if not needQuery:
        return __lastYYMMDDs[: min(nLastDays, len(__lastYYMMDDs))]
    global TODAY_YYMMDD
    # fetch 30 days so later calls with bigger nLastDays also hit the cache
    __lastYYMMDDs = prod.determineLastDays(thePROG, nLastDays =30)
    __dtLatestQueried = datetime.now()
    if len(__lastYYMMDDs) >0:
        TODAY_YYMMDD = __lastYYMMDDs[0]
    else :
        # trade-day rolls over at 09:00 local time
        TODAY_YYMMDD = (stampNow-timedelta(hours=9)).strftime('%Y%m%d')
    return __lastYYMMDDs[: min(nLastDays, len(__lastYYMMDDs))]
# ===================================================
@shared_task(bind=True, base=Retryable)
def schKickOff_DownloadToday(self):
    '''Daily kickoff: generate one T<date>.<seq>.<symbol>.pkl.bz2 request
    file per ETF/stock/index under DIR_ARCHED_HOME/reqs (each carrying the
    symbol's recent archived days plus today's overview), reset the day's
    request registry, then fire the first batch of download chains.
    '''
    global TODAY_YYMMDD
    # lastYYMMDDs = prod.determineLastDays(thePROG, nLastDays =7)
    # if len(lastYYMMDDs) <=0:
    #     return
    # TODAY_YYMMDD = lastYYMMDDs[0]
    lastYYMMDDs = latestOpenDays(nLastDays =7)
    if len(lastYYMMDDs) <=0:
        return
    # DIR_ARCHED_HOME = '/mnt/e/AShareSample/hpx_archived/sina' # TEST CODE
    dirReqs = os.path.join(DIR_ARCHED_HOME, SUBDIR_Reqs)
    # ensure the request dir exists and is group-writable for crawlers
    try:
        os.mkdir(dirReqs)
        os.chmod(dirReqs, stat.S_IRWXU | stat.S_IRWXG |stat.S_IRWXO )
        shutil.chown(dirReqs, group ='hpx')
    except: pass
    dictDownloadReqs = _loadDownloadReqs(dirReqs)
    allSHZ = { x['symbol']: x for x in listAllSymbols() } # convert to dict
    thePROG.info('schKickOff_DownloadToday() listAllSymbols got %d symbols and last trade-days: %s' % (len(allSHZ), ','.join(lastYYMMDDs)))
    if len(allSHZ) <=2000:
        raise RetryableError(401, 'incompleted symbol list')
    # NOTE: this reads the module global __dictDownloadReqs directly; it is the
    # same object _loadDownloadReqs() just returned, so behavior matches.
    if not TODAY_YYMMDD in __dictDownloadReqs.keys():
        # TODO cancel dictDownloadReqs[TODAY_YYMMDD]
        dictDownloadReqs[TODAY_YYMMDD] = {}
    else:
        for v in dictDownloadReqs[TODAY_YYMMDD].values():
            try :
                # BUG: condition looks inverted -- as written, entries that DO
                # have a 'task' are skipped, and entries without one raise
                # KeyError on v['task'] (swallowed below), so task.parent.revoke()
                # is never reached.  Compare __refreshBatch_DownloadToday(),
                # which tests ``not 'task' in v.keys()``.
                if 'task' in v.keys() or not v['task']: continue
                task = v['task']
                task.parent.revoke() # we only revoke the first in the chain here, always let commitToday go if its prev steps have been completed
            except: pass
    _saveDownloadReqs(dirReqs)
    cTasks =0
    # ETFs first, then stocks by activity rank, then indices
    for symbol in ETFs_to_COLLECT + list(allSHZ.keys()) + IDXs_to_COLLECT:
        cTasks += 1
        rfnRequest = os.path.join(SUBDIR_Reqs, 'T%s.%04d.%s.pkl.bz2' % (TODAY_YYMMDD, cTasks, symbol))
        fullfnRequest = os.path.join(DIR_ARCHED_HOME, rfnRequest)
        excludeMoneyFlow = symbol in SYMBOLS_WithNoMF
        # skip request files already generated (idempotent re-run)
        try:
            st = os.stat(fullfnRequest)
            thePROG.debug('schKickOff_DownloadToday() %s already exists' % rfnRequest)
            continue
        except: pass
        thePROG.debug('schKickOff_DownloadToday() generating request-file %s' % rfnRequest)
        # prior days only -- lastYYMMDDs[0] is today itself
        alllines = prod.readArchivedDays(thePROG, DIR_ARCHED_HOME, symbol, lastYYMMDDs[1:])
        todayOverview = allSHZ[symbol] if symbol in allSHZ.keys() else {}
        if 'mktcap' in todayOverview.keys() and 'close' in todayOverview.keys():
            ovclose = float(todayOverview['close'])
            if ovclose > 0.01:
                # derived field: market cap expressed in shares (cap/price)
                todayOverview['mktVolCap10K'] = int(float(todayOverview['mktcap'])) / ovclose
        # no tcsv data in the nLastDays doesn't mean it has no trades today:
        # if len(alllines) <= 100:
        #     thePROG.debug('schKickOff_DownloadToday() skip empty request %s size %d' % (rfnRequest, len(alllines))
        #     continue
        try:
            with bz2.open(fullfnRequest, 'wb') as f:
                f.write(pickle.dumps({'archDays':alllines, 'ov': todayOverview}))
            try:
                shutil.chown(fullfnRequest, group ='hpx')
                os.chmod(fullfnRequest, stat.S_IREAD|stat.S_IWRITE|stat.S_IRGRP|stat.S_IWGRP|stat.S_IROTH )
            except: pass
            thePROG.debug('schKickOff_DownloadToday() generated task-file %s' % fullfnRequest)
        except Exception as ex:
            thePROG.logexception(ex, 'schKickOff_DownloadToday() write %s' % fullfnRequest)
        '''
        with bz2.open(fullfnRequest, 'wt', encoding='utf-8') as f:
            f.write(alllines)
        try:
            shutil.chown(fullfnRequest, group ='hpx')
            os.chmod(fullfnRequest, stat.S_IREAD|stat.S_IWRITE|stat.S_IRGRP|stat.S_IWGRP|stat.S_IROTH )
        except: pass
        thePROG.debug('schKickOff_DownloadToday() generated task-file %s' % rfnRequest)
        '''
    # fire the first batch of download->commit chains
    __refreshBatch_DownloadToday(dirReqs, TODAY_YYMMDD)
'''
# ===================================================
@shared_task(bind=True, base=Retryable)
def schDo_pitchArchiedFiles(self):
listAllSymbols()
nLastDays, lastDays = 7, []
yymmddToday = (stampNow-timedelta(hours=9)).strftime('%Y%m%d')
yymmddToday = datetime.now().strftime('%Y%m%d')
playback = prod.SinaMux(thePROG)
httperr, _, lastDays = playback.loadOnline(EVENT_KLINE_1DAY, IDXs_to_COLLECT[0], nLastDays+3)
lastDays.reverse()
yymmddToCache = []
for i in lastDays:
yymmdd = i.asof.strftime('%Y%m%d')
if yymmdd >= yymmddToday:
continue
yymmddToCache.append(yymmdd)
if len(yymmddToCache) >= nLastDays:
break
if len(yymmddToCache) <=0:
return
from dapps.sinaMaster.worker import worker as wkr
crawlers = wkr.control.ping(timeout=2.0, queue='crawler')
crawlers = [ list(c.keys())[0] for c in crawlers ]
cacheFiles = [ 'SinaMF1m_%s.h5t' %i for i in yymmddToCache]
for c in crawlers:
q = c.split('@')[0]
if not q or len(q) <=0: continue
r = CTDayend.fetchArchivedFiles.apply_async(args=[cacheFiles], queue=q)
thePROG.info('schDo_pitchArchiedFiles() called crawler[%s].fetchArchivedFiles: %s' % (q, ','.join(cacheFiles)))
'''
# ===================================================
@shared_task(bind=True, max_retries=0, compression='bzip2')
def readArchivedDays(self, symbol, YYYYMMDDs):
    """Celery wrapper: read *symbol*'s archived day data for the given
    'YYYYMMDD' dates from the master archive root (bzip2-compressed result)."""
    result = prod.readArchivedDays(thePROG, DIR_ARCHED_HOME, symbol, YYYYMMDDs)
    return result
# ===================================================
@shared_task(bind=True, max_retries=0, compression='bzip2')
def readAchivedSofar(self, symbol):
    '''Collect everything archived so far for *symbol* and return it as text.

    Reads the latest SinaMDay_*.h5t for the most recent day's tcsv, replays
    the latest daily KLine and MoneyFlow JSON archives through a SinaMux
    into a temporary TaggedCsvRecorder file, and returns a dict of
    {'symbol', 'tcsv_lastday', 'tcsv_1d'}.

    NOTE: task name keeps the historical "Achived" misspelling -- renaming
    would break existing callers/queues.
    '''
    readtxn=''
    # mdlines
    # latest end-of-day tcsv for this symbol, from the newest SinaMDay archive
    mdlines = ''
    fnList = glob.glob(os.path.join(MAPPED_HOME, 'archived', 'sina', 'SinaMDay_*.h5t'))
    if len(fnList) >0 :
        fnList.sort()
        fnArch = fnList[-1]
        recentYYMMDD = os.path.basename(fnArch)[len('SinaMDay_'): -len('.h5t')]
        memName = '%s_day%s.tcsv' %(symbol, recentYYMMDD)
        try :
            mdlines = h5tar.read_utf8(fnArch, memName)
            readtxn += '%s(%dB)@%s,' % (memName, len(mdlines), fnArch)
        except:
            thePROG.error('readAchivedSofar() failed to read %s from %s' % (memName, fnArch))
    mux = prod.SinaMux(thePROG)
    mux.setSymbols([symbol])
    # kl1dlines
    # newest daily-KLine archive feeds the mux
    fnList = glob.glob(os.path.join(MAPPED_HOME, 'archived', 'sina', 'SinaKL1d_*.h5t'))
    if len(fnList) >0 :
        fnList.sort()
        fnArch = fnList[-1]
        mux.loadJsonH5t(EVENT_KLINE_1DAY, symbol, fnArch)
        readtxn += '%s@%s,' % (symbol, fnArch)
    # mf1dlines
    # newest daily-MoneyFlow archive feeds the mux
    fnList = glob.glob(os.path.join(MAPPED_HOME, 'archived', 'sina', 'SinaMF1d_*.h5t'))
    if len(fnList) >0 :
        fnList.sort()
        fnArch = fnList[-1]
        mux.loadJsonH5t(EVENT_MONEYFLOW_1DAY, symbol, fnArch)
        readtxn += '%s@%s,' % (symbol, fnArch)
    # replay every event from the mux into a temp tagged-csv file
    tmpfn = os.path.join(WORKDIR_CACHE, 'tmprr%s_%s.tcsv' %(symbol, datetime.now().strftime('%m%dT%H%M%S')))
    rec = thePROG.createApp(hist.TaggedCsvRecorder, filepath =tmpfn)
    rec.registerCategory(EVENT_KLINE_1DAY, params={'columns': KLineData.COLUMNS})
    rec.registerCategory(EVENT_MONEYFLOW_1DAY, params={'columns': MoneyflowData.COLUMNS})
    while True:
        try :
            rec.doAppStep() # to flush the recorder
            ev = next(mux)
            if not ev: break
            rec.pushRow(ev.type, ev.data)
        except StopIteration:
            break
        except Exception as ex:
            thePROG.logexception(ex)
            break
        except :
            break
    for i in range(10): rec.doAppStep() # to flush the recorder
    thePROG.removeApp(rec)
    # read the temp file back and clean up
    tcsvlines=''
    with open(tmpfn, 'r') as f:
        tcsvlines = f.read()
    __rmfile(tmpfn)
    thePROG.info('readAchivedSofar() read from %s tmpfile[%s] deleted' % (readtxn, tmpfn))
    return {
        'symbol' : symbol,
        'tcsv_lastday': mdlines,
        'tcsv_1d': tcsvlines
    } # take celery's compression instead of return bz2.compress(all_lines.encode('utf8'))
# ===================================================
@shared_task(bind=True, base=Retryable)
def readArchivedH5t(self, h5tFileName, memberNode):
    '''Read one member (by node name) out of an .h5t archive and return its
    raw bytes, or None when the member/file cannot be read.

    The member is looked up both at the archive root and under the utf8
    text group, the latter taking precedence.
    '''
    if '.h5t' != h5tFileName[-4:]: h5tFileName+='.h5t'
    pathname = os.path.join(MAPPED_HOME, 'archived', 'sina', h5tFileName)
    # NOTE(review): this unconditionally overrides the MAPPED_HOME path just
    # built above with a hardcoded cache dir (and ignores the WORKDIR_CACHE
    # fallback to '/tmp' computed at module load).  Looks like leftover debug
    # or an intentional read-from-cache -- confirm before changing.
    pathname = '/tmp/sina_cache/' + h5tFileName
    k = h5tar.quote(memberNode)
    ret = None
    try :
        with h5py.File(pathname, 'r') as h5r:
            if k in h5r.keys():
                ret = h5r[k][()].tobytes()
            # the utf8 text group wins when the key exists in both places
            if h5tar.GNAME_TEXT_utf8 in h5r.keys():
                g = h5r[h5tar.GNAME_TEXT_utf8]
                if k in g.keys():
                    ret = g[k][()].tobytes()
    except Exception as ex:
        thePROG.logexception(ex, 'readArchivedH5t() %s[%s]'% (h5tFileName, memberNode))
    if ret and len(ret) > 0:
        #typical compress-rate 1/8: ret = bz2.decompress(ret).decode('utf8')
        thePROG.info('readArchivedH5t() read %s[%s] %dB'% (h5tFileName, memberNode, len(ret)))
    else :
        thePROG.error('readArchivedH5t() read %s[%s] failed: %s'% (h5tFileName, memberNode, ret))
    return ret
# ===================================================
@shared_task(bind=True, base=Retryable)
def schDo_ZipWeek(self, asofYYMMDD =None):
    '''Weekly scheduled task: archive one week of data under DIR_ARCHED_HOME
    via prod.archiveWeek().

    *asofYYMMDD* picks any date inside the target week; despite the name it
    is parsed as 'YYYY-MM-DD' (with dashes) -- NOTE(review): confirm callers
    pass the dashed form, since a bare 'YYYYMMDD' string silently falls back
    to the default below.  Default: five days ago (i.e. the current/previous
    week).
    '''
    global DIR_ARCHED_HOME
    dtInWeek = None
    try :
        if isinstance(asofYYMMDD, str):
            dtInWeek = datetime.strptime(asofYYMMDD, '%Y-%m-%d')
    except:
        dtInWeek = None
    if not dtInWeek:
        dtInWeek = datetime.now() - timedelta(days=5)
    thePROG.debug('schDo_ZipWeek() start archiving the week of %s under %s' % (dtInWeek.strftime('%Y-%m-%d'), DIR_ARCHED_HOME))
    fn, lst = prod.archiveWeek(DIR_ARCHED_HOME, None, dtInWeek, thePROG)
    thePROG.info('schDo_ZipWeek() %s archived %s symbols'% (fn, len(lst)))
####################################
from time import sleep
if __name__ == '__main__':
    # Ad-hoc manual test harness for running tasks inline (not via celery).
    thePROG.setLogLevel('debug')
    schKickOff_DownloadToday()
    # NOTE: exit(0) above-the-fold makes everything below unreachable dead
    # code, kept around as alternative manual tests to re-enable by hand.
    exit(0)
    # readAchivedSofar('SZ300913')
    readArchivedDays('SZ300913', ['20210530', '20210531'])
    # readArchivedH5t('SinaMF1m_20201222.h5t', 'SZ300913_MF1m20201222.json')
    listAllSymbols()
    # schKickOff_DownloadToday()
    for i in range(20):
        schChkRes_Crawlers('20201231')
        sleep(10)
# nTop = 1000
# lstSHZ = topActives(nTop)
# with open(os.path.join(MAPPED_HOME, 'hpx_publish', 'top%s_%s' % (nTop, datetime.now().strftime('%Y%m%d'))) + '.csv', 'wb') as f:
# _writeCsv(f, lstSHZ)
# print(lstSHZ)
'''
symbol, asofYYMMDD = 'SZ002670', '20201204'
login = 'root@tc2.syscheme.com'
fnJsons = []
for evt in ['KL1d', 'MF1d', 'KL5m', 'MF1m']:
fnJsons.append('%s_%s%s.json' % (symbol, evt, asofYYMMDD))
today = {
'symbol': symbol,
'login': 'hxp01@test',
'asofYYMMDD': asofYYMMDD,
'fnSnapshot': '%s_sns%s.h5' % (symbol, asofYYMMDD),
'fnJsons': fnJsons,
'fnTcsv': '%s_day%s.tcsv' % (symbol, asofYYMMDD),
'lastDays': [
['20201204', 15.31, 17.5, 15.0, 15.5, 222133283.0],
['20201203', 15.98, 16.48, 15.5, 15.97, 176615259.0],
['20201202', 14.38, 14.98, 14.26, 14.98, 113319552.0],
['20201201', 12.41, 13.62, 11.77, 13.62, 163043226.0],
['20201130', 12.17, 12.72, 12.02, 12.38, 166906351.0]
]
}
commitToday(today)
'''
''' A test
import dapps.sinaCrawler.tasks_Dayend as ct
import dapps.sinaMaster.tasks_Archive as mt
c1 = ct.downloadToday.s('SZ000002') | mt.commitToday.s()
c1().get()
'''
| null | src/dapps/sinaMaster/tasks_Archive.py | tasks_Archive.py | py | 34,068 | python | en | code | null | code-starcoder2 | 51 |
alternate_ir = 0.0
# Depth-camera option constants.  The names (laser_power, visual_preset,
# apd/ldd temperatures, 'Z16' depth format, 1024x768 stream) presumably map to
# Intel RealSense L515 sensor options -- TODO confirm against the SDK in use.
# Values of 0.0 appear to mean "disabled/default" for toggle-style options.
apd_Temperature = -9999
confidence_threshold = 1
depth_offset = 4.5
depth_units = 0.000250000011874363
digital_gain = 2
enable_ir_Reflectivity = 0.0
enable_max_usable_range = 0.0
error_polling_enabled = 1
frames_queue_size = 16
freefall_detection_enabled = 1
global_time_enabled = 0.0
host_performance = 0.0
humidity_temperature = 36.6105880737305
inter_cam_sync_mode = 0.0
invalidation_bypass = 0.0
ldd_temperature = 36.6820793151855
laser_power = 71
ma_temperature = 36.5593948364258
mc_temperature = 36.570125579834
min_distance = 190
noise_estimation = 0.0
noise_filtering = 4
post_processing_sharpening = 1
pre_processing_sharpening = 0.0
receiver_gain = 18
reset_camera_accuracy_health = 0.0
sensor_mode = 0.0
# NOTE(review): 'cccuracy' is a typo for 'accuracy', but the name may be
# referenced elsewhere -- do not rename without checking consumers.
trigger_camera_cccuracy_health = 0.0
visual_preset = 5
zero_order_enabled = 0.0
# stream configuration: 1024x768 @ 30fps, Z16 depth + Y8 infrared
stream_depth_format = 'Z16'
stream_fps = 30
stream_height = 768
stream_ir_format = 'Y8'
stream_width = 1024
import re
if __name__ == '__main__':
    # Wrap stdin lines in an HTML "prerequisites" box: open the div, emit one
    # bullet per non-blank input line, and always close the div even on error.
    print("<div style=\"margin:2em; background-color: #e0e0e0;\">", end="\n\n")
    try:
        collected = []
        while True:
            try:
                collected.append(input())
            except EOFError:
                break
        non_blank = (ln for ln in collected if not re.match(r'^\s*$', ln))
        print(f'<strong>↩PREREQUISITES↩</strong>', end='\n\n')
        for ln in non_blank:
            print(f' * {ln}', end='\n')
        print(end='\n')
    finally:
        print("</div>", end="\n\n")
from django.shortcuts import render, redirect
from .models import *
from df_user import user_decorator
from django.http import JsonResponse
# Create your views here.
@user_decorator.login
def cart(request):
    """Render every cart row belonging to the logged-in user."""
    uid = int(request.session.get('user_id'))
    items = CartInfo.objects.filter(user_id=uid)
    return render(request, 'df_cart/cart.html', {'carts': items})
@user_decorator.login
def add(request, gid, count):
    """Add *count* units of goods *gid* to the session user's cart.

    Increments an existing row if one exists, otherwise creates it.
    AJAX callers get the number of cart rows as JSON; everyone else is
    redirected to the cart page.
    """
    uid = request.session.get('user_id')
    gid, count = int(gid), int(count)
    existing = CartInfo.objects.filter(user_id=uid, goods_id=gid)
    if len(existing) >= 1:
        entry = existing[0]
        entry.count += count
    else:
        entry = CartInfo()
        entry.user_id = uid
        entry.goods_id = gid
        entry.count = count
    entry.save()
    if request.is_ajax():
        total = CartInfo.objects.filter(user=request.session['user_id']).count()
        return JsonResponse({'count': total})
    return redirect('/cart/')
@user_decorator.login
def edit(request, cart_id, count):
    """Set the quantity of one cart row.

    Replies {'ok': 0} on success; on failure the requested count is
    echoed back ({'ok': count}) so the page can restore the old value.
    """
    try:
        row = CartInfo.objects.get(pk=int(cart_id))
        row.count = int(count)
        data = {'ok': 0}
        row.save()
    except Exception as e:
        data = {'ok': count}
    return JsonResponse(data)
@user_decorator.login
def delete(request, cart_id):
    """Remove one cart row; reply {'ok': 1} on success, {'ok': 0} otherwise."""
    try:
        CartInfo.objects.get(pk=int(cart_id)).delete()
        data = {'ok': 1}
    except Exception as e:
        data = {'ok': 0}
    return JsonResponse(data)
| null | df_cart/views.py | views.py | py | 1,549 | python | en | code | null | code-starcoder2 | 51 |
114539526 | import sys
import os
import hashlib
import urllib.request
def get_hash(name):
    """Return the SubDB hash of a video file.

    The hash is the MD5 of the first 64 KiB plus the last 64 KiB of the
    file (the two chunks overlap for files shorter than 64 KiB).

    BUG FIX: the original did ``f.seek(-readsize, os.SEEK_END)``
    unconditionally, which raises OSError for files smaller than 64 KiB.
    """
    readsize = 64 * 1024
    with open(name, 'rb') as f:
        size = os.path.getsize(name)
        data = f.read(readsize)
        # Clamp so the seek target is never negative.
        f.seek(max(0, size - readsize))
        data += f.read(readsize)
    return hashlib.md5(data).hexdigest()
# Script: fetch an English subtitle from SubDB for the video passed as argv[1].
file_path = sys.argv[1]
# BUG FIX: the original split the path on backslashes only (Windows-only)
# and then hashed the bare basename, which fails unless the CWD is the
# video's own directory.  Use os.path and hash the full path instead.
file_extension = os.path.splitext(file_path)[1].lstrip('.').lower()
valid_extensions = {'webm', 'mkv', 'flv', 'vob', 'ogv', 'ogg', 'avi', 'mov',
                    'qt', 'wmv', 'yuv', 'rmvb', 'asf', 'mp4', 'mpg', 'mp2',
                    'mpeg', 'mpe', 'mpv', 'm2v', 'm4v', '3gp', '3gp2', '264'}
if file_extension not in valid_extensions:
    print('Not valid video file')
    input()
else:
    # SubDB identifies a video by the MD5 of its first+last 64 KiB.
    file_hash = get_hash(file_path)
    req = urllib.request.Request(
        url='http://api.thesubdb.com/?action=download&hash=' + file_hash + '&language=en',
        data=None,
        headers={
            'User-Agent': 'SubDB/1.0 (Pyrrot/0.1; http://github.com/jrhames/pyrrot-cli)'
        }
    )
    # Close both the HTTP response and the output file deterministically
    # (the original leaked both handles).
    with urllib.request.urlopen(req) as resp:
        subtitle = resp.read().decode('utf-8')
    with open('subtitle.srt', 'w', encoding='utf-8') as out:
        out.write(subtitle)
400967713 | #!/usr/bin/env python
import rospy
import numpy
import tf
import tf2_ros
import geometry_msgs.msg
def message_from_transform(T):
    """Convert a 4x4 homogeneous transform matrix into a
    geometry_msgs/Transform message (translation + quaternion)."""
    msg = geometry_msgs.msg.Transform()
    quat = tf.transformations.quaternion_from_matrix(T)
    trans = tf.transformations.translation_from_matrix(T)
    msg.translation.x = trans[0]
    msg.translation.y = trans[1]
    msg.translation.z = trans[2]
    msg.rotation.x = quat[0]
    msg.rotation.y = quat[1]
    msg.rotation.z = quat[2]
    msg.rotation.w = quat[3]
    return msg
def publish_transforms():
    # Broadcast two chained frames via the module-level broadcaster `br`:
    # world -> F1 (translation + Euler rotation) and F1 -> F2 (translation
    # + axis/angle rotation).  Called repeatedly from the main loop.
    T1 = tf.transformations.concatenate_matrices(
        tf.transformations.translation_matrix((1.0,1.0,0.0)),
        tf.transformations.quaternion_matrix(
            tf.transformations.quaternion_from_euler(1.0,1.0,1.0)
            )
        )
    T1_stamped = geometry_msgs.msg.TransformStamped()
    T1_stamped.header.stamp = rospy.Time.now()
    T1_stamped.header.frame_id = "world"
    T1_stamped.child_frame_id = "F1"
    T1_stamped.transform = message_from_transform(T1)
    br.sendTransform(T1_stamped)

    # F1 -> F2: unit x-translation then ~90 deg rotation about x.
    T2 = tf.transformations.concatenate_matrices(
        tf.transformations.translation_matrix((1.0,0.0,0.0)),
        tf.transformations.quaternion_matrix(
            tf.transformations.quaternion_about_axis(1.57,(1,0,0))
            )
        )
    T2_stamped = geometry_msgs.msg.TransformStamped()
    T2_stamped.header.stamp = rospy.Time.now()
    T2_stamped.header.frame_id = "F1"
    T2_stamped.child_frame_id = "F2"
    T2_stamped.transform = message_from_transform(T2)
    br.sendTransform(T2_stamped)

    # The commented blocks below published the inverse transforms as
    # F2 -> F3 and F3 -> F4; kept for reference.
#    T2_inverse = tf.transformations.inverse_matrix(T2)
#    T3_stamped = geometry_msgs.msg.TransformStamped()
#    T3_stamped.header.stamp = rospy.Time.now()
#    T3_stamped.header.frame_id = "F2"
#    T3_stamped.child_frame_id = "F3"
#    T3_stamped.transform = message_from_transform(T2_inverse)
#    br.sendTransform(T3_stamped)

#    T1_inverse = tf.transformations.inverse_matrix(T1)
#    T4_stamped = geometry_msgs.msg.TransformStamped()
#    T4_stamped.header.stamp = rospy.Time.now()
#    T4_stamped.header.frame_id = "F3"
#    T4_stamped.child_frame_id = "F4"
#    T4_stamped.transform = message_from_transform(T1_inverse)
#    br.sendTransform(T4_stamped)
if __name__ == "__main__":
    rospy.init_node("tf2_examples")
    # Module-level broadcaster read by publish_transforms().
    br = tf2_ros.TransformBroadcaster()
    rospy.sleep(0.5)
    # Re-publish the frames at ~2 Hz until shutdown.
    while not rospy.is_shutdown():
        publish_transforms()
        rospy.sleep(0.5)
| null | myCode/catkin_ws/src/tf2_examples/scripts/tf2_examples.py | tf2_examples.py | py | 2,269 | python | en | code | null | code-starcoder2 | 51 |
380117872 | # Paul J. Ruess
# University of Illinois at Urbana-Champaign
# Fall 2017
# Personal Research
# US Virtual Water Storage by County
import pandas
### READ IN RAW DATA ###
class alldata:
    """Class for reading in and cleaning harvest, yield,
    and storage values from raw USDA data in .csv format"""

    def __init__(self,harvest_path,harvest_cols,yield_path,yield_cols,storage_path,storage_cols,harvest_trim_list,yield_trim_list,codes_path,states_ignore):
        """All 'path' inputs must be strings leading to harvest,
        yield, and storage data file paths, respectively
        All 'cols' inputs must be lists of strings specifying
        which columns to import for each dataset

        NOTE(review): the states_ignore parameter is never used inside
        __init__; clean_data() reads a module-level states_ignore global
        instead (defined under __main__), so this class only works when
        that global exists.
        """
        # Read in dataset containing county grain harvest values
        self.harvest_data = pandas.read_csv(harvest_path,usecols=harvest_cols)
        # Read in dataset containing county grain yield values
        self.yield_data = pandas.read_csv(yield_path,usecols=yield_cols)
        # Read in dataset containing county grain storage values
        self.storage_data = pandas.read_csv(storage_path,usecols=storage_cols)
        # Read in dataset containing all county codes
        self.county_codes = pandas.read_csv(codes_path)

        # Manual cleaning unique to specific datasets
        # Trim to contain only commodities existing for all available data
        self.harvest_data = self.harvest_data[self.harvest_data['Data Item'].isin(harvest_trim_list)]
        self.yield_data = self.yield_data[self.yield_data['Data Item'].isin(yield_trim_list)]

        # Replace 'WILD RICE' with 'RICE' to simplify comparison with WF data in future
        self.harvest_data.loc[self.harvest_data['Commodity'] == 'WILD RICE','Commodity'] = 'RICE'

        # Remove summary county codes: 888 (District) and 999 (State)
        self.county_codes = self.county_codes[-self.county_codes['County ANSI'].isin(['888','999'])][:-1]

        # Remove empty rows (ie. no data in any columns) originated from reading in codes data from url
        self.county_codes = self.county_codes[pandas.notnull(self.county_codes['County ANSI'])]

        # Call cleaning function
        self.clean_data(self.harvest_data,'Harvest_Acre')
        self.clean_data(self.yield_data,'Yield_Bu_per_Acre')
        self.clean_data(self.storage_data,'Storage_Bu')

        # Make sure yield and harvest data are available for the same commodities
        # (drops commodities present in only one of the two datasets).
        if len(self.harvest_data['Commodity']) != len(self.yield_data['Commodity']):
            a_list = list( set(self.harvest_data['Commodity']) - set(self.yield_data['Commodity']) )
            b_list = list( set(self.yield_data['Commodity']) - set(self.harvest_data['Commodity']) )
            if len(a_list) > 0:
                for a in a_list:
                    self.harvest_data = self.harvest_data[self.harvest_data['Commodity'] != a]
            if len(b_list) > 0:
                for b in b_list:
                    self.harvest_data = self.harvest_data[self.harvest_data['Commodity'] != b]

        # Create GEOID column for datasets
        self.create_geoid(self.county_codes)
        self.create_geoid(self.harvest_data)
        self.create_geoid(self.yield_data)
        self.create_geoid(self.storage_data)

        # Stretch yield and harvest data out
        # self.harvest_data = self.stretch(self.harvest_data,'Harvest_Acre') # don't want this
        # self.yield_data = self.stretch(self.yield_data,'Yield_Bu_per_Acre')

        # Get fractional harvest distribution
        self.harvest_data = self.harvest_fraction()

    ### CLEAN UP DATA ###
    def clean_data(self,dataset,value_rename):
        """ Cleans up datasets """
        # Rename 'Value' column headers to have meaningful names
        dataset.rename(columns={'Value': value_rename},inplace=True) # Rename column header

        # Remove Alaska and Hawaii
        # NOTE(review): states_ignore here is the module-level global, not
        # the constructor argument — confirm intent.
        dataset.drop(dataset[dataset['State'].isin(states_ignore)].index,inplace=True)

        # Convert value columns to numeric by removing thousands' place comma
        # and converting all non-numeric, ie. ' (D)', to 'NaN'
        # Note that ' (D)' means data was 'Withheld to avoid disclosing data for individual operations'
        dataset[[value_rename]] = dataset[[value_rename]].apply(
            lambda x: pandas.to_numeric(x.astype(str).str.replace(',',''),
                errors='coerce')
            )

    def create_geoid(self,dataset):
        # Create GEOID column for yield data
        # (GEOID = 2-digit state FIPS + 3-digit county FIPS, zero-padded.)
        dataset['State ANSI'] = dataset['State ANSI'].apply(
            lambda x: '{0:02g}'.format(x) # formats leading zeros while ignoring decimal points
            )
        dataset['County ANSI'] = dataset['County ANSI'].apply(
            lambda x: '{0:03g}'.format(x) # formats leading zeros while ignoring decimal points
            )
        dataset['GEOID'] = dataset['State ANSI'] + dataset['County ANSI']

    # # Weighted average of land areas for all counties in 'other counties' from yield data
    # def average_other_yields(self):
    # 	# Remove 'Other counties' sections to determine which counties are accounted for
    # 	yield_counties = self.yield_data[pandas.notnull(self.yield_data['County ANSI'])]
    #
    # 	# Create GEOID column for yield data
    # 	yield_counties.loc[:,'State ANSI'] = yield_counties['State ANSI'].apply(
    # 		lambda x: str(x).zfill(2)
    # 		)
    # 	yield_counties.loc[:,'County ANSI'] = yield_counties['County ANSI'].apply(
    # 		lambda x: str(int(x)).zfill(3)
    # 		)
    # 	yield_counties.loc[:,'GEOID'] = yield_counties['State ANSI'] + yield_counties['County ANSI']
    #
    # 	# Remove counties included in yield_counties from area list using GEOID
    # 	area_subset = self.area_data[-self.area_data['GEOID'].isin(yield_counties['GEOID'])]
    #
    # 	# Add column containing total land area by state
    # 	area_subset.loc[:,'STATELAND'] = area_subset.groupby(['STATEFP'])['ALAND'].transform('sum')
    #
    # 	# Add column with fraction of 'other counties' land area made up by each county
    # 	area_subset.loc[:,'LANDFRACTION'] = area_subset['ALAND']/area_subset['STATELAND']
    #
    # 	# Save class variables
    # 	self.area_subset = area_subset
    # 	self.yield_counties = yield_counties

    # # Use average_other_yields() output to fractionally allocate 'other counties' yield data
    # def stretch_yields(self):
    # 	pass
    # 	self.average_other_yields()
    # 	# Make sure all counties are in yield_data

    # Dis-aggregate data from 'other counties' to all existing counties
    def stretch(self,dataset,value):
        # For every 'other counties' row (County ANSI == 'nan'), copy its
        # values onto each county in the same state/ag-district that has no
        # explicit row of its own, then append those copies.
        # dataset['STATE-DISTRICT'] = list(zip(dataset['State'], dataset['Ag District']))
        others = dataset[dataset['County ANSI'] == 'nan']
        nonothers = dataset[dataset['County ANSI'] != 'nan']
        # print self.county_codes[(self.county_codes['State ANSI'] == '01') & (self.county_codes['District ANSI'] == 40)]
        # print nonothers[(nonothers['State ANSI'] == '01') & (nonothers['Ag District Code'] == 40)]
        # print others.head()
        newrows = []
        for i,r in others.iterrows():
            d = nonothers[(nonothers['State'] == r['State']) & (nonothers['Ag District'] == r['Ag District']) & (nonothers['Commodity'] == r['Commodity'])] # dataframe of nonothers matching state-agdist-commodity of current 'others' row
            # state_geoids = self.county_codes[self.county_codes['State ANSI'] == r['State ANSI']]['GEOID'].unique()
            # other_geoids = set(state_geoids) - set(d['GEOID'].values)
            # print d['GEOID'].unique()
            # print d.head()
            a = self.county_codes[(self.county_codes['State ANSI'] == r['State ANSI']) & (self.county_codes['District ANSI'] == r['Ag District Code'])]# dataframe of all counties matching state-agdist-commodity of current 'others' row
            # print a['GEOID'].unique()
            # print a.head()
            nodata_geoids = set(a['GEOID'].unique()) - set(d['GEOID'].unique())
            # df_to_add = dataset[(dataset['GEOID'].isin(nodata_geoids)) & (dataset['Commodity'] == r['Commodity'])]
            # print df_to_add

            # For each geoid not represented, copy 'others' data and add row with updated geoid (and county, etc.)
            for g in nodata_geoids:
                temprow = others.loc[i,]
                c = self.county_codes[(self.county_codes['GEOID'] == g) & (self.county_codes['District ANSI'] == r['Ag District Code'])]
                temprow.at['County'] = c['Name'].values[0]
                temprow.at['GEOID'] = g
                temprow.at['County ANSI'] = c['County ANSI'].values[0]
                newrows.append(temprow)

        # Create new dataframe
        dfnew = nonothers.append(pandas.DataFrame(newrows,columns=others.columns)).reset_index()
        return dfnew

    # Convert harvest values to county-wide fractional harvest
    # Add zero for counties with no harvest in a county
    def harvest_fraction(self):
        # print self.harvest_data[self.harvest_data['GEOID'] == '56015']

        # Collect percentage of all area harvested by commodity for each state-county pair
        self.harvest_data['Percent_Harvest'] = self.harvest_data['Harvest_Acre'] # initialize new column
        df = self.harvest_data.groupby(['GEOID','State','Ag District','County','Commodity','Harvest_Acre'])['Percent_Harvest'].sum() #sum
        harvest = df.groupby(['GEOID']).apply( #percent
            lambda x: 100 * x / float(x.sum())
            )
        harvest = harvest.reset_index()
        return harvest
        # print harvest[harvest['GEOID'] == '56015']

    # Create summary dataframe with all data organized by GEOID
    def summary_df(self):
        # Not implemented yet.
        pass

    # Calculate VW of storage
    def calculate_vws(self):
        # Not implemented yet.
        pass

    def scraps(self):
        # NOTE(review): dead scrap code — references names that are not
        # defined anywhere in this module (storage, cropdata, county_area,
        # harvest_storage inputs); calling this method raises NameError.
        # Kept verbatim as a scratchpad of earlier experiments.
        # Remove NaN values as negligible in Value columns
        # harvest_data = harvest_data[pandas.notnull(harvest_data['Harvest_Acre'])]
        # yield_data = yield_data[pandas.notnull(yield_data['Yield_Bu_per_Acre'])]
        # storage_data = storage_data[pandas.notnull(storage_data['Storage_Bu'])]

        # alldata = harvest_data.merge(
        # 	storage_data,on=['State','State ANSI','County','County ANSI'])
        # 	yield_data,on=['State','State ANSI','County','County ANSI','Commodity']).merge(

        # alldata = alldata[['']]
        # print harvest_data[harvest_data['County'] == 'AUTAUGA']
        # print yield_data[yield_data['County'] == 'AUTAUGA']
        # print storage_data[storage_data['County'] == 'AUTAUGA']
        # print alldata[alldata['County'] == 'AUTAUGA']

        # Convert '(D)' to NaN in storage dataframe
        storage['Grain_Storage_Capacity_Bushels'] = storage['Grain_Storage_Capacity_Bushels'].apply(
            lambda x: pandas.to_numeric(x,errors='coerce')
            )

        # Make Country and State names uppercase in storage dataframe
        storage['State_Upper'] = storage['State_Name'].str.upper()
        storage['County_Upper'] = storage['County'].str.upper()

        # Merge cropdata with storage to add storage values to dataframe
        harvest_storage = cropdata.merge(storage,left_on=['State','County'],right_on=['State_Upper','County_Upper'])

        # Fix odd column names
        harvest_storage.rename(columns={'State_x': 'State','County_x': 'County'},inplace=True) # Rename column header

        # Collect percentage of all area harvested by commodity for each state-county pair
        commodity_sum = harvest_storage.groupby(['GEOID','State','County','Commodity','Grain_Storage_Capacity_Bushels'])['Value'].sum() #sum
        harvest = commodity_sum.groupby(['GEOID']).apply( #percent
            lambda x: 100 * x / float(x.sum())
            )

        # Formatting
        harvest = harvest.reset_index() # Makes output CSV pretty
        harvest.rename(columns={'Value': 'Percent'},inplace=True) # Rename column header

        # Add fractional grain storage to dataframe
        harvest['Fractional_Grain_Storage_Bushels'] = harvest['Percent'] * 0.01 * harvest['Grain_Storage_Capacity_Bushels'] # Calculate fractional areas

        # Add county land area to dataframe
        # harvest = harvest.merge(county_area,left_on='GEOID',right_on='STCOU')
        # harvest = harvest.drop('STCOU',1) # remove redundant column
        # harvest.rename(columns={'LND110210D': 'LandArea_SqMi'},inplace=True) # Rename column header
        # harvest['Fractional_Area_SqMi'] = harvest['Percent'] * 0.01 * harvest['LandArea_SqMi'] # Calculate fractional areas

        # Output results to csv file
        harvest.to_csv('county_fractional_grain_harvest.csv',index=False)
if __name__ == '__main__':
    # All paths and column specifications for data class
    harvest_path = 'usda_nass_data/usda_county_harvest_census_2012.csv' # Census data is more complete than Survey data
    harvest_cols = ['State','State ANSI','County','County ANSI','Ag District','Ag District Code','Commodity','Data Item','Value']
    yield_path = 'usda_nass_data/usda_county_yield_2012.csv'
    yield_cols = ['State','State ANSI','County','County ANSI','Ag District','Ag District Code','Commodity','Data Item','Value']
    storage_path = 'usda_nass_data/usda_county_storage_2012.csv' # Note: This data is overall grain storage; commodities not specified
    storage_cols = ['State','State ANSI','County','County ANSI','Ag District','Ag District Code','Value']
    codes_path = 'usda_nass_data/county_codes.csv'
    areas_path = 'county_areas.csv'

    # Lists of commodities to trim dataframes to
    harvest_trim_list = ['BARLEY - ACRES HARVESTED',
        'CORN, GRAIN - ACRES HARVESTED',
        'OATS - ACRES HARVESTED',
        'SORGHUM, GRAIN - ACRES HARVESTED',
        'RICE - ACRES HARVESTED',
        'RYE - ACRES HARVESTED',
        'WILD RICE - ACRES HARVESTED', # later combined with rice
        'WHEAT, SPRING, (EXCL DURUM) - ACRES HARVESTED', # later combined to wheat
        'WHEAT, SPRING, DURUM - ACRES HARVESTED', # later combined to wheat
        'WHEAT, WINTER - ACRES HARVESTED' # later combined to wheat
        ]
    yield_trim_list = ['BARLEY - YIELD, MEASURED IN BU / ACRE',
        'CORN, GRAIN - YIELD, MEASURED IN BU / ACRE',
        'OATS - YIELD, MEASURED IN BU / ACRE',
        'SORGHUM, GRAIN - YIELD, MEASURED IN BU / ACRE',
        'RICE - YIELD, MEASURED IN LB / ACRE',
        'RYE - YIELD, MEASURED IN BU / ACRE',
        'WHEAT, SPRING, DURUM - YIELD, MEASURED IN BU / ACRE', # later combined to wheat
        'WHEAT, SPRING, (EXCL DURUM) - YIELD, MEASURED IN BU / ACRE', # later combined to wheat
        'WHEAT, WINTER - YIELD, MEASURED IN BU / ACRE' # later combined to wheat
        ]

    # NOTE(review): alldata.clean_data reads this name as a module global,
    # so it must be defined before the constructor runs.
    states_ignore = ['ALASKA','HAWAII']
    data = alldata(harvest_path,harvest_cols,yield_path,yield_cols,storage_path,storage_cols,harvest_trim_list,yield_trim_list,codes_path,states_ignore)
    # print '------------------------------------------'
    # print 'harvest coms ',data.harvest_data.Commodity.unique()
    # print 'yield coms ',data.yield_data.Commodity.unique()
    # print '------------------------------------------'
    # print 'harvest data items ',data.harvest_data['Data Item'].unique()
    # print 'yield data items ',data.yield_data['Data Item'].unique()
    # print '------------------------------------------'
609285759 | import logging
from qwdeploy import exception
LOG = logging.getLogger(__name__)
class Deploy(object):
    """Deploy a Stack"""
    # CLI metadata — presumably consumed by the qwdeploy command registry
    # (name, help text, accepted parameters); confirm against the caller.
    name = 'deploy'
    help = __doc__
    params = []

    def run(self):
        # Placeholder: deployment is not implemented yet.
        raise exception.QwdeployError("command not implemented")
| null | qwdeploy/commands/deploy.py | deploy.py | py | 272 | python | en | code | null | code-starcoder2 | 51 |
549886257 | '''
Using Reddit's api/v1/
'''
import os
import requests
import requests.auth
import sys
import time
from .local_settings import *
from ..write_joke import *
# Files to read and write.
# NOTE(review): the backslash-newline inside each literal continues the
# string; the paths are absolute to the author's machine — adjust before
# running elsewhere.
jokes = '/Users/joannejordan/Desktop/GitHub/dad-joke-ai/dadjokes-subreddit-\
archive/otherrjokes.csv'
records = '/Users/joannejordan/Desktop/GitHub/dad-joke-ai/dadjokes-subreddit-\
archive/otherrecords.txt'
# Set reddit user agent
user_agent = f"{USER_AGENT} by {USERNAME}"
def get_auth():
    '''Get authorization to use reddit's api.

    Requests a fresh OAuth token with the script credentials, rewrites the
    last line of local_settings.py so the token is cached for later runs,
    and returns the "token_type access_token" header value.
    '''
    # Steps presented in reddit's docs
    client_auth = requests.auth.HTTPBasicAuth(CLIENT_ID, CLIENT_SECRET)
    post_data = {"grant_type": "password", "username": USERNAME,
                 "password": PASSWORD}
    headers = {"User-Agent": user_agent}
    response = requests.post("https://www.reddit.com/api/v1/access_token",
                             auth=client_auth, data=post_data, headers=headers)
    text = response.json()
    print(text)
    authorization = text['token_type'] + ' ' + text['access_token']
    # Save authorization to file
    # NOTE(review): the path is relative to the CWD, while this module is
    # imported as a package (`from .local_settings import *`) — confirm the
    # script is always run from this directory.
    variables = []
    # First, read old file (lines keep their trailing newline)
    with open('local_settings.py', 'r') as local:
        for line in local:
            variables.append(line)
    # Overwrite old file, replacing the final (AUTH) line.
    with open('local_settings.py', 'w') as local:
        for line in variables[:-1]:
            # BUG FIX: the original wrote line + '\n', doubling every
            # newline on each rewrite.
            local.write(line)
        # BUG FIX: the original wrote the literal text '{authorization}'
        # (missing f-prefix) and omitted the quotes, producing an invalid
        # Python assignment.
        local.write(f"AUTH = '{authorization}'")
    return authorization
def get_jokes_page(after):
    '''Requests jokes through reddit's online API, sidestepping PRAW's limit
    on the history of the instance.

    ``after`` is reddit's pagination fullname (None for the first page);
    returns the decoded JSON listing.  Re-authorizes on 401 and waits
    ~15 seconds then retries on 429.
    '''
    url = f'https://oauth.reddit.com/r/dadjokes/new.json?limit=100&after={after}'
    # See if old authorization works
    headers = {'Authorization': AUTH, 'User-Agent': user_agent}
    page = requests.get(url, headers=headers).json()
    try:
        if page['error'] == 401:
            # Token expired: get new authorization and retry once.
            authorization = get_auth()
            headers = {'Authorization': authorization,
                       'User-Agent': user_agent}
            page = requests.get(url, headers=headers).json()
        elif page['error'] == 429:
            # Rate limited: count down ~15 s (75 * 0.2 s), then retry.
            # BUG FIX: the original called len(75) (TypeError), called the
            # sys.stdout object instead of sys.stdout.write, dropped the
            # f-prefix on the countdown string, and discarded the result
            # of the recursive retry.
            sys.stdout.write('Too Many Requests. Waiting...\n')
            sys.stdout.flush()
            for t in range(75):
                if t % 5 == 0:
                    sys.stdout.write(f'{15 + (t - 75) // 15} seconds')
                    sys.stdout.write('\r')
                    sys.stdout.flush()
                time.sleep(.2)
            print('Resuming')
            return get_jokes_page(after)
    except (KeyError, TypeError):
        # A successful listing has no 'error' key — nothing to handle.
        pass
    return page
def record_jokes(page, last):
    '''Writes joke information to files.

    Appends every submission in the listing to the jokes CSV via the
    shared write_joke helper, records the pagination token in the records
    file, and returns that token so the caller can fetch the next page.
    NOTE(review): `before` and the `last` parameter are captured but
    never used.
    '''
    # Size of original file with jokes to compare to final and return
    # error if nothing added
    orig = os.path.getsize(jokes)
    # Ensure object is indeed a listing, otherwise, check if 429 error.
    # If 429 error, wait and repeat. Otherwise, raise error
    try:
        listing = page['data']['children']
        after = page['data']['after']
        before = page['data']['before']
    except:
        # Not a listing (e.g. an error payload) — show it and re-raise.
        print(page)
        raise
    else:
        with open(jokes, 'a') as joke_file:
            for submission in listing:
                sub_data = submission['data']
                write_joke(sub_data, joke_file, 'requests')
        new = os.path.getsize(jokes)
        # Guard: the jokes file must have grown, or something silently failed.
        if new == orig:
            raise ValueError('Nothing added')
        # Persist the pagination token so a later run can resume here.
        with open(records, 'a') as rec:
            rec.write(f'After: {after}\n')
        return after
def get_last():
    """Return the last pagination token recorded in the records file
    (the final whitespace-separated word of its last line), or None if
    the file is empty."""
    last_token = None
    with open(records, 'r') as rec:
        for entry in rec:
            last_token = entry.split()[-1]
    return last_token
if __name__ == "__main__":
    # Resume from the last recorded pagination token, if any.
    last = get_last()
    i = 1
    if not last:
        # Fresh start: fetch and record the first page.
        page = get_jokes_page(None)
        prev = record_jokes(page, None)
        print(f'Recorded page {i} with last submission: {prev}')
        last = prev
        i += 1
    # Walk the listing until reddit stops returning an 'after' token.
    while last:
        try:
            page = get_jokes_page(last)
            prev = record_jokes(page, last)
            print(f'Recorded page {i} with last submission: {prev}')
            last = prev
            i += 1
        except:
            # NOTE(review): `error` is set but never read; this bare
            # except re-raises immediately, so it only marks where a
            # failure occurred.
            error = True
            raise
| null | subreddits/limited_results_scripts/reddit_requests.py | reddit_requests.py | py | 4,290 | python | en | code | null | code-starcoder2 | 51 |
580020519 | import numpy as np
import math
import networkx as nx
import pickle,os
from copy import deepcopy,copy
from numpy import linalg as LA
def checks(A, B, e):
    """True when every entry of matrix A is within tolerance `e` of the
    matching entry of B (element-wise over a rectangular matrix)."""
    rows = len(A)
    cols = len(A[0])
    return all(abs(A[r][c] - B[r][c]) <= e
               for r in range(rows) for c in range(cols))
def similarity(G1, G2, e):
    """Iteratively compute a node-similarity matrix between digraphs G1
    and G2 by summing coupled in/out-neighbour scores and normalising by
    the spectral norm each sweep.

    Returns a len(G1) x len(G2) numpy array once consecutive (or
    alternating) iterates agree within tolerance `e`, or the last iterate
    after 100 sweeps.  NOTE(review): node labels are used directly as
    list indices, so both graphs must be labelled 0..n-1.
    """
    # BUG FIX: on networkx >= 2.0, predecessors()/successors() return
    # one-shot iterators; materialise them so every sweep can re-read.
    inG1 = [list(G1.predecessors(u)) for u in G1.nodes()]
    inG2 = [list(G2.predecessors(u)) for u in G2.nodes()]
    outG1 = [list(G1.successors(u)) for u in G1.nodes()]
    outG2 = [list(G2.successors(u)) for u in G2.nodes()]
    # oS holds the iterate before last (to detect 2-cycles), S the latest.
    oS = [[-1.0 for i in range(len(G2))] for j in range(len(G1))]
    S = [[0.1 for i in range(len(G2))] for j in range(len(G1))]
    Iterate = 100
    counter = 0
    while (counter < Iterate):
        _S = [[0.0 for i in range(len(G2))] for j in range(len(G1))]
        # Accumulate out-neighbour similarity.
        for i in G1.nodes():
            for j in G2.nodes():
                if len(outG1[i]) == 0 or len(outG2[j]) == 0:
                    continue
                for p in outG1[i]:
                    for q in outG2[j]:
                        _S[i][j] += S[p][q]
        # Accumulate in-neighbour similarity.
        for i in G1.nodes():
            for j in G2.nodes():
                if len(inG1[i]) == 0 or len(inG2[j]) == 0:
                    continue
                for p in inG1[i]:
                    for q in inG2[j]:
                        _S[i][j] += S[p][q]
        # Normalise by the spectral norm: sqrt of the largest eigenvalue
        # of _S @ _S.T.
        PSD = np.dot(_S, np.transpose(_S))
        w, v = LA.eig(PSD)
        sq = math.sqrt(max(w))
        _S = [[float(_S[j][i]) / float(sq) for i in range(len(G2))] for j in range(len(G1))]
        # Converged against the previous or before-last iterate?
        if checks(S, _S, e) or checks(oS, _S, e):
            print("Counter:", counter)
            return np.array(S)
        oS = copy(S)
        S = copy(_S)
        counter += 1
    # BUG FIX: the original fell off the loop and implicitly returned
    # None when the iteration did not converge; return the last iterate.
    return np.array(S)
def check(Y, threshold):
    """True when no entry of matrix Y exceeds `threshold`."""
    n_rows = len(Y)
    n_cols = len(Y[0])
    return not any(Y[r][c] > threshold
                   for r in range(n_rows) for c in range(n_cols))
def denominator(Y):
    """Normalise Y in place by its sum of squared entries, then return it
    as a numpy array (the argument list-of-lists is mutated too)."""
    total = 0.0
    for row in Y:
        for val in row:
            total += math.pow(val, 2)
    for r in range(len(Y)):
        for c in range(len(Y[0])):
            Y[r][c] /= total
    return np.array(Y)
def blondelS(G1, G2, threshold):
    # Blondel-style edge-coupled similarity: propagate scores X -> Y over
    # every edge pair until check() reports that all entries are at or
    # below `threshold`, or 100 iterations pass; prints and returns Y.
    # NOTE(review): assumes node labels index directly into the matrices
    # (0..n-1), like similarity() above.
    iterate = 0
    X = [[1.0 for u in range(len(G2))] for v in range(len(G1))]
    Y = deepcopy(X)
    while(True):
        #print ("Iteration ",iterate)
        #print (X)
        #Y = deepcopy(X)
        # Couple every edge (p, i) of G1 with every edge (q, j) of G2.
        for e1 in G1.edges():
            for e2 in G2.edges():
                (p,i) = e1
                (q,j) = e2
                Y[p][q] += X[p][q]
                Y[i][j] += X[p][q]
        # print(Y)
        if iterate > 0:
            if check(Y,threshold) or iterate > 100:
                print ("ITERATE:",iterate)
                print (Y)
                return Y
                # NOTE(review): this break is unreachable after the return.
                break
        # Normalise and carry the iterate forward.
        Y = denominator(deepcopy(Y))
        X = deepcopy(Y)
        iterate += 1
'''
os.chdir('/usr/local/home/sr3k2/sandbox/journalsGRN/modules/wsn/200')
G1 = nx.read_gml('mw0.gml')
mapping = pickle.load(open("mapping0.p","rb"))
G2 = nx.read_gml('grn_.gml')
Y = similarity(G1,G2,0.1)
a = 0.0
for u in mapping.keys():
a = a + Y[u][mapping[u]]
print ("Average similarity:",float(a)/len(G1))
''' | null | bioDRN/blondel.py | blondel.py | py | 3,679 | python | en | code | null | code-starcoder2 | 51 |
# Competitive-programming solution: read T queries of (n, a, b) from stdin
# and print one count per query, all arithmetic mod 1e9+7.
ansl = []
MOD = 10**9+7
for _ in range(int(input())):
    n,a,b = map(int, input().split())
    # If an a-length and b-length piece cannot fit side by side in n,
    # the answer is zero.
    if a+b > n:
        ansl.append(0)
        continue
    d = n-a-b
    # NOTE(review): (d+1)*(d+2) appears to count the non-crossing
    # placements along one axis given slack d — verify against the
    # original problem statement before reuse.
    no_cross = (d+1)*(d+2)
    no_cross %= MOD
    ans1 = no_cross * (n-a+1) * (n-b+1)
    ans1 %= MOD
    # Total placements minus non-crossing gives the crossing placements.
    cross = (n-a+1)*(n-b+1) - no_cross
    ans2 = no_cross*cross
    ans2 %= MOD
    # print('no_corss',no_cross)
    # print('corss',cross)
    # print()
    ans = (ans1+ans2)%MOD
    ansl.append(ans)
for a in ansl: print(a)
375993987 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/05/02 13:40
# @Author : c0l0121
# @File : searcher.py
# @Desc :
def binary_search(array, value):
    """Locate `value` in the sorted sequence `array`.

    Returns the index of `value` if present, otherwise -1.
    """
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        pivot = array[mid]
        if pivot == value:
            return mid
        if pivot > value:
            hi = mid - 1
        else:
            lo = mid + 1
    return -1
def binary_search_recursive(array, value, start=None, end=None):
    """Recursively locate `value` in the sorted sequence `array`.

    `start`/`end` bound the search window (defaulting to the whole
    array).  Returns the index of `value` if present, otherwise -1.
    """
    lo = 0 if start is None else start
    hi = len(array) - 1 if end is None else end
    if lo > hi:
        return -1
    mid = (lo + hi) // 2
    pivot = array[mid]
    if value < pivot:
        return binary_search_recursive(array, value, lo, mid - 1)
    if value > pivot:
        return binary_search_recursive(array, value, mid + 1, hi)
    return mid
159965973 | from collections import deque
def wiki(xs):
    """Return one longest strictly increasing subsequence of `xs` as a deque.

    Patience-sorting algorithm (per the Wikipedia LIS article, which the
    name references): `m[k]` holds the index of the smallest tail of an
    increasing subsequence of length k; `preds` stores back-links for
    reconstruction.  O(n log n).

    BUG FIX: the original dropped the binary-search loop (it probed a
    single midpoint), never updated the running maximum length, and
    rebuilt the result by prepending any smaller value seen while
    scanning backwards — so it did not compute the LIS at all.
    """
    if not xs:
        return deque()
    m = [0] * (len(xs) + 1)
    preds = [0] * len(xs)
    longest = 0
    for i, x in enumerate(xs):
        # Binary-search for the longest prefix length whose tail is < x.
        lo, hi = 1, longest
        while lo <= hi:
            mid = (lo + hi) // 2
            if xs[m[mid]] < x:
                lo = mid + 1
            else:
                hi = mid - 1
        preds[i] = m[lo - 1]
        m[lo] = i
        if lo > longest:
            longest = lo
    # Walk the back-links from the tail of the longest subsequence.
    rv = deque()
    k = m[longest]
    for _ in range(longest):
        rv.appendleft(xs[k])
        k = preds[k]
    return rv
if __name__ == '__main__':
    # Smoke test: a classic LIS example extended with a tail; the longest
    # increasing subsequence has length 9.
    output = wiki([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5,
                   13, 3, 11, 4, 5, 6, 7, 15, 8, 9])
    expected = [0, 2, 3, 4, 5, 6, 7, 8, 9]
    print(output)
    # NOTE(review): only the lengths are compared, not the contents —
    # any LIS of length 9 passes.
    assert len(output) == len(expected)
import random
import string
from random import choice

import aiohttp
import discord
from discord.ext import commands

from .utils.chat_formatting import *
from .utils.dataIO import dataIO
from .utils.dataIO import fileIO
from cogs.utils import checks
class TrustyBot:
    def __init__(self, bot):
        # Red-DiscordBot cog: canned text/image/link responses plus a few
        # novelty commands.  All response tables are loaded once from the
        # data/trustybot JSON stores.
        self.bot = bot
        self.text = dataIO.load_json("data/trustybot/messages.json")
        self.links = dataIO.load_json("data/trustybot/links.json")
        self.images = dataIO.load_json("data/trustybot/images.json")
        self.files = dataIO.load_json("data/trustybot/files.json")
        self.donotdo = dataIO.load_json("data/dnd/donotdo.json")
def first_word(self, msg):
return msg.split(" ")[0]
def get_prefix(self, server, msg):
prefixes = self.bot.settings.get_prefixes(server)
for p in prefixes:
if msg.startswith(p):
return p
return None
def part_of_existing_command(self, alias, server):
'''Command or alias'''
for command in self.bot.commands:
if alias.lower() == command.lower():
return True
return False
    async def on_message(self, message):
        # Dispatcher for the JSON-defined custom commands: when a server
        # message starts with a configured prefix and its first word
        # matches a stored key, reply with the stored image, link and/or
        # text (a key may exist in several stores).
        if len(message.content) < 2 or message.channel.is_private:
            return

        msg = message.content
        server = message.server
        channel = message.channel
        prefix = self.get_prefix(server, msg)
        if not prefix:
            return
        # These names are real bot commands handled by their own
        # callbacks below; skip them so they are not double-answered.
        ignorelist = ["dickbutt", "cookie", "tinfoil", "donate", "dreams", "memes"]
        alias = self.first_word(msg[len(prefix):]).lower()

        if alias in ignorelist:
            return
        if alias in self.images:
            image = self.images[alias]
            await self.bot.send_typing(channel)
            await self.bot.send_file(channel, image)
        if alias in self.links:
            link = self.links[alias]
            await self.bot.send_typing(channel)
            await self.bot.send_message(channel, link)
        if alias in self.text:
            msg = self.text[alias]
            await self.bot.send_typing(channel)
            await self.bot.send_message(channel, msg)
@commands.command(pass_context=True)
async def addimage(self, ctx, command):
"""Add an image to direct upload."""
author = ctx.message.author
server = ctx.message.server
channel = ctx.message.channel
prefix = self.get_prefix(server, ctx.message.content)
msg = ctx.message
if command is not "":
if command in self.images or self.part_of_existing_command(command, server):
await self.bot.say("{} is already in the list, try another!".format(command))
return
else:
await self.bot.say("{} added as the command!".format(command))
await self.bot.say("Upload an image for me to use!")
while msg is not None:
msg = await self.bot.wait_for_message(author=author, timeout=60)
if msg is None:
await self.bot.say("No image uploaded then.")
break
if msg.attachments != []:
filename = msg.attachments[0]["filename"]
directory = "data/trustybot/img/" + filename
if command is None:
command = filename.split(".")[0]
if directory in self.images.values():
seed = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))
directory = "data/trustybot/img/" + seed + filename
if directory not in self.images.values():
self.images[command] = directory
dataIO.save_json("data/trustybot/images.json", self.images)
with aiohttp.ClientSession() as session:
async with session.get(msg.attachments[0]["url"]) as resp:
test = await resp.read()
with open(self.images[command], "wb") as f:
f.write(test)
await self.bot.send_message(channel, "{} has been added to my files!"
.format(command))
break
if msg.content.lower().strip() == "exit":
await self.bot.say("Your changes have been saved.")
break
@commands.command()
async def listimages(self):
"""List images added to bot"""
msg = ""
for image in self.images.keys():
msg += image + ", "
await self.bot.say("```" + msg[:len(msg)-2] + "```")
@commands.command()
async def listtext(self):
"""List phrases added to bot"""
msg = ""
for text in self.text.keys():
msg += text + ", "
await self.bot.say("```" + msg[:len(msg)-2] + "```")
@commands.command()
async def listlinks(self):
"""List links added to bot"""
msg = ""
for link in self.links.keys():
msg += link + ", "
await self.bot.say("```" + msg[:len(msg)-2] + "```")
    @commands.command(pass_context=True, aliases=["db"])
    async def dickbutt(self, ctx):
        """DickButt"""
        # Upload a random variant (.png or .gif); suppressed on one
        # specific server (hard-coded guild id).
        ext = ["png", "gif"]
        if ctx.message.server.id != "261565811309674499":
            await self.bot.upload(self.images["dickbutt"]
                                  .format(choice(ext)))
@commands.command(pass_context=True)
async def neat(self, ctx, number=None):
"""Neat"""
files = "data/trustybot/img/neat{}.gif"
if number is None:
await self.bot.upload(files.format(str(choice(range(1, 6)))))
elif number.isdigit() and (int(number) > 0 or int(number) < 8):
await self.bot.upload(files.format(number))
@commands.command(pass_context=True)
async def cookie(self, ctx, user=None):
"""cookie"""
msg = "Here's a cookie {}! :smile:"
if user is None:
await self.bot.upload(self.images["cookie"])
else:
await self.bot.upload(self.images["cookie"],
content=msg.format(user))
@commands.command(pass_context=True, aliases=["tf"])
async def tinfoil(self, ctx):
"""Liquid Metal Embrittlement"""
await self.bot.upload(self.images["tinfoil"]
.format(choice(["1", "2"])))
@commands.command(pass_context=True,)
async def donate(self, ctx):
"""Donate some bitcoin!"""
gabcoin = "1471VCzShn9kBSrZrSX1Y3KwjrHeEyQtup"
DONATION = "1DMfQgbyEW1u6M2XbUt5VFP6JARNs8uptQ"
msg = "Feel free to send bitcoin donations to `{}` :smile:"
gabimg = "data/trustybot/img/gabbtc.jpg"
img = "data/trustybot/img/btc.png"
if ctx.message.server.id == "261565811309674499":
await self.bot.upload(gabimg)
await self.bot.say(msg.format(gabcoin))
else:
await self.bot.upload(img)
await self.bot.say(msg.format(DONATION))
# Text Commands #
@commands.command(hidden=False)
@commands.cooldown(1, 60, commands.BucketType.server)
async def grep(self):
"""Get the fuck out of here with grep!"""
await self.bot.say("Get the fuck out of here with grep!")
@commands.command(pass_context=True)
async def dnd(self, ctx, number=None):
if number is None:
await self.bot.say(choice(self.donotdo))
elif number.isdigit():
await self.bot.say(self.donotdo[int(number)-1])
else:
await self.bot.say(choice(self.donotdo))
@commands.command(hidden=False)
async def passphrase(self):
"""Wikileaks Vault7 Part 1 passphrase"""
await self.bot.say("`SplinterItIntoAThousandPiecesAndScatterItIntoTheWinds`")
@commands.command(name="pineal", aliases=["pineal gland"])
async def pinealGland(self, message=None):
"""Links to pineal gland"""
if message == "calcification" or message == "calcified":
await self.bot.say(self.links["pineal"][1])
if message == "healthy":
await self.bot.say(self.links["pineal"][2])
if message is None:
await self.bot.say(self.links["pineal"][0])
@commands.command(hiddent=False, pass_context=True)
async def illuminati(self, ctx):
"""o.o"""
emilum = ["\U0001F4A1", "\U000026A0", "\U0000203C", "\U000026D4"]
ilum = ":bulb: :warning: :bangbang: :no_entry:"
msg = await self.bot.say(ilum)
for i in emilum:
await self.bot.add_reaction(msg, emoji=i)
@commands.command(hidden=False)
async def halp(self, user=None):
"""How to ask for help!"""
msg = "{} please type `;help` to be PM'd all my commands! :smile:"
if user is None:
await self.bot.say(msg.format(""))
else:
await self.bot.say(msg.format(user))
@commands.command(hidden=False)
async def dreams(self):
"""don't let your dreams be dreams"""
await self.bot.say(self.text["dreams"].format("dreams"))
@commands.command(hidden=False)
async def memes(self):
"""don't let your memes be dreams"""
await self.bot.say(self.text["dreams"].format("memes"))
@commands.command(pass_context=True)
async def flipm(self, ctx, *, message):
"""Flips a message"""
msg = ""
name = ""
for user in message:
char = "abcdefghijklmnopqrstuvwxyz - ABCDEFGHIJKLMNOPQRSTUVWXYZ"
tran = "ɐqɔpǝɟƃɥᴉɾʞlɯuodbɹsʇnʌʍxʎz - ∀qƆpƎℲפHIſʞ˥WNOԀQᴚS┴∩ΛMX⅄Z"
table = str.maketrans(char, tran)
name += user.translate(table) + " "
await self.bot.say(msg + "(╯°□°)╯︵ " + name[::-1])
def setup(bot):
    """discord.py entry point: attach the TrustyBot cog to the bot."""
    bot.add_cog(TrustyBot(bot))
| null | trustybot/trustybot.py | trustybot.py | py | 9,979 | python | en | code | null | code-starcoder2 | 51 |
282268603 | #!coding:utf-8
import execjs
import re
import requests
from urllib3 import disable_warnings
from requests import Session
import wx
import io
import time
import uuid
import json
import base64
import sys
import Crypto
import traceback
from Crypto.Cipher import AES
# SECRET_KEY = 'B123JDVgT8WDGOWBgQv6EIhvxl4vDYvUnVdg-Vjdt11='
# AES key material: add_to_16(SECRET_KEY) is used as the ECB key in
# encrypt_text()/decrypt_text() below
SECRET_KEY = '4vDYvUnVdg-Vjdt11='
# licence/score server queried by get_score()/cost_score()
HOST = 'http://vpn.yangmingcheng.online:8001'
'''
采用AES对称加密算法
'''
# when True, the tool verifies remaining credit with the remote server
score_switch = True
# shared HTTP timeout (seconds) used throughout this module
timeout = 50
versionName = u'多设备业务办理0226'
if score_switch:
    # tag the window title so the credit-metered build is distinguishable
    versionName = versionName + u'(积分版)'
# str不是16的倍数那就补足为16的倍数
def add_to_16(value):
    """Encode *value* as UTF-8 and zero-pad it to a multiple of 16 bytes.

    AES (the ECB mode used below) requires inputs whose byte length is a
    multiple of the 16-byte block size.  Padding is applied to the encoded
    bytes rather than to the character count, so multi-byte (e.g. Chinese)
    text is aligned correctly as well; for pure-ASCII input the result is
    byte-identical to the old character-count padding.
    """
    data = value.encode('utf-8')
    data += b'\0' * (-len(data) % 16)  # 0 extra bytes when already aligned
    return data
# 加密方法
# 加密方法
def encrypt_text(text):
    """AES-ECB encrypt *text* and return the result base64-encoded.

    Non-string input is serialised with json.dumps first.  The key is
    derived from SECRET_KEY via add_to_16.
    """
    payload = text if type(text) is str else json.dumps(text)
    cipher = AES.new(add_to_16(SECRET_KEY), AES.MODE_ECB)
    raw = cipher.encrypt(add_to_16(payload))
    # base64.encodebytes gives bytes (with trailing newline); decode to str
    encrypted_text = str(base64.encodebytes(raw), encoding='utf-8')
    return encrypted_text
# 解密方法
def decrypt_text(text):
    """Reverse of encrypt_text: base64-decode, AES-ECB decrypt, strip padding.

    Returns None for falsy input.  If the recovered plaintext parses as
    JSON the parsed object is returned, otherwise the raw string.
    """
    if not text:
        return None
    raw = text if type(text) is str else json.dumps(text)
    cipher = AES.new(add_to_16(SECRET_KEY), AES.MODE_ECB)
    decoded = base64.decodebytes(raw.encode(encoding='utf-8'))
    plain = str(cipher.decrypt(decoded), encoding='utf-8').replace('\0', '')
    try:
        plain = json.loads(plain)
    except:
        pass
    return plain
# d = encrypt_text({'data':'123'})
# decrypt_text(d)
# exit()
#
# def encrypt_text(data):
# """
# 加密字符串
# :param text:需要加密的内容
# :return: 加密后的内容
# """
# if type(data) is str:
# to_encrypt_data = data.encode('utf-8')
# else:
# to_encrypt_data = json.dumps(data).encode('utf-8')
# cipher = Fernet(SECRET_KEY)
# return cipher.encrypt(to_encrypt_data).decode('utf-8')
#
#
# def decrypt_text(data):
# """
# 解密字符串
# :param text:需要解密的内容
# :return: 解密后的内容
# """
# if not data:
# return None
# if type(data) is str:
# to_decrypt_data = data.encode('utf-8')
# else:
# to_decrypt_data = json.dumps(data).encode('utf-8')
# cipher = Fernet(SECRET_KEY)
# decrypt_data = cipher.decrypt(to_decrypt_data).decode('utf-8')
# try:
# decrypt_data = json.loads(decrypt_data)
# except:
# pass
# return decrypt_data
# console notice at startup: "please do not close this window"
print('请勿关闭此窗口!')
def GetMondrianData():
    """Return the embedded window-icon PNG as raw bytes.

    Inlining the image means no separate icon file has to ship next to
    the (frozen) executable.
    """
    return b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00<\x00\x00\x006\x08\x06\x00\x00\x00' \
           b'\x9bg\xfa\x14\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x04gAMA\x00\x00\xb1' \
           b'\x8f\x0b\xfca\x05\x00\x00\x00\tpHYs\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7o\xa8d\x00' \
           b'\x00\x05%IDAThC\xed\x99=o\x1dE\x14\x86\xf3\x0f\xe0\x1f\x90?\x90\x8f\x1e$hC\x93tI\x07\r' \
           b'\xb4 A\x9f\x0f\x89.\xc8)\xa24`@\xc2\x9d\x91@\xa62\x14\xb1\x92\xc2\xf9\x90LbdK\x8el\x05' \
           b'\xc7X\x18\x83\x85\xa3 \x02\x02i\xf0\xbd\xd9\xd97\xbf\xdcf\x17\xbb\xd9\xf3\xbfs\x9d\xdb\xf9\xfb\x9c\xef|\xe7{\xce\xf7\x9c\xef\x9c\xef\x9c\x99\x17F' \
           b's\xa3\xca\xe5\xe5\xf0\xf5\xca\xee\xfa\xaa\xfe\xd4\x1b0Q\xcb\x01\x8d\x8dk\xc8\x86\xbe\xd4' \
           b'\x0b0\xa9\xfb\xd2\xc5\x1f]\xa0\xe3\x9f\xae\x84s\xd7\xb6\xc2\x17\x8b;an\xfdQ\xb8t\xeb\xb7\xf0' \
           b'\xfe\xf7\x9b\xd5\xb8\xe6pm_\xd1.\x0e\xfc\xd6\xcc\x83\x01@\xd9\xa9\xe9\xfb\xad\x91#\xfd\xdf\x98z' \
           b'\x9a\x15\xdc\xab\xb4\x8a\x01\xff\xf1\xf8?7\x85_\xfe\xb8{\xb4\xc8\x10\xae\xe3\xfac\x9f\xacT\xf7.' \
           b'\xa5"\xc0\xd4+\x8e\xc5\xb0\xa4\xa9\xe7,c,\x02\x1dZFt\xad\xb8\xa7\xd2\xbc$\xf4\xc8\xc08\xe6\xd5' \
           b'\xeb\xdb\xdf>p\x9d\x04.U\xdf\x8c\xf3w]\xc7\xcf\xd2\xd0#\x01\x13%\xcfy\x9aP,\x9c\xf5\xb2\xc03\xe6' \
           b'\xd9\xbd\x99\xc5\xd3\xf8\xa8\xd0C\x03Sg\xb1\xa3\x18\xe3\xb1RY \xa3\xa1\xd1\xb9\xf9\xa9\xdae\xbe' \
           b'\xbdW)\xe8\xa1\x80\xbb\xc0R\x9b)X\xe0l$\x110\xc0k\x8e\x07=J\xf7\xee\x0c\xec\xc1\xe2\xb8\x07\x9bZ' \
           b'\x18,\xd5\xd0$\x16B\xd1\xe6\x08*\tz\xe2\xe6v=\xd2M\x9d\x80qPN\xc8\xbc(\xa1&X\xf6\xda\x9c\xb4d\x8e\x9a' \
           b'\x96\xa2j\xc7\xe2\xce\x9e\xa3\xce\x11\xe6!r<\x05K\xa7\xd5\x9c\xd8\x88\x90\xa7\xd5\xdfg\xc3\xfc\xfaDe' \
           b'\xfc.5AS*9\x0bg5t\r\x13%\x0f6u\xd2\xc2\xa8\xcdX\x7f\xff\xfb0L/\x9e\x0e\x13\xd7\x0f\x0f\xd8\xd4\xc2' \
           b'\x89\xf0\xf0\xf1F5\xc7B+\x95\x95\xf2\'\xf7\x1a]\x17e\x01o\xff\xb9T9\xc1\xcf&5\xc1z5\xce\xfd\x00' \
           b'\x8baeW\xe6\x8f\xec?\xd3B\xeb^l\x8b|\xeer\x92\xcb\x02&\nS\x0bo\x0e8\x10+\x05K\x14R\xb0\xdc\xcf\x03' \
           b'\xb5\xc6\x1c\xa5\xb8\x85\x16$\xe5\xc3\xabenjg\xa7\xf4\x13\x07\x8fV\x11a\x01\xac\x9a`\xbd\xb4_\xda' \
           b'\x9an\x85\x9d^<\x13&o\xbf\xb6\xff\x99k\x90\xa0\xa9_\xdd\x9b\xf2\xca\xed\xda\x9dj\x98\x87\xf2\xf0\x18' \
           b'\xda\xeb\xc8M\xb0\x82\xf0\x8cLR\xed\xa2\x85\xcd\xcf\xf6\xff\x16C+\xb2X\xee;t\x160\x00r~n\xed|' \
           b'\xedX\x1a:\x05\xabkS\x06l\x9c=\xe8IF\x1c\xad\xe6Xh\x9e\xc3\x1bZ\x17e\x01\xb3\x95\xd8:\x99\xbd\xf7a' \
           b'\xed\xe0 4uE\'\xf6`gW>\x18\x80\x8b\x8d{6I%\xc5\\A\xf3\x1c\xa0\xa9\xe3\\e\x01+\x85\xec\x16\x90\x82' \
           b'\x8e\xc5\xdf\x86\x81\xe5\x99\xec\xf9Zd\xe4A+\xb3r\x0f!\xd95\xccjrcm\xfeH\xd0\x93\xb7^u\xbb7\xb0,\x88' \
           b'\x85\x8bmn\xedB=\xfb\xa9\x80\xd4\x9bU\xfc\xb2`\xa1\xd5\xbd9z\xe6\xa6v\x16\xb0RT\xabi\xb7\x19A\xd3u7v' \
           b'\xe7\xeb\xd1P5\x9e6XE\xc9\xca\xc2\xcaR\xd0<\xd3[\xe8&e\x01\xb3z\x82d5S\xd0\x18\xa7&\xaci\xdb\xc1Y\x0f' \
           b'\x96\x85M\xbdY\xd9\x17\x08$h\xb2\xab\xa9\xa4be\x01\xeb\x15O\x91\xe6\xbd\x15\',\xf4\xfc\xfa%\x17.6\x9c' \
           b'\xf4\xa2\xd2\x04K\xff\xf0DJs\xcf\x99\xe5w\xea\x91ve\xd70\x90J-L\'\x1e\x0bM\xd4b@kl;\x1e\xac\x164\x06' \
           b'\xc5\xda\xde\xac\xf4L\xbbw7)\x1b\x98\x87\xb2\x05\xa8S\xa7\xa0\xa9c5\x15k\x9c\x9c\xbc\xd4\xe3\xda\x18R' \
           b'\xe6\xbdY\xf1\\\xfb\xbd\x17\xda\xd8\xbdQ\xff\xd6\xael`\xa4\xc3\xba\xf6=\x1ezxo\x7ff\xccB\x03\xc6\t\t\xc8' \
           b'\x99\xe5w\xc3\xd2\xaf_\xd5\x7f\x19\x14\xc7A\x0bh\xcd\x83\xe54\x95\xea\xde\xb9\xea\x04\x8c\xf4\x8d\x83' \
           b'\x00\xa9="o\xc7r\xd4\xf4f\xe5\xc1z5n\xb7\xc8\\u\x06\xb6\xa9\xac\xcd\x1eg4\xd6\xe6\x04\xd7S\x16\xd6qk' \
           b'\xfc\xeb%V\xaa\xa1y\x0b\xd3\xa6\xce\xc0HQ\xb5\x9d\xdb.\x04\xdb\x98\x97n\x8c\xc5{\xac5/CJ\xc2\xa2\xa1\x80' \
           b'\x11\xd1\xe5\xc1)h\x8chS\xf7\xcc\xa5^=\xc71\x16\xcf{\x89g\xcc\xbb\xc6\xfb\xe6$WC\x03#uX\x0b\x8d' \
           b'\xf8">v2e4={\xad\xa4{\xc7\xd6\xa5Ox\x1a\t\x18Y\xc7\xac3DG\x1d<e\xec\xed^\xea\xeb4g\x8d,\x18\x15\x16\x8d' \
           b'\x0c\x8c,t\xfc\xcd\xc37{\xe0\xd4\x1b\x07\x08\x19\x19\xe0E\x15x\xaf{\x03\xeb\xcd\x1fFE\x80\x91\x85\xa6' \
           b'\x0b{\x91k\x92\xddc\xad\xd1\x13J\xc1\xa2b\xc0\x88\xe6\xa4=\x99/\x0crR\x90\x85I5\xb4\xb6c\xe50*\n\x8c' \
           b'\x88\x14\x8e\xcai\xc09\x99i\xcfF@\xf0\x99ZMun\xef?\x90%T\x1cX\xb2\xff\x10\xebb\xa9-\xaa\x94z\x03Fq\xb4' \
           b'\x9b\x0cP\x16\xa9t\n\xc7\xea\x15X\xa2\xe9\x90\xa21<\r\x89\x0eN\'\xef\x1bTz&\xc0\xcf\x93\xc6\xc0\x07]c' \
           b'\xe0\x83\xae\x17\x0c8\x84\xff\x01p\r\x15\x02i\x1c$\x97\x00\x00\x00\x00IEND\xaeB`\x82'
    # return b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\
    # \x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00qID\
    # ATX\x85\xed\xd6;\n\x800\x10E\xd1{\xc5\x8d\xb9r\x97\x16\x0b\xad$\x8a\x82:\x16\
    # o\xda\x84pB2\x1f\x81Fa\x8c\x9c\x08\x04Z{\xcf\xa72\xbcv\xfa\xc5\x08 \x80r\x80\
    # \xfc\xa2\x0e\x1c\xe4\xba\xfaX\x1d\xd0\xde]S\x07\x02\xd8>\xe1wa-`\x9fQ\xe9\
    # \x86\x01\x04\x10\x00\\(Dk\x1b-\x04\xdc\x1d\x07\x14\x98;\x0bS\x7f\x7f\xf9\x13\
    # \x04\x10@\xf9X\xbe\x00\xc9 \x14K\xc1<={\x00\x00\x00\x00IEND\xaeB`\x82'
def GetMondrianBitmap():
    """Return the embedded icon PNG as a wx.Bitmap."""
    image = GetMondrianImage()
    return wx.Bitmap(image)
def GetMondrianImage():
    """Return the embedded icon PNG as a wx.Image."""
    return wx.Image(io.BytesIO(GetMondrianData()))
def GetMondrianIcon():
    """Return the embedded icon PNG as a wx.Icon (used for window icons)."""
    ico = wx.Icon()
    ico.CopyFromBitmap(GetMondrianBitmap())
    return ico
def get_mac_address():
    """Return this machine's MAC address formatted as aa:bb:cc:dd:ee:ff."""
    hex_mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
    pairs = [hex_mac[i:i + 2] for i in range(0, 12, 2)]
    return ":".join(pairs)
def get_score():
    """Authenticate against the licence server and fetch account data.

    Posts the encrypted work number / phone / password to HOST's Login
    endpoint and returns the decrypted response (a dict with at least a
    'score' key, judging by the callers), or None on any failure.
    """
    req_data = {
        'wkNo': workNo,
        'phone': login_phone,
        'password': login_password,
        'time': time.time(),
    }
    try:
        res = requests.post(url=HOST + '/api/Login/',
                            data={'data': encrypt_text(req_data)},
                            timeout=timeout)
        return decrypt_text(json.loads(res.text)['data'])
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any network/parse error yields None
        print(traceback.format_exc())
        return None
def cost_score(msg):
    """Report an order outcome to the licence server (deducting credit).

    *msg* is the portal's result text; it counts as a success when it
    contains '成功' ("success").  Returns the decrypted server response,
    or None on any failure.
    """
    order_result = '成功' in msg
    req_data = {
        'wkNo': workNo,
        'msg': msg,
        'order_result': order_result,
        'time': time.time(),
    }
    try:
        res = requests.post(url=HOST + '/api/OrderResult/',
                            data={'data': encrypt_text(req_data)},
                            timeout=50)
        return decrypt_text(json.loads(res.text)['data'])
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any network/parse error yields None
        print(traceback.format_exc())
        return None
# workNo = 'J120249'
# d = cost_score('成功了')
# print(d)
# exit()
# data = b'My super secret message'
# d = encrypt_text(data)
# decrypt_text(d)
# exit()
# suppress urllib3's InsecureRequestWarning — every request below uses verify=False
disable_warnings()
# D920ABDB874BA71A3A7F4BFCDE6891F094693E0E4B617AB0
# single shared HTTP session; headers mimic an IE11 browser talking to the
# 211.138.30.200 business portal
session = Session()
session.headers = {
    'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',
    'Accept-Language': 'zh-CN',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Accept-Encoding': 'gzip, deflate',
    'Host': '211.138.30.200',
    'Connection': 'Keep-Alive',
}
# module-level login state shared (via `global`) between the two wx frames
encryped_mobileNoNew, encryped_workNoNew, encryped_passwordNew, yzmbox, login_phone, workNo, login_password, clientMac = '', '', '', '', '', '', '', ''
def save_html(txt):
    """Dump *txt* to a timestamped error<epoch>.html for post-mortem debugging."""
    # explicit utf-8: the portal pages contain Chinese text, which can crash
    # open()'s platform-default codec (e.g. gbk) on Windows
    with open('error{}.html'.format(int(time.time())), 'w+', encoding='utf-8') as f:
        f.write(txt)
def save_code(data):
    """Persist the captcha image bytes to code.jpg and hand them back unchanged."""
    with open('code.jpg', 'wb+') as img_file:
        img_file.write(data)
    return data
class LOGIN(wx.Frame):
    """Login window for the 211.138.30.200 business portal.

    Collects phone number, work number, password, client MAC, an image
    captcha and an SMS code; encrypts the credentials with the portal's
    own JavaScript (executed through execjs); and on success hides itself
    and shows the global ``action_frame`` window.
    """
    def __init__(self, parent, id):
        self.locale = wx.Locale(wx.LANGUAGE_ENGLISH)
        wx.Frame.__init__(self, parent, id, u'登录', size=(350, 400))
        # create the panel
        panel = wx.Panel(self)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        # with open('login.js', encoding='utf-8') as f:
        #     js = f.read()
        # NOTE(review): get_js_file() is defined elsewhere in the project —
        # presumably it returns the portal's login JavaScript source; confirm
        js = get_js_file()
        self.ctx = execjs.compile(js)
        # one horizontal sizer per form row
        bsizer_phone = wx.BoxSizer(wx.HORIZONTAL)
        bsizer_wkno = wx.BoxSizer(wx.HORIZONTAL)
        bsizer_password = wx.BoxSizer(wx.HORIZONTAL)
        bsizer_mac = wx.BoxSizer(wx.HORIZONTAL)
        bsizer_pic_code = wx.BoxSizer(wx.HORIZONTAL)
        bsizer_msg_code = wx.BoxSizer(wx.HORIZONTAL)
        bsizer_button = wx.BoxSizer(wx.HORIZONTAL)
        self.login_tips_phone = wx.StaticText(panel, 0, u"手机号: ", style=wx.TE_LEFT)
        self.login_tips_wkno = wx.StaticText(panel, 0, u"工号: ", style=wx.TE_LEFT)
        self.login_tips_password = wx.StaticText(panel, 0, u"密码: ", style=wx.TE_LEFT | wx.EXPAND)
        self.login_tips_mac = wx.StaticText(panel, 0, u"MAC: ", style=wx.TE_LEFT | wx.EXPAND)
        self.login_tips_pic_code = wx.StaticText(panel, 0, u"图片验证码 :", style=wx.TE_LEFT | wx.EXPAND)
        # self.msg_code_pic = wx.Icon(name='code.jpg', type=wx.BITMAP_TYPE_PNG)
        # fetch the captcha up front so the bitmap button has an image
        self.img_data = self.get_code()
        self.image = wx.Image(self.img_data, wx.BITMAP_TYPE_JPEG).ConvertToBitmap()
        self.msg_code_pic = wx.BitmapButton(panel, -1, bitmap=self.image)
        self.login_tips_msg_code = wx.StaticText(panel, 0, u"短信验证码 :", style=wx.TE_LEFT | wx.EXPAND)
        self.bt_send_msg = wx.Button(panel, label='发送验证码')
        self.Bind(wx.EVT_BUTTON, self.get_login_phone_msg, self.bt_send_msg)
        self.bt_login = wx.Button(panel, label='登录')
        self.Bind(wx.EVT_BUTTON, self.login, self.bt_login)
        self.bt_login_cookie = wx.Button(panel, label='凭证登录')
        bsizer_button.Add(self.bt_login, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
                          border=5)
        bsizer_button.Add(self.bt_login_cookie, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
                          border=5)
        self.Bind(wx.EVT_BUTTON, self.login_by_cookie, self.bt_login_cookie)
        self.login_phone_box = wx.TextCtrl(panel, style=wx.TE_LEFT)
        self.login_wkno_box = wx.TextCtrl(panel, style=wx.TE_LEFT)
        self.login_password_box = wx.TextCtrl(panel, style=wx.TE_LEFT)
        self.login_mac_box = wx.TextCtrl(panel, style=wx.TE_LEFT)
        self.login_pic_code_box = wx.TextCtrl(panel, style=wx.TE_LEFT)
        self.login_msg_code_box = wx.TextCtrl(panel, style=wx.TE_LEFT)
        bsizer_phone.Add(self.login_tips_phone, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
                         border=5)
        bsizer_phone.Add(self.login_phone_box, proportion=0,
                         flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
        bsizer_wkno.Add(self.login_tips_wkno, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
                        border=5)
        bsizer_wkno.Add(self.login_wkno_box, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
                        border=5)
        bsizer_password.Add(self.login_tips_password, proportion=0,
                            flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
        bsizer_password.Add(self.login_password_box, proportion=0,
                            flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
        bsizer_mac.Add(self.login_tips_mac, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
                       border=5)
        bsizer_mac.Add(self.login_mac_box, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
                       border=5)
        bsizer_pic_code.Add(self.login_tips_pic_code, proportion=0,
                            flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
        bsizer_pic_code.Add(self.login_pic_code_box, proportion=0,
                            flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
        bsizer_pic_code.Add(self.msg_code_pic, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
                            border=5)
        # clicking the captcha bitmap refreshes it
        self.Bind(wx.EVT_BUTTON, self.get_code, self.msg_code_pic)
        bsizer_msg_code.Add(self.login_tips_msg_code, proportion=0,
                            flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
        bsizer_msg_code.Add(self.login_msg_code_box, proportion=0,
                            flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
        bsizer_msg_code.Add(self.bt_send_msg, proportion=0,
                            flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
        # wx.VERTICAL stacks the row sizers top-to-bottom
        bsizer_all = wx.BoxSizer(wx.VERTICAL)
        # proportion=0 keeps each row at its natural height
        bsizer_all.Add(bsizer_phone, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
        bsizer_all.Add(bsizer_wkno, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
        bsizer_all.Add(bsizer_password, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
        bsizer_all.Add(bsizer_mac, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
        bsizer_all.Add(bsizer_pic_code, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
        bsizer_all.Add(bsizer_msg_code, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
        bsizer_all.Add(bsizer_button, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
        panel.SetSizer(bsizer_all)
        self.SetIcon(GetMondrianIcon())
        # self.bt_send_act_msg = wx.Button(panel, label='发送验证码')
    def OnClose(event, evt):
        # NOTE(review): the first parameter is the bound instance (normally
        # spelled `self`); closing the window terminates the whole process
        sys.exit(0)
    def Onmsgbox(self, evt=None, msg=''):
        # show *msg* in a modal information box
        wx.MessageBox(msg, "Message", wx.OK | wx.ICON_INFORMATION)
    def encryption_data(self, workNo, login_phone, login_password, yzmbox):
        # run the portal's sendRandomUser JS routine and return the
        # (mobileNo, workNo, password) triple it produces
        d = self.ctx.call("sendRandomUser", workNo, login_phone, login_password, yzmbox)
        return d[0], d[1], d[2]
    def get_code(self, evt=None):
        """Download a fresh captcha image.

        With an event (bitmap click) it refreshes the on-screen captcha;
        without one it returns the image as a BytesIO stream for the
        initial window construction.
        """
        global session
        code_url = self.ctx.call(u'yzmboxClick')
        # print(u'获取验证码!')
        data = session.get(url=code_url, verify=False)
        # img_data = save_code(data.content)
        data_stream = io.BytesIO(data.content)
        if evt:
            self.image = wx.Image(data_stream, wx.BITMAP_TYPE_JPEG).ConvertToBitmap()
            # update the bitmap shown on the captcha button
            self.msg_code_pic.SetBitmap(wx.Bitmap(self.image))
            # self.msg_code_pic.SetBitmap(wx.BitmapFromBuffer(img_data))
            self.Onmsgbox(evt, '刷新验证码成功!')
        else:
            return data_stream
    def get_login_phone_msg(self, evt):
        """Validate the form and ask the portal to send the login SMS code."""
        global session, encryped_mobileNoNew, encryped_workNoNew, encryped_passwordNew, yzmbox, login_phone, workNo, login_password, clientMac
        try:
            login_phone = self.login_phone_box.GetValue().strip()
            workNo = self.login_wkno_box.GetValue().strip()
            login_password = self.login_password_box.GetValue().strip()
            yzmbox = self.login_pic_code_box.GetValue().strip()
            clientMac = self.login_mac_box.GetValue().strip()
            if not all([login_phone, workNo, login_password, yzmbox, clientMac]):
                # print('某个字段为空')
                self.Onmsgbox(evt, '某个字段为空!')
                return ''
            try:
                encryped_mobileNoNew, encryped_workNoNew, encryped_passwordNew = self.encryption_data(
                    workNo, login_phone, login_password, yzmbox)
            except:
                self.Onmsgbox(evt, '参数不合法!')
                return ''
            url = 'https://211.138.30.200/WSKF/s_channel/ajaxLoginSendMsgAction.action'
            parms = {
                'userName': encryped_mobileNoNew,
                'workNo': encryped_workNoNew,
                'password': encryped_passwordNew,
                'yzmbox': yzmbox,
                'clientMac': clientMac,
            }
            # print('发送短信验证码!')
            data = session.post(url=url, data=parms, timeout=timeout, verify=False)
            # the portal answers with this exact success string
            if data.text == '验证码已经发送,请注意查收!':
                # print('发送成功!')
                self.Onmsgbox(evt, '验证码已经发送,请注意查收!')
                action_frame.text_contents.AppendText(u'验证码已经发送,请注意查收!\n')
                return True
            else:
                self.Onmsgbox(evt, '验证码发送失败!')
                action_frame.text_contents.AppendText(u'验证码发送失败!\n')
                # print(data.text)
                return False
        except requests.ReadTimeout:
            self.Onmsgbox(msg='请求超时!')
            print(traceback.format_exc())
            print('请求超时!')
    def first_request(self):
        """Warm up the shared session (obtains the portal's base cookies)."""
        global session
        try:
            # NOTE(review): the login page is fetched twice — presumably
            # deliberate to collect all cookies; confirm against the portal
            session.get(url='https://211.138.30.200/WSKF/s_channel/login.action', timeout=timeout, verify=False)
            session.get(url='https://211.138.30.200/WSKF/s_channel/login.action', timeout=timeout, verify=False)
        except:
            print(traceback.format_exc())
        return session
    def open_cookie(self):
        """Load cookies.json and return it as a name -> value dict."""
        myCookie = {}
        with open('cookies.json', 'r') as f:
            listCookies = json.loads(f.read())
        for cookie in listCookies:
            myCookie.update({cookie['name']: cookie['value']})
        return myCookie
    def login_by_cookie(self, evt):
        """Reuse the cookies saved by a previous password login."""
        global session, workNo, login_phone, login_password
        try:
            # warm up the session to obtain the base cookie jar
            self.first_request()
            cookies = self.open_cookie()
            # graft the saved session cookies into requests' internal jar
            session.cookies._cookies['211.138.30.200']['/WSKF']['JSESSIONID'].value = cookies['JSESSIONID']
            session.cookies._cookies['211.138.30.200']['/WSKF/s_channel']['randomStr_HW'].value = cookies[
                'randomStr_HW']
            workNo = cookies['workNo']
            login_phone = cookies['login_phone']
            login_password = cookies['login_password']
            url = u'https://211.138.30.200/WSKF/s_channel/mainAction.action'
            data = session.get(url=url, timeout=timeout, verify=False)
            # the "please enter your work number" prompt marks a failed login
            if u'请先输入工号' not in data.text:
                # print(u'登录成功\n')
                # print('手机号: {}\n'.format(re.findall('手机号:(.*?)</li>', data.text)[0]))
                # print(u'工号: %s\n' % re.findall('工号:(.*?)</li>', data.text)[0])
                if score_switch:
                    result = get_score()
                    if not result:
                        self.Onmsgbox(evt, u'登录失败,验证未通过')
                        return
                self.Onmsgbox(evt, u'登录成功!')
                self.Destroy()
                action_frame.Show()
                if score_switch:
                    action_frame.set_score(result['score'])
                action_frame.text_contents.AppendText(u'登录成功!\n')
                action_frame.text_contents.AppendText(u'手机号: %s\n' % re.findall(u'手机号:(.*?)</li>', data.text)[0])
                action_frame.text_contents.AppendText(u'工号: %s\n' % re.findall(u'工号:(.*?)</li>', data.text)[0])
                # return data.text
            else:
                # print(u'登录失败')
                self.Onmsgbox(evt, u'登录失败,凭证过期')
                self.get_code(evt)
                return ''
        except requests.ReadTimeout:
            self.Onmsgbox(msg='请求超时!')
            print(traceback.format_exc())
            print('请求超时!')
        except:
            print(traceback.format_exc())
            return ''
    def login(self, evt):
        """Full password + SMS-code login; saves cookies.json on success."""
        global session, encryped_mobileNoNew, encryped_workNoNew, encryped_passwordNew, yzmbox, login_phone, workNo, login_password, clientMac
        try:
            # collect and trim all form fields first
            login_msg_code = self.login_msg_code_box.GetValue().strip()
            login_phone = self.login_phone_box.GetValue().strip()
            workNo = self.login_wkno_box.GetValue().strip()
            login_password = self.login_password_box.GetValue().strip()
            yzmbox = self.login_pic_code_box.GetValue().strip()
            clientMac = self.login_mac_box.GetValue().strip()
            if not all([clientMac, login_phone, workNo, login_password, login_msg_code]):
                # print(u'某个字段为空')
                self.Onmsgbox(evt, u'某个字段为空!')
                return ''
            self.first_request()
            # machine-fingerprint payload the portal expects before login
            parms = {
                'info.brower': 'IE7.0',
                'info.os': 'Win10 32位',
                'info.mac': clientMac,
                'info.workno': workNo,
                'info.mobileno': login_phone}
            self.check_info(parms)
            headers = {
                'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',
                'Referer': 'https://211.138.30.200/WSKF/s_channel/login.action',
                'Accept-Language': 'zh-CN',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
                'Content-Type': 'application/x-www-form-urlencoded',
                'Accept-Encoding': 'gzip, deflate',
                'Host': '211.138.30.200',
                'Connection': 'Keep-Alive',
                'Cache-Control': 'no-cache'
            }
            url = 'https://211.138.30.200/WSKF/s_channel/ajaxMainAction.action'
            # reuses the names encrypted earlier by get_login_phone_msg()
            parms = {
                'loginCondition.mobileNo': encryped_mobileNoNew,
                'loginCondition.workNo': encryped_workNoNew,
                'loginCondition.password': encryped_passwordNew,
                'loginCondition.yzmbox': yzmbox,
                'loginCondition.randCode': login_msg_code,
                'clientIp': '',
                'clientMac': clientMac
            }
            data = session.post(url=url, data=parms, headers=headers, timeout=timeout, verify=False)
            # template for cookies.json; values are filled in below on success
            cookies = [{"domain": "211.138.30.200", "name": "JSESSIONID", "value": "",
                        "path": "//WSKF", "httpOnly": True, "secure": True},
                       {"domain": "211.138.30.200", "name": "randomStr_HW", "value": "",
                        "path": "//WSKF/s_channel/", "httpOnly": True, "secure": True},
                       {'name': 'workNo', 'value': workNo},
                       {'name': 'login_phone', 'value': login_phone},
                       {'name': 'login_password', 'value': login_password}]
            if u'请先输入工号' not in data.text:
                for i in cookies:
                    if i['name'] == u'JSESSIONID':
                        i['value'] = session.cookies._cookies['211.138.30.200']['/WSKF']['JSESSIONID'].value
                    if i['name'] == u'randomStr_HW':
                        i['value'] = session.cookies._cookies['211.138.30.200']['/WSKF/s_channel']['randomStr_HW'].value
                with open('cookies.json', 'w+') as f:
                    f.write(json.dumps(cookies))
                # print('登录成功')
                # print('手机号: %s' % re.findall('手机号:(.*?)</li>', data.text)[0])
                # print('工号: %s' % re.findall('工号:(.*?)</li>', data.text)[0])
                if score_switch:
                    result = get_score()
                    if not result:
                        self.Onmsgbox(evt, u'登录失败,验证未通过')
                        return
                self.Onmsgbox(evt, u'登录成功!')
                self.Destroy()
                action_frame.Show()
                if score_switch:
                    action_frame.set_score(result['score'])
                action_frame.text_contents.AppendText(u'登录成功!\n')
                action_frame.text_contents.AppendText(u'手机号: %s\n' % re.findall('手机号:(.*?)</li>', data.text)[0])
                action_frame.text_contents.AppendText(u'工号: %s\n' % re.findall('工号:(.*?)</li>', data.text)[0])
                return data.text
            else:
                # print(u'登录失败')
                # print(data.text)
                self.Onmsgbox(evt, '登录失败')
                self.get_code(evt)
                return ''
        except requests.ReadTimeout:
            self.Onmsgbox(msg='请求超时!')
            print(traceback.format_exc())
            print('请求超时!')
        except:
            print(traceback.format_exc())
            return ''
    def check_info(self, params):
        """POST the browser/OS/MAC fingerprint the portal expects before login."""
        global session
        url = 'https://211.138.30.200/WSKF/s_channel/checkComInfo.action'
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Accept': 'text/plain, */*; q=0.01',
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': 'https://211.138.30.200/WSKF/s_channel/login.action',
            'Accept-Language': 'zh-cn',
            'Accept-Encoding': 'gzip, deflate',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
            'Host': '211.138.30.200',
            'Connection': 'Keep-Alive',
            'Cache-Control': 'no-cache'
        }
        try:
            session.post(url=url, headers=headers, data=params, verify=False, timeout=timeout)
            return True
        except requests.ReadTimeout:
            self.Onmsgbox(msg='请求超时!')
            print(traceback.format_exc())
            print('请求超时!')
        except:
            # print(traceback.format_exc())
            return
class YD_MAKE_ORDER(wx.Frame):
def __init__(self, parent, id):
self.locale = wx.Locale(wx.LANGUAGE_ENGLISH)
wx.Frame.__init__(self, parent, id, versionName, size=(550, 400))
self.user_score = 0
self.no_score = False
# 创建面板
panel = wx.Panel(self)
self.Bind(wx.EVT_CLOSE, self.OnClose)
# 添加容器,容器中控件按横向并排排列
bsizer_top = wx.BoxSizer(wx.HORIZONTAL)
self.act_type_list = [u'一键迁转', u'产品订购']
self.act_type_choose = wx.Choice(panel, -1, choices=self.act_type_list)
self.act_type_choose.Select(0)
self.Bind(wx.EVT_CHOICE, self.on_choice_act_type, self.act_type_choose)
self.bt_search_act2 = wx.Button(panel, label=u'锁定活动')
self.bt_login = wx.Button(panel, label=u'刷新')
self.Bind(wx.EVT_BUTTON, self.refrash, self.bt_login)
self.Bind(wx.EVT_BUTTON, self.lock_act, self.bt_search_act2)
# self.Bind(wx.EVT_BUTTON, self.one_flow, self.bt_login)
bsizer_top.Add(self.act_type_choose, proportion=4, flag=wx.EXPAND | wx.LEFT, border=4)
bsizer_top.Add(self.bt_search_act2, proportion=1, flag=wx.EXPAND | wx.LEFT, border=5)
bsizer_top.Add(self.bt_login, proportion=1, flag=wx.EXPAND | wx.LEFT, border=5)
bsizer_cust_phone = wx.BoxSizer(wx.HORIZONTAL)
self.st_tips3 = wx.StaticText(panel, 0, u"手机号 :", style=wx.TE_LEFT)
self.cust_phone_box = wx.TextCtrl(panel, style=wx.TE_LEFT)
# 搜索具体ID
self.act_id_box = wx.TextCtrl(panel, style=wx.TE_LEFT)
self.act_id_box.Enable(False)
self.bt_search_act = wx.Button(panel, label=u'查询活动')
self.Bind(wx.EVT_BUTTON, self.make_mv_action, self.bt_search_act)
bsizer_cust_phone.Add(self.st_tips3, proportion=0, flag=wx.EXPAND | wx.TOP | wx.RIGHT, border=10)
bsizer_cust_phone.Add(self.cust_phone_box, proportion=0, flag=wx.CENTER, border=15)
bsizer_cust_phone.Add(self.act_id_box, proportion=0, flag=wx.CENTER, border=15)
bsizer_cust_phone.Add(self.bt_search_act, proportion=1, flag=wx.EXPAND | wx.LEFT, border=5)
# bsizer_cust_phone.Add(self.bt_send_act_msg, proportion=0, flag=wx.EXPAND | wx.LEFT, border=15)
# bsizer_cust_phone.Add(self.bt_make_order, proportion=0, flag=wx.EXPAND | wx.LEFT, border=15)
# 业务选择
bsizer_act = wx.BoxSizer(wx.VERTICAL)
self.st_tips = wx.StaticText(panel, 0, u"活动选择 :", style=wx.TE_LEFT)
self.act_list = []
self.act_choose = wx.Choice(panel, -1, choices=self.act_list)
self.Bind(wx.EVT_CHOICE, self.on_choice, self.act_choose)
self.st_tips2 = wx.StaticText(panel, 0, u"套餐选择 :", style=wx.TE_LEFT)
self.act_son_list = []
self.act_son_choose = wx.Choice(panel, -1, choices=self.act_son_list)
self.Bind(wx.EVT_CHOICE, self.son_on_choice, self.act_son_choose)
bsizer_act.Add(self.st_tips, proportion=0, flag=wx.ALIGN_TOP, border=1)
bsizer_act.Add(self.act_choose, proportion=0, flag=wx.EXPAND | wx.ALIGN_TOP, border=1)
bsizer_act.Add(self.st_tips2, proportion=0, flag=wx.EXPAND | wx.ALIGN_TOP, border=1)
bsizer_act.Add(self.act_son_choose, proportion=0, flag=wx.EXPAND | wx.ALIGN_TOP, border=1)
bsizer_cust_phone_code = wx.BoxSizer(wx.HORIZONTAL)
self.cust_phone_code_tips = wx.StaticText(panel, 0, u"短信验证码 :", style=wx.TE_LEFT)
self.cust_phone_code_box = wx.TextCtrl(panel, style=wx.TE_LEFT)
self.bt_send_act_msg = wx.Button(panel, label=u'发送验证码')
self.Bind(wx.EVT_BUTTON, self.mv_send_msg, self.bt_send_act_msg)
self.user_score_tips = wx.StaticText(panel, 0, u"剩于点数 :", style=wx.TE_LEFT)
self.user_score_amount = wx.StaticText(panel, 0, str(self.user_score), style=wx.TE_LEFT)
self.bt_make_order = wx.Button(panel, label=u'办理')
self.Bind(wx.EVT_BUTTON, self.make_order, self.bt_make_order)
bsizer_cust_phone_code.Add(self.cust_phone_code_tips, proportion=0,
flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
border=5)
bsizer_cust_phone_code.Add(self.cust_phone_code_box, proportion=0,
flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
border=5)
bsizer_cust_phone_code.Add(self.bt_send_act_msg, proportion=0,
flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,
border=5)
bsizer_cust_phone_code.Add(self.bt_make_order, proportion=0,
flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
bsizer_cust_phone_code.Add(self.user_score_tips, proportion=0,
flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
bsizer_cust_phone_code.Add(self.user_score_amount, proportion=0,
flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)
# bsizer_act.Add(bsizer_cust_phone, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)
bsizer_act.Add(bsizer_cust_phone_code, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)
# self.act_choose.Select(0)
# 创建文本内容框,多行,垂直滚动条
self.text_contents = wx.TextCtrl(panel, style=wx.TE_MULTILINE | wx.VSCROLL)
# 添加容器,容器中控件按纵向并排排列
bsizer_center = wx.BoxSizer(wx.HORIZONTAL)
bsizer_bottom = wx.BoxSizer(wx.HORIZONTAL)
bsizer_bottom.Add(self.text_contents, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)
# wx.VERTICAL 横向分割
bsizer_all = wx.BoxSizer(wx.VERTICAL)
# 添加顶部sizer,proportion=0 代表bsizer_top大小不可变化
bsizer_all.Add(bsizer_top, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
bsizer_all.Add(bsizer_cust_phone, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
bsizer_all.Add(bsizer_act, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
bsizer_all.Add(bsizer_center, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
# 添加顶部sizer,proportion=1 代表bsizer_bottom大小变化
bsizer_all.Add(bsizer_bottom, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)
# self.Bind(wx.EVT_BUTTON, self.onOpen, self.bt_open)
panel.SetSizer(bsizer_all)
self.SetIcon(GetMondrianIcon())
# with open('login.js', encoding='utf-8') as f:
# js = f.read()
js = get_js_file()
self.ctx = execjs.compile(js)
self.refrash()
def OnClose(event, evt):
sys.exit(0)
def set_score(self, score):
    """Show the remaining score/credit in the status label."""
    text = str(score)
    self.user_score_amount.SetLabel(text)
def refrash(self, evt=None, phone=None):
    """Reset all per-transaction state; optionally clear/refill the UI.

    When *evt* is truthy the input widgets are cleared as well; if *phone*
    is given the phone box is refilled and the activity query re-run,
    otherwise (after an optional score re-check) a "refreshed" dialog is
    shown.
    """
    # Wipe every piece of per-order state back to its initial value.
    for attr in ('rand_number', 'userInfoUrl', 'order_son_url', 'cust_phone',
                 'smsPassword', 'order_value', 'order_id', 'son_order_data',
                 'son_order_value', 'son_order_id'):
        setattr(self, attr, '')
    self.order_son_list = []
    self.act_list = []
    self.act_list_2 = []
    self.order_dic = {}
    if not evt:
        return
    # Clear the visible widgets.
    self.cust_phone_box.SetValue('')
    self.cust_phone_code_box.SetValue('')
    self.act_choose.SetItems([])
    self.act_son_choose.SetItems([])
    self.act_id_box.SetValue('')
    if phone:
        # Refill the number and immediately re-query its activities.
        self.cust_phone_box.SetValue(phone)
        self.make_mv_action()
        return
    if score_switch:
        result = get_score()
        if not result:
            self.Onmsgbox(evt, u'登录失败,验证未通过')
            return
        action_frame.set_score(result['score'])
    self.Onmsgbox(evt, '刷新成功')
    self.text_contents.AppendText(u'刷新成功!\n')
# def first_request(self):
# global session
# session.get(url='https://211.138.30.200/WSKF/s_channel/login.action', timeout=10, verify=False)
# session.get(url='https://211.138.30.200/WSKF/s_channel/login.action', timeout=10, verify=False)
# return session
def lock_act(self, evt):
    # Placeholder handler for a feature that is not yet available:
    # just tells the user the function is closed.
    self.Onmsgbox(evt, '未开放')
def get_frame_code(self, html, key):
    """Extract the menuNodeClick(...) argument list for the menu entry *key*.

    *html* is the portal main-page markup; *key* is the visible menu label
    (e.g. '迁转活动一键办理' or '产品订购'). Returns the JS call arguments
    as a list of strings with surrounding single quotes removed. Raises
    IndexError when the entry is absent.
    """
    pattern = 'menuNodeClick\\((.*?)\\);"><span class="sp">{}</span></li>'.format(key)
    raw_args = re.findall(pattern, html)[0]
    return [piece.strip("'") for piece in raw_args.split(',')]
# Fetch the customer's account information
def get_userInfo(self):
    """Query customer information for the current phone number.

    Branch 0 (meal-transfer mode): POSTs to ajaxUserInfoCheck with fields
    parsed back out of self.userInfoUrl (built in make_mv_action), logs a
    cleaned copy of the answer, and returns the raw response text.
    Branch 1 (product-order mode): POSTs the product query to
    self.userInfoUrl and returns the raw response text.
    Returns None implicitly after a read timeout.
    """
    global session
    try:
        if self.act_type_choose.GetCurrentSelection() == 0:
            # NOTE(review): leading space in this URL is preserved from the
            # original source — requests tolerates it, but confirm it is
            # intentional.
            url = ' https://211.138.30.200/WSKF/s_channel/ajaxUserInfoCheck.action'
            # self.userInfoUrl has the shape
            # "<useRequiredId>&pay.mobileNoForGuest=<phone>&t=<token>";
            # split it back into the individual form fields.
            params = {
                'useRequiredId': self.userInfoUrl.split('&')[0],
                'pay.mobileNoForGuest': self.userInfoUrl.split('&')[1].split('=')[1],
                't': self.userInfoUrl.split('=')[-1]
            }
            headers = {
                'Accept': 'text/plain, */*; q=0.01',
                'X-Requested-With': 'XMLHttpRequest',
                'Referer': 'https://211.138.30.200/WSKF/s_channel/mealTransferInit.action?CBBSaleFlag=sale&auth=TCQZ&t={}'.format(
                    self.rand_number),
                'Accept-Language': 'zh-CN',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
                'Accept-Encoding': 'gzip, deflate',
                'Host': '211.138.30.200',
                'Connection': 'Keep - Alive',
            }
            data = session.post(url=url, headers=headers, data=params, verify=False, timeout=timeout)
            self.text_contents.AppendText('用户信息\n')
            # Strip the simple HTML markup before echoing into the log pane.
            self.text_contents.AppendText(data.text.replace('</b>', '').replace(' ', '').replace('<b>', ' '))
            self.text_contents.AppendText('\n')
            return data.text
        else:
            params = {
                'incrementProductBookCondition.productType': 2,
                'incrementProductBookCondition.mobileNo': self.cust_phone,
            }
            headers = {
                'Accept': 'text/plain, */*; q=0.01',
                'X-Requested-With': 'XMLHttpRequest',
                'Referer': 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action?incrementProductBookCondition.productType=2&CBBSaleFlag=sale&auth=L&t={}'.format(
                    self.rand_number),
                'Accept-Language': 'zh-CN',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
                'Accept-Encoding': 'gzip, deflate',
                'Host': '211.138.30.200',
                'Connection': 'Keep - Alive',
            }
            # In this mode self.userInfoUrl is the marketing-action endpoint
            # set in make_mv_action.
            data = session.post(url=self.userInfoUrl, headers=headers, data=params, verify=False, timeout=timeout)
            self.text_contents.AppendText('查询中...\n')
            return data.text
    except requests.ReadTimeout:
        self.Onmsgbox(msg='请求超时!')
        print(traceback.format_exc())
        print('请求超时!')
# Handler: the business-type (transfer vs. product order) selection changed
def on_choice_act_type(self, evt=None):
    """Toggle which input widgets are active for the chosen business type.

    Selection 1 (product order) uses the free-text activity-id box;
    selection 0 (meal transfer) uses the sub-activity drop-down instead.
    """
    if evt:
        self.refrash(evt, self.cust_phone)
    is_product_mode = self.act_type_choose.GetCurrentSelection() == 1
    self.act_son_choose.Enable(not is_product_mode)
    self.act_id_box.Enable(is_product_mode)
# Verify the SMS code with the CRM
def check_msg(self):
    """POST the SMS verification code to the CRM check endpoint.

    Sends self.smsPassword for self.cust_phone. The response is not
    inspected here — the subsequent order submission is what fails if the
    code was wrong — so this call is deliberately best-effort.
    """
    headers = {
        'X-Requested-With': 'XMLHttpRequest',
        'X-Prototype-Version': '1.7.2',
        'Accept': 'text/javascript, text/html, application/xml, text/xml, */*',
        'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Referer': 'https://211.138.30.200/WSKF/s_channel/mealTransferInit.action?CBBSaleFlag=sale&auth=TCQZ&t={}'.format(
            self.rand_number),
        'Accept-Language': 'zh-cn',
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Host': '211.138.30.200',
        'Connection': 'Keep-Alive',
        'Cache-Control': 'no-cache',
    }
    if self.act_type_choose.GetCurrentSelection() == 1:
        # Product-order mode originates from a different portal page.
        headers[
            'Referer'] = 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action?incrementProductBookCondition.productType=2&CBBSaleFlag=sale&t={}'.format(
            self.rand_number)
    url = 'https://211.138.30.200/WSKF/s_channel/ajaxCheckCrmSms.action'
    params = {
        'mobileNo': self.cust_phone,
        'checkCode': self.smsPassword
    }
    try:
        # Response deliberately ignored (see docstring).
        session.post(url=url, headers=headers, data=params, verify=False, timeout=timeout)
    except requests.ReadTimeout:
        self.Onmsgbox(msg='请求超时!')
        print(traceback.format_exc())
        print('请求超时!')
    except Exception:
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; still best-effort for ordinary errors.
        pass
# Build/refresh the activity lists for the entered phone number
def make_mv_action(self, evt=None):
    """Initialize a portal session page and (optionally) load activity lists.

    Always: fetches the init page for the selected business type and parses
    self.userInfoUrl / self.randomString out of it. When *evt* is truthy it
    additionally fills self.act_list (and the activity combo box) — either
    from the transfer page, from an id search, or from get_book_list().
    """
    # mv_frame_params = get_frame_code(html, '迁转活动一键办理')
    self.cust_phone = self.cust_phone_box.GetValue().strip()
    self.user_act_id = self.act_id_box.GetValue().strip()
    if not self.cust_phone:
        self.Onmsgbox(None, '先输入手机号')
        return
    # self.rand_number = random.randint(10000, 90000)
    # NOTE(review): the cache-buster token is hard-coded instead of random —
    # presumably harmless, but confirm the portal does not validate it.
    self.rand_number = 40713
    headers = {
        'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',
        'Referer': 'https://211.138.30.200/WSKF/s_channel/mainAction.action',
        'Accept-Language': 'zh-CN',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Accept-Encoding': 'gzip, deflate',
        'Host': '211.138.30.200',
        'Connection': 'Keep - Alive',
    }
    if self.act_type_choose.GetCurrentSelection() == 0:
        # Meal-transfer init page.
        url = 'https://211.138.30.200/WSKF/s_channel/mealTransferInit.action'
        params = {
            'CBBSaleFlag': 'sale',
            'auth': 'TCQZ',
            't': str(self.rand_number)
        }
    else:
        # Value-added product-order init page.
        url = 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action'
        params = {
            'incrementProductBookCondition.productType': 2,
            'CBBSaleFlag': 'sale',
            'auth': 'L',
            't': str(self.rand_number)
        }
    # NOTE(review): `data=` on a GET sends a request body, not a query
    # string — the auth/t parameters may never reach the server. It appears
    # to work in practice; confirm before "fixing" to params=.
    data = session.get(url=url, headers=headers, data=params, verify=False, timeout=timeout)
    # print(data.status_code)
    if self.act_type_choose.GetCurrentSelection() == 0:
        # Pull the user-info AJAX URL template out of the page's JS and bake
        # the customer's number into it.
        self.userInfoUrl = \
            re.findall('/s_channel/ajaxUserInfoCheck\.action\?useRequiredId=(.*?"&t=.*?)"', data.text)[
                0].replace(
                '"+mobileNoForGuest+"', str(self.cust_phone))
        self.randomString = re.findall('name="randomString" value="(.*?)"', data.text)[0]
    else:
        self.userInfoUrl = 'https://211.138.30.200/WSKF/s_channel/ajaxIncrementProMarketingAction.action'
        self.randomString = re.findall('name="randomString" value="(.*?)"', data.text)[0]
    # Fetch customer info (disabled)
    # userInfo = self.get_userInfo()
    # print(userInfo)
    if self.act_type_choose.GetCurrentSelection() == 0:
        if evt:
            # Turn the JS URL-building expression into a str.format template
            # with two slots: ployId and the phone number.
            self.order_son_url = re.findall(
                '"(ajaxGetTransferMeal\.action\?ployId="\+document\.getElementById\("ployId"\)\.value\+"&serviceMobileNo="\+mobileNoForGuest\+"&t=.*?)"',
                data.text)[0].replace('"+document.getElementById("ployId").value+"', '{}').replace(
                '"+mobileNoForGuest+"',
                '{}')
            # Scrape the transfer-activity names from the radio-button table.
            self.act_list = [i.strip('\t') for i in re.findall(
                '<tr class="table_list_left"><td class="table_list_right"> <input type="radio" name="ployCode" value=.*?>.*?>.*?>(.*?)</td></tr>',
                data.text)]
            self.act_choose.SetItems(self.act_list)
            # for i in self.act_list:
            #     print(i)
    else:
        if evt:
            if self.user_act_id:
                # The user typed an activity id/keyword: search for it.
                url = 'https://211.138.30.200/WSKF/s_channel/ajaxShowSearchIncBookNext.action'
                headers[
                    'Referer'] = 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action?incrementProductBookCondition.productType=2&CBBSaleFlag=sale&t={}'.format(
                    self.rand_number)
                params = {
                    'phoneForGuest': self.cust_phone,
                    'searchKey': self.user_act_id,
                }
                try:
                    data = session.post(url=url, headers=headers, data=params, verify=False, timeout=timeout)
                except requests.ReadTimeout:
                    # NOTE(review): no return here — after a timeout the
                    # stale `data` from the GET above reaches data.json()
                    # below and will raise. Confirm intended behavior.
                    self.Onmsgbox(msg='请求超时!')
                    print(traceback.format_exc())
                    print('请求超时!')
                except:
                    print(traceback.format_exc())
                    return
                rec_data = data.json()
                # Example response shape kept for reference:
                # self.act_list = {"message": {
                #     "600000611567": ["600000611567", "19元家庭流量包2个月优惠活动(主副卡)", "X13201812015001", "1",
                #                      "68元及以上套餐客户订购19元家庭流量包,赠送19元*2个月分摊流量费,参与活动60天优惠期内套餐不能降档,家庭流量包不能退订或降档。",
                #                      "2019-06-05 23:59:59"]}, "pageIndex": 1, "startIndex": 0}
                # '('600000266297')">10元语音包免费用6个月(X00592103)</'
                self.act_list = []
                # Each entry becomes [id, "name(code)"].
                for i in rec_data['message'].keys():
                    self.act_list.append([i, rec_data['message'][i][1] + '(' + rec_data['message'][i][2] + ')'])
                # Also search the full value-added product catalogue.
                # d = {
                #     "ipbMap": {
                #         "100160000164": ["100160000164", "短信派6元包", "PIXZF01", "1", "2999-12-31 23:59:59"],
                #         "100160000165": ["100160000165", "来电提醒", "PIXBPA117", "1", "2999-12-31 23:59:59"],
                #         "100160000299": ["100160000299", "优惠新干线", "PIXYHXGX", "1", "2099-12-31 00:00:00"]},
                # }
                all_act_json = self.find_all_act()
                for i in all_act_json['ipbMap'].keys():
                    # Keep only products whose id or "name(code)" label
                    # contains the user's search key.
                    if (self.user_act_id not in i) and (
                            self.user_act_id not in all_act_json['ipbMap'][i][1] + '(' + all_act_json['ipbMap'][i][
                        2] + ')'):
                        continue
                    self.act_list.append(
                        [i, all_act_json['ipbMap'][i][1] + '(' + all_act_json['ipbMap'][i][2] + ')'])
            else:
                # No search key: load the full bookable list.
                self.act_list = self.get_book_list()
            self.act_choose.SetItems([i[1] + '/' + i[0] for i in self.act_list])
    # else:
    #     self.act_list = self.get_book_list()
    # for i in self.act_list:
    #     print(i)
    if evt:
        self.Onmsgbox(evt, '查询成功!')
        self.text_contents.AppendText('查询成功!\n')
# Query all value-added products
def find_all_act(self):
    """Query every marketing/value-added product for the current phone.

    This endpoint also returns account information. Side effect: caches the
    product ids in self.act_list_2. Returns the decoded JSON dict; on read
    timeout returns {'ipbMap': {}} so callers that iterate
    result['ipbMap'] keep working.
    """
    params = [('incrementProductBookCondition.productType', 3),
              ('incrementProductBookCondition.mobileNo', self.cust_phone), ]
    headers = {
        'Accept': 'text/plain, */*; q=0.01',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action',
        'Accept-Language': 'zh-CN',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Accept-Encoding': 'gzip, deflate',
        'Host': '211.138.30.200',
        'Connection': 'Keep - Alive',
    }
    try:
        data = session.post(url='https://211.138.30.200/WSKF/s_channel/queryMarketingProduct.action',
                            headers=headers, data=params, verify=False, timeout=timeout)
        final_data = data.json()
        self.act_list_2 = list(final_data['ipbMap'])
        return final_data
    except requests.ReadTimeout:
        self.Onmsgbox(msg='请求超时!')
        print(traceback.format_exc())
        print('请求超时!')
        # FIX: was an implicit None return, which crashed make_mv_action's
        # all_act_json['ipbMap'] access after a timeout.
        return {'ipbMap': {}}
# Query the bookable activity list
def get_book_list(self):
    """Query the bookable value-added product list for the current phone.

    Also refreshes self.randomString from the response page. Returns a list
    of (product_id, display_label) tuples; an empty list on read timeout.
    """
    global session
    params = [('randomString', self.randomString), ('incrementProductBookCondition.productName', ''),
              ('incrementProductBookCondition.mobileNo', self.cust_phone),
              ('incrementProductBookCondition.productType', '2'), ('imeiNo', ''), ('searchProduct', ''),
              ('searchProduct', ''), ('incrementProductBookCondition.str2', '',)]
    headers = {
        'Accept': 'text/plain, */*; q=0.01',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action?incrementProductBookCondition.productType=2&CBBSaleFlag=sale&auth=L&t={}'.format(
            self.rand_number),
        'Accept-Language': 'zh-CN',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Accept-Encoding': 'gzip, deflate',
        'Host': '211.138.30.200',
        'Connection': 'Keep - Alive',
    }
    try:
        data = session.post(url='https://211.138.30.200/WSKF/s_channel/incrementProBookListAction.action',
                            headers=headers, data=params, verify=False, timeout=timeout)
        # NOTE(review): raises IndexError if the page layout changes and the
        # randomString field is missing — propagates to the caller.
        self.randomString = re.findall('name="randomString" value="(.*?)"/>', data.text)[0]
        act_list = re.findall('\'(.*?)\'\)">(.*?)</', data.text)
        return act_list
    except requests.ReadTimeout:
        # FIX: message must be passed as msg=...; a positional string lands
        # in Onmsgbox's unused `event` parameter and showed an empty dialog.
        self.Onmsgbox(msg='请求超时!')
        print(traceback.format_exc())
        print('请求超时!')
        return []
def on_choice(self, event):
    """Record the selected activity; in transfer mode also resolve its id.

    In product-order mode (selection 1) the id is parsed elsewhere, so we
    only remember the label.
    """
    self.order_value = event.GetString()
    # print("选择{0}".format(self.order_value))
    if self.act_type_choose.GetCurrentSelection() == 1:
        return
    # Normalize full-width parentheses, then pull the trailing "(id)" part
    # out of the label and strip the closing paren.
    label = self.act_choose.GetStringSelection()
    label = label.replace('(', '(').replace(')', ')')
    self.order_id = label.split('(')[-1].rstrip(')')
    self.get_order_son()
def son_on_choice(self, event):
    """Remember the chosen sub-package and look up its instance id."""
    choice = event.GetString()
    # print("选择{0}".format(choice))
    self.son_order_data = choice
    self.son_order_value = choice
    self.son_order_id = self.order_dic[choice]
def get_order_son(self):
    """Load the sub-package list for the chosen transfer activity.

    Fills self.order_son_list / self.order_dic (name -> instanceId) and the
    sub-activity combo box; warns when nothing is available. Returns None.
    """
    url = 'https://211.138.30.200/WSKF/s_channel/ajaxGetTransferMeal.action'
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept': 'text/html, */*; q=0.01',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'https://211.138.30.200/WSKF/s_channel/mainAction.action',
        'Accept-Language': 'zh-cn',
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Host': '211.138.30.200',
        'Connection': 'Keep-Alive',
        'Cache-Control': 'no-cache',
    }
    if not self.cust_phone:
        self.Onmsgbox(None, '先输入手机号')
        return
    params = {
        'ployId': self.order_id,
        'serviceMobileNo': self.cust_phone,
        # self.order_son_url is a format template built in make_mv_action;
        # the third query field of the rendered URL carries the "t" token.
        't': self.order_son_url.format(self.order_id, self.cust_phone).split('&')[2].split('=')[1]
    }
    try:
        data = session.post(url=url, headers=headers, data=params, verify=False, timeout=timeout)
        self.order_dic = {}
        try:
            # Pairs of (instanceId, display name) scraped from the page.
            self.order_son_list = re.findall('<span id=\'instanceId(.*?)\'>(.*?)</span>', data.text)
            for i in self.order_son_list:
                self.order_dic[i[1]] = i[0]
            self.act_son_choose.SetItems(list(self.order_dic.keys()))
        except Exception:
            # FIX: narrowed from a bare `except:`; still best-effort.
            self.order_son_list = []
        if not self.order_son_list:
            self.Onmsgbox(None, '没有套餐可以选择')
            self.text_contents.AppendText('没有套餐可以选择\n')
    except requests.ReadTimeout:
        # FIX: keyword msg= — a positional string lands in Onmsgbox's unused
        # `event` parameter and previously showed an empty dialog.
        self.Onmsgbox(msg='请求超时!')
        print(traceback.format_exc())
        print('请求超时!')
        return None
# 68418
def mv_send_msg(self, evt):
    """Ask the portal to send an SMS verification code to the customer.

    Returns True when the portal confirms delivery, False for any other
    answer, and None on read timeout or missing phone number.
    """
    global session
    self.cust_phone = self.cust_phone_box.GetValue().strip()
    if not self.cust_phone:
        self.Onmsgbox(None, '先输入手机号')
        return
    parms = {
        'mobileNo': self.cust_phone
    }
    headers = {
        'X-Requested-With': 'XMLHttpRequest',
        'X-Prototype-Version': '1.7.2',
        'Accept': 'text/javascript, text/html, application/xml, text/xml, */*',
        'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Referer': 'https://211.138.30.200/WSKF/s_channel/mealTransferInit.action?CBBSaleFlag=sale&auth=TCQZ&t={}'.format(
            self.rand_number),
        'Accept-Language': 'zh-cn',
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Host': '211.138.30.200',
        'Connection': 'Keep-Alive',
        'Cache-Control': 'no-cache',
    }
    if self.act_type_choose.GetCurrentSelection() == 1:
        # Product-order mode originates from a different portal page.
        headers[
            'Referer'] = 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action?incrementProductBookCondition.productType=2&CBBSaleFlag=sale&t={}'.format(
            self.rand_number)
    try:
        # FIX: pass timeout so the ReadTimeout handler below can actually
        # fire (every other request in this class passes it).
        data = session.post(url='https://211.138.30.200/WSKF/s_channel/ajaxSendCrmSms.action', headers=headers,
                            data=parms,
                            verify=False, timeout=timeout)
        self.Onmsgbox(evt, data.text)
        self.text_contents.AppendText(data.text + '\n')
        return data.text == u'随机码发送成功!'
    except requests.ReadTimeout:
        # FIX: keyword msg= — a positional string lands in Onmsgbox's unused
        # `event` parameter and previously showed an empty dialog.
        self.Onmsgbox(msg='请求超时!')
        print(traceback.format_exc())
        print('请求超时!')
        return None
def make_order(self, evt):
    """Submit the order for the currently selected activity/package.

    Branch 1 (product order): replays the browser's pre-check endpoints and
    books the product. Branch 0 (meal transfer): submits the transfer with
    the SMS code. The human-readable result is parsed from the response,
    shown/logged, and — when scoring is enabled — charged via cost_score().
    """
    global session
    try:
        if score_switch:
            # Re-validate and refresh the remaining score before ordering.
            result = get_score()
            if not result:
                self.Onmsgbox(evt, u'验证未通过')
                return
            else:
                self.set_score(result['score'])
                if result['score'] <= 0:
                    self.Onmsgbox(evt, u'剩余点数不足')
                    self.no_score = True
                    return
        # Refresh-before-order disabled:
        # self.make_mv_action()
        # if self.no_score:
        #     return
        if self.act_type_choose.GetCurrentSelection() == 1:
            self.smsPassword = self.cust_phone_code_box.GetValue().strip()
            self.check_msg()
            # self.order_value has the shape "<name>/<id>" (built in
            # make_mv_action).
            self.proId = self.order_value.split('/')[1]
            self.proName = self.order_value.split('/')[0]
            # Portal pre-checks, mirroring the browser flow; responses are
            # not inspected.
            url = 'https://211.138.30.200/WSKF/s_channel/ajaxIsPrivalNo.action'
            params = {
                'mobileNo': self.cust_phone,
            }
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'Accept': 'text/html, */*; q=0.01',
                'X-Requested-With': 'XMLHttpRequest',
                'Referer': 'https://211.138.30.200/WSKF/s_channel/incrementProBookListAction.action',
                'Accept-Language': 'zh-cn',
                'Accept-Encoding': 'gzip, deflate',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
                'Host': '211.138.30.200',
                'Connection': 'Keep-Alive',
                'Cache-Control': 'no-cache',
            }
            data = session.post(url=url, headers=headers, data=params, verify=False)
            url = 'https://211.138.30.200/WSKF/s_channel/ajaxIsCheck.action'
            params = {
                'mobileNo': self.cust_phone,
                'proId': self.proId,
            }
            data = session.post(url=url, headers=headers, data=params, verify=False)
            url = 'https://211.138.30.200/WSKF/s_channel/ajaxWhetherNeedAction.action'
            params = {
                'mobileNo': self.cust_phone,
                'product': self.proId,
                # Products found via queryMarketingProduct use type 3,
                # ordinary bookable ones type 2.
                'productType': 3 if self.proId in self.act_list_2 else 2
            }
            data = session.post(url=url, headers=headers, data=params, verify=False)
            url = 'https://211.138.30.200/WSKF/s_channel/incrementProductBookAction.action'
            params = [('randomString', self.randomString),
                      ('incrementProductBookCondition.productName', self.proName),
                      ('incrementProductBookCondition.mobileNo', self.cust_phone),
                      ('incrementProductBookCondition.productType', '3' if self.proId in self.act_list_2 else '2'),
                      ('imeiNo', ''),
                      ('searchProduct', ''),
                      ('searchProduct', ''),
                      ('incrementProductBookCondition.product', self.proId),
                      ('incrementProductBookCondition.str2', '',)]
            data = session.post(url=url, headers=headers, data=params, verify=False)
            if self.proId not in self.act_list_2:
                # Ordinary products need the price-item ids scraped from the
                # booking page before the final bookIncPro call.
                self.mainPriceId_N = re.findall("name=\'mainPriceId_N\' value=\'(.*?)\'/>", data.text)
                self.randomString = re.findall('name="randomString" value="(.*?)">', data.text)[0]
                url = 'https://211.138.30.200/WSKF/s_channel/bookIncPro.action'
                mainPriceId_N_params = '|'.join(self.mainPriceId_N) + '|'
                params = [('randomString', self.randomString),
                          ('incrementProductBookCondition.productName', ''),
                          ('incrementProductBookCondition.product', self.proId),
                          ('incrementProductBookCondition.productType', '2'),
                          ('mainAndSubPriceId', mainPriceId_N_params),
                          ('incProdAtrrs', ''),
                          ('incrementProductBookCondition.mobileNo', self.cust_phone),
                          ('termIMEI', '',)]
                for i in self.mainPriceId_N:
                    params.append(('mainPriceId_N', i))
                headers = {
                    'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',
                    'Referer': 'https://211.138.30.200/WSKF/s_channel/incrementProductBookAction.action',
                    'Accept-Language': 'zh-CN',
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
                    'Content-Type': 'application/x-www-form-urlencoded',
                    'Accept-Encoding': 'gzip, deflate',
                    'Host': '211.138.30.200',
                    'Connection': 'Keep-Alive',
                    'Cache-Control': 'no-cache'
                }
                data = session.post(url=url, headers=headers, data=params, verify=False)
        else:
            self.smsPassword = self.cust_phone_code_box.GetValue().strip()
            if not self.smsPassword or not self.cust_phone:
                self.Onmsgbox(None, '信息不完整!')
                return
            self.check_msg()
            url = 'https://211.138.30.200/WSKF/s_channel/mealTransferSubmit.action'
            params = {
                'randomString': self.randomString,
                'serviceMobileNo': self.cust_phone,
                'certType': '200',
                'password': '',
                'smsPassword': self.smsPassword.strip(),
                'ployView': self.order_value,
                'ployId': self.order_id,
                'productView': self.son_order_value,
                'productId': self.son_order_id,
                'instanceId': '',
                # 'productView': '移动流量王全国版-18元套餐(80分钟+200M)(PIXFXQG1)',
                # 'productId': '100168001088',
            }
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'Accept': 'text/html, */*; q=0.01',
                'X-Requested-With': 'XMLHttpRequest',
                'Referer': 'https://211.138.30.200/WSKF/s_channel/mealTransferInit.action?CBBSaleFlag=sale&auth=TCQZ&t={}'.format(
                    self.rand_number),
                'Accept-Language': 'zh-cn',
                'Accept-Encoding': 'gzip, deflate',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
                'Host': '211.138.30.200',
                'Connection': 'Keep-Alive',
                'Cache-Control': 'no-cache',
            }
            data = session.post(url=url, headers=headers, data=params, verify=False)
        try:
            # The portal answers with an icon followed by the human-readable
            # result text.
            result = re.findall('<img src=".*?"/>([\w\W]*?)<', data.text)[0].strip()
        except Exception:
            # FIX: narrowed from a bare `except:`; keep the dump-and-continue
            # behaviour for unparseable responses.
            save_html(data.text)
            result = '办理失败'
        self.refrash(evt=evt, phone=self.cust_phone)
        if score_switch:
            # Charge the score according to the result text.
            try:
                rev_data = cost_score(result)
            except Exception:
                # FIX: narrowed from a bare `except:`.
                rev_data = None
            if rev_data:
                self.set_score(rev_data['score'])
                self.Onmsgbox(evt, rev_data['msg'])
                self.text_contents.AppendText(result + '\n')
            else:
                self.Onmsgbox(evt, '验证失败')
            # print(result)
        else:
            self.Onmsgbox(evt, result)
            self.text_contents.AppendText(result + '\n')
            self.refrash(evt=evt, phone=self.cust_phone)
    except requests.ReadTimeout:
        # FIX: keyword msg= — a positional string lands in Onmsgbox's unused
        # `event` parameter and previously showed an empty dialog.
        self.Onmsgbox(msg='请求超时!')
        print(traceback.format_exc())
        print('请求超时!')
    except Exception:
        # FIX: narrowed from a bare `except:`; still logs and continues.
        print(traceback.format_exc())
def Onmsgbox(self, event=None, msg=''):
    # Show a modal information dialog with *msg*.
    # CAUTION: `event` is unused — callers must pass the text via msg=...;
    # a single positional string argument lands in `event` and produces an
    # empty dialog.
    wx.MessageBox(msg, "Message", wx.OK | wx.ICON_INFORMATION)
def get_js_file():
file = '''function login(obj)
{
var workNo = document.getElementById("workNo").value;
var mac = $("#clientMac").val();
if(!mac){
if(workNo == "A830000"){
alert("login拦截mac为空");
}
if(confirm("为配合MAC地址信息登记,请您下载并运行控件,以免影响您的登录!")){
var location=window.location.href;
var end=location.indexOf("WSKF");
window.location.href=location.substr(0,end+4)+"/download/IE自动设置.zip";
}
return;
}
var mobileNo = document.getElementById("mobileNo").value;
var password = document.getElementById("password").value;
var randCode = document.getElementById("randCode").value;
var yzmbox = document.getElementById("yzmbox").value;
//var isNumber = /^[-]?\d+[.]?\d*$/; //在火狐上此正则表达式不能识别 //||!isNumber.test(mobileNo)
if(mobileNo == 'undefined' || mobileNo.length != 11)
{
alert("请输入11位的电话号码!");
return ;
}
if(workNo == 'undefined' || workNo.length == 0)
{
alert("请输入代理商工号!");
return ;
}
//var validatePwd = /^(?=.*?[a-z])(?=.*?[A-Z])(?=.*?\d)(?=.*?[#@*&.])[a-zA-Z\d#@*&.]*$/;
//var validatePwd = /^(\w)*([#@$^*&.()]*)/;
if(password == 'undefined' || password.length == 0)
{
alert("请输入密码!");
return ;
}
if(yzmbox =='undefined' || yzmbox.length == 0){
alert("请输入图片验证码!");
return ;
}
if(randCode == 'undefined' || randCode.length == 0)
{
alert("请输入获取的短信验证码!");
return ;
}
registComInfo(workNo);
/*
*针对敏感数据明文传输的漏洞
*对用户密码进行des加密,传到java端后再进行des解密
**/
/*
var fKey = mobileNo.substring(0);
var sKey = mobileNo.substring(4);
var tKey = mobileNo.substring(7);
*/
var mobileNoNew = strEncode(mobileNo, "pdcss", "css", "co");
var workNoNew = strEncode(workNo, "pdcss", "css", "co");
var passwordNew = strEncode(password, "pdcss", "css", "co");
document.getElementById("mobileNo").value = mobileNoNew;
document.getElementById("workNo").value = workNoNew;
document.getElementById("password").value = passwordNew;
obj.click = '';
document.forms[0].action="ajaxMainAction.action";
document.forms[0].submit();
}
function sendRandomUser(workNo, userName, password, yzmbox) {
try {
//document.getElementById("divSMSButton").innerHTML="";
if (userName == "") {
//document.getElementById("divSMSButton").innerHTML = '<span style="color: #005BAC;font-size:0.8em;cursor:hand;" id="getSMSCode" onclick="sendRandomUser();" >点击获取短信验证码</span>';
//userName.focus();
return;
}
if (workNo == "") {
//document.getElementById("divSMSButton").innerHTML = '<span style="color: #005BAC;font-size:0.8em;cursor:hand;" id="getSMSCode" onclick="sendRandomUser();" >点击获取短信验证码</span>';
//workNo.focus();
return;
}
var userNameValue = userName;
var workNoValue = workNo;
workNoValue = workNoValue.toUpperCase();
workNo = workNoValue;
var pattern = /^[A-Z]\d{6}$/;
var regExp = /^((((13[5-9]{1})|(147){1}|(178){1}|(198){1}|(15[0,1,2,7,8,9]{1})|(18[2,3,4,7,8]{1})){1}\d{1})|((134[0-8]{1}){1})|((3[0-9]{3}))){1}\d{6,7}$/;
if (!regExp.exec(userNameValue) || !pattern.test(workNoValue)) {
return 1;
}
if (password == "") {
//document.getElementById("divSMSButton").innerHTML = '<span style="color: #005BAC;font-size:0.8em;cursor:hand;" id="getSMSCode" onclick="sendRandomUser();" >点击获取短信验证码</span>';
//password.focus();
return 2;
}
if (yzmbox == "") {
//document.getElementById("divSMSButton").innerHTML = '<span style="color: #005BAC;font-size:0.8em;cursor:hand;" id="getSMSCode" onclick="sendRandomUser();" >点击获取短信验证码</span>';
yzmbox.focus();
return 3;
}
var mobileNoNew = strEncode(userName, "pdcss", "css", "co");
var workNoNew = strEncode(workNo, "pdcss", "css", "co");
var passwordNew = strEncode(password, "pdcss", "css", "co");
return [mobileNoNew, workNoNew, passwordNew]
} catch (e) {
return e
}
}
function onSubmitIt(){
enterClickNo = 1;
if(document.forms[0].serviceMobileNo.value==''){
alert('请输入变更号码');
document.forms[0].serviceMobileNo.focus();
enterClickNo = 0;
return false;
}
var ployId = document.getElementById('ployId').value;
if(ployId==''){alert("请选择迁转活动");return ;}
var productId = document.getElementById('productId').value;
if(productId==''){alert("请选择迁转套餐");return ;}
window.top.showEstopDiv();
document.getElementById("submitBtn").style.display = "none";
document.forms[0].action = "mealTransferSubmit.action";
document.forms[0].submit();
}
function userInfoCheck(mobileNoForGuest){
var url = "../s_channel/ajaxUserInfoCheck.action?useRequiredId=a6380b46-80ef-4ed8-9c8e-89280fc9aca2&pay.mobileNoForGuest="+mobileNoForGuest+"&t=463466";
window.top.showEstopDiv();
var userStauts = false;
$.ajax({
type: 'POST',
url: url,
async: false,//异步 true 同步false
success: function(data){
tds = data.split("~");
//2014-12-18 是否4G卡标红
var htmll=tds[1];
var if4g;
if(htmll.indexOf('<b>是否4G卡')>0){
if4g = htmll.substring(htmll.indexOf('<b>是否4G卡'),htmll.length);
htmll= htmll.substring(0,htmll.indexOf('<b>是否4G卡'));
if4g = "<span style='color:red'>"+if4g+"</span>";
tds[1] = htmll +if4g;
}
tds[1] +=" <b>归属地:</b><span style='color:red'>"+tds[8]+"</span>";
var html = "<strong>客户姓名:</strong>"+tds[0]+" "+tds[1];
$("#userInfo").html(html);
$("#userInfo1").html("相关信息:");
$("#userInfotr").show();
document.getElementById("bossId").value = tds[6];
window.top.hideEstopDiv();
if(tds[1]=="参数错误"||tds[2]=="参数错误")
{
userStauts = false;
}
else
{
userStauts = true;
}
},
dataType: "text"
});
}
function menuNodeClick(url, openModule, auth, urlType, id) {
var alistNo = '';
var my ='';
// var arr = alistNo.split("-");
// if ($.inArray(id, arr) != -1) {
// alert("对不起,该菜单已下线!");
// $('#' + id + "no").remove();
// return;
// }
if (url != 'undefined' && url != '' && url.indexOf('#') < 0) {
if (url.indexOf('?') == -1) {
url = url + '?auth=' + auth + "&t=" + parseInt(100000 * Math.random());
} else {
url = url + '&auth=' + auth + "&t=" + parseInt(100000 * Math.random());
}
if ('other' != urlType) {
url = "../" + url;
}
if (openModule == 'N') {
my = url;
} else {
if (url) {
// window.open(url);
return url
}
}
if (id) {
hide(id);
}
}
}
function yzmboxClick() {
//location.reload(true);
var rand = Math.random();
var url = "https://211.138.30.200/WSKF/s_channel/verifyCodeGenerator.action?rand=" + rand;
return url
}
function strEncode(data, firstKey, secondKey, thirdKey) {
var leng = data.length;
var encData = "";
var firstKeyBt, secondKeyBt, thirdKeyBt, firstLength, secondLength, thirdLength;
if (firstKey != null && firstKey != "") {
firstKeyBt = getKeyBytes(firstKey);
firstLength = firstKeyBt.length;
}
if (secondKey != null && secondKey != "") {
secondKeyBt = getKeyBytes(secondKey);
secondLength = secondKeyBt.length;
}
if (thirdKey != null && thirdKey != "") {
thirdKeyBt = getKeyBytes(thirdKey);
thirdLength = thirdKeyBt.length;
}
if (leng > 0) {
if (leng < 4) {
var bt = strToBt(data);
var encByte;
if (firstKey != null && firstKey != "" && secondKey != null && secondKey != "" && thirdKey != null && thirdKey != "") {
var tempBt;
var x, y, z;
tempBt = bt;
for (x = 0; x < firstLength; x++) {
tempBt = enc(tempBt, firstKeyBt[x]);
}
for (y = 0; y < secondLength; y++) {
tempBt = enc(tempBt, secondKeyBt[y]);
}
for (z = 0; z < thirdLength; z++) {
tempBt = enc(tempBt, thirdKeyBt[z]);
}
encByte = tempBt;
} else {
if (firstKey != null && firstKey != "" && secondKey != null && secondKey != "") {
var tempBt;
var x, y;
tempBt = bt;
for (x = 0; x < firstLength; x++) {
tempBt = enc(tempBt, firstKeyBt[x]);
}
for (y = 0; y < secondLength; y++) {
tempBt = enc(tempBt, secondKeyBt[y]);
}
encByte = tempBt;
} else {
if (firstKey != null && firstKey != "") {
var tempBt;
var x = 0;
tempBt = bt;
for (x = 0; x < firstLength; x++) {
tempBt = enc(tempBt, firstKeyBt[x]);
}
encByte = tempBt;
}
}
}
encData = bt64ToHex(encByte);
} else {
var iterator = parseInt(leng / 4);
var remainder = leng % 4;
var i = 0;
for (i = 0; i < iterator; i++) {
var tempData = data.substring(i * 4 + 0, i * 4 + 4);
var tempByte = strToBt(tempData);
var encByte;
if (firstKey != null && firstKey != "" && secondKey != null && secondKey != "" && thirdKey != null && thirdKey != "") {
var tempBt;
var x, y, z;
tempBt = tempByte;
for (x = 0; x < firstLength; x++) {
tempBt = enc(tempBt, firstKeyBt[x]);
}
for (y = 0; y < secondLength; y++) {
tempBt = enc(tempBt, secondKeyBt[y]);
}
for (z = 0; z < thirdLength; z++) {
tempBt = enc(tempBt, thirdKeyBt[z]);
}
encByte = tempBt;
} else {
if (firstKey != null && firstKey != "" && secondKey != null && secondKey != "") {
var tempBt;
var x, y;
tempBt = tempByte;
for (x = 0; x < firstLength; x++) {
tempBt = enc(tempBt, firstKeyBt[x]);
}
for (y = 0; y < secondLength; y++) {
tempBt = enc(tempBt, secondKeyBt[y]);
}
encByte = tempBt;
} else {
if (firstKey != null && firstKey != "") {
var tempBt;
var x;
tempBt = tempByte;
for (x = 0; x < firstLength; x++) {
tempBt = enc(tempBt, firstKeyBt[x]);
}
encByte = tempBt;
}
}
}
encData += bt64ToHex(encByte);
}
if (remainder > 0) {
var remainderData = data.substring(iterator * 4 + 0, leng);
var tempByte = strToBt(remainderData);
var encByte;
if (firstKey != null && firstKey != "" && secondKey != null && secondKey != "" && thirdKey != null && thirdKey != "") {
var tempBt;
var x, y, z;
tempBt = tempByte;
for (x = 0; x < firstLength; x++) {
tempBt = enc(tempBt, firstKeyBt[x]);
}
for (y = 0; y < secondLength; y++) {
tempBt = enc(tempBt, secondKeyBt[y]);
}
for (z = 0; z < thirdLength; z++) {
tempBt = enc(tempBt, thirdKeyBt[z]);
}
encByte = tempBt;
} else {
if (firstKey != null && firstKey != "" && secondKey != null && secondKey != "") {
var tempBt;
var x, y;
tempBt = tempByte;
for (x = 0; x < firstLength; x++) {
tempBt = enc(tempBt, firstKeyBt[x]);
}
for (y = 0; y < secondLength; y++) {
tempBt = enc(tempBt, secondKeyBt[y]);
}
encByte = tempBt;
} else {
if (firstKey != null && firstKey != "") {
var tempBt;
var x;
tempBt = tempByte;
for (x = 0; x < firstLength; x++) {
tempBt = enc(tempBt, firstKeyBt[x]);
}
encByte = tempBt;
}
}
}
encData += bt64ToHex(encByte);
}
}
}
return encData;
}
/*
* decrypt the encrypted string to the original string
*
* return the original string
*/
/*解密的函数,三个密钥(解密密钥同加密密钥)*/
function strDecode(data, firstKey, secondKey, thirdKey) {
var leng = data.length;
var decStr = "";
var firstKeyBt, secondKeyBt, thirdKeyBt, firstLength, secondLength, thirdLength;
if (firstKey != null && firstKey != "") {
firstKeyBt = getKeyBytes(firstKey);
firstLength = firstKeyBt.length;
}
if (secondKey != null && secondKey != "") {
secondKeyBt = getKeyBytes(secondKey);
secondLength = secondKeyBt.length;
}
if (thirdKey != null && thirdKey != "") {
thirdKeyBt = getKeyBytes(thirdKey);
thirdLength = thirdKeyBt.length;
}
var iterator = parseInt(leng / 16);
var i = 0;
for (i = 0; i < iterator; i++) {
var tempData = data.substring(i * 16 + 0, i * 16 + 16);
var strByte = hexToBt64(tempData);
var intByte = new Array(64);
var j = 0;
for (j = 0; j < 64; j++) {
intByte[j] = parseInt(strByte.substring(j, j + 1));
}
var decByte;
if (firstKey != null && firstKey != "" && secondKey != null && secondKey != "" && thirdKey != null && thirdKey != "") {
var tempBt;
var x, y, z;
tempBt = intByte;
for (x = thirdLength - 1; x >= 0; x--) {
tempBt = dec(tempBt, thirdKeyBt[x]);
}
for (y = secondLength - 1; y >= 0; y--) {
tempBt = dec(tempBt, secondKeyBt[y]);
}
for (z = firstLength - 1; z >= 0; z--) {
tempBt = dec(tempBt, firstKeyBt[z]);
}
decByte = tempBt;
} else {
if (firstKey != null && firstKey != "" && secondKey != null && secondKey != "") {
var tempBt;
var x, y, z;
tempBt = intByte;
for (x = secondLength - 1; x >= 0; x--) {
tempBt = dec(tempBt, secondKeyBt[x]);
}
for (y = firstLength - 1; y >= 0; y--) {
tempBt = dec(tempBt, firstKeyBt[y]);
}
decByte = tempBt;
} else {
if (firstKey != null && firstKey != "") {
var tempBt;
var x, y, z;
tempBt = intByte;
for (x = firstLength - 1; x >= 0; x--) {
tempBt = dec(tempBt, firstKeyBt[x]);
}
decByte = tempBt;
}
}
}
decStr += byteToString(decByte);
}
return decStr;
}
/*
* chang the string into the bit array
*
* return bit array(it's length % 64 = 0)
*/
function getKeyBytes(key) {
var keyBytes = new Array();
var leng = key.length;
var iterator = parseInt(leng / 4);
var remainder = leng % 4;
var i = 0;
for (i = 0; i < iterator; i++) {
keyBytes[i] = strToBt(key.substring(i * 4 + 0, i * 4 + 4));
}
if (remainder > 0) {
keyBytes[i] = strToBt(key.substring(i * 4 + 0, leng));
}
return keyBytes;
}
/*
* chang the string(it's length <= 4) into the bit array
*
* return bit array(it's length = 64)
*/
function strToBt(str) {
var leng = str.length;
var bt = new Array(64);
if (leng < 4) {
var i = 0, j = 0, p = 0, q = 0;
for (i = 0; i < leng; i++) {
var k = str.charCodeAt(i);
for (j = 0; j < 16; j++) {
var pow = 1, m = 0;
for (m = 15; m > j; m--) {
pow *= 2;
}
bt[16 * i + j] = parseInt(k / pow) % 2;
}
}
for (p = leng; p < 4; p++) {
var k = 0;
for (q = 0; q < 16; q++) {
var pow = 1, m = 0;
for (m = 15; m > q; m--) {
pow *= 2;
}
bt[16 * p + q] = parseInt(k / pow) % 2;
}
}
} else {
for (i = 0; i < 4; i++) {
var k = str.charCodeAt(i);
for (j = 0; j < 16; j++) {
var pow = 1;
for (m = 15; m > j; m--) {
pow *= 2;
}
bt[16 * i + j] = parseInt(k / pow) % 2;
}
}
}
return bt;
}
/*
* chang the bit(it's length = 4) into the hex
*
* return hex
*/
function bt4ToHex(binary) {
var hex;
switch (binary) {
case "0000" :
hex = "0";
break;
case "0001" :
hex = "1";
break;
case "0010" :
hex = "2";
break;
case "0011" :
hex = "3";
break;
case "0100" :
hex = "4";
break;
case "0101" :
hex = "5";
break;
case "0110" :
hex = "6";
break;
case "0111" :
hex = "7";
break;
case "1000" :
hex = "8";
break;
case "1001" :
hex = "9";
break;
case "1010" :
hex = "A";
break;
case "1011" :
hex = "B";
break;
case "1100" :
hex = "C";
break;
case "1101" :
hex = "D";
break;
case "1110" :
hex = "E";
break;
case "1111" :
hex = "F";
break;
}
return hex;
}
/*
* chang the hex into the bit(it's length = 4)
*
* return the bit(it's length = 4)
*/
function hexToBt4(hex) {
var binary;
switch (hex) {
case "0" :
binary = "0000";
break;
case "1" :
binary = "0001";
break;
case "2" :
binary = "0010";
break;
case "3" :
binary = "0011";
break;
case "4" :
binary = "0100";
break;
case "5" :
binary = "0101";
break;
case "6" :
binary = "0110";
break;
case "7" :
binary = "0111";
break;
case "8" :
binary = "1000";
break;
case "9" :
binary = "1001";
break;
case "A" :
binary = "1010";
break;
case "B" :
binary = "1011";
break;
case "C" :
binary = "1100";
break;
case "D" :
binary = "1101";
break;
case "E" :
binary = "1110";
break;
case "F" :
binary = "1111";
break;
}
return binary;
}
/*
* chang the bit(it's length = 64) into the string
*
* return string
*/
function byteToString(byteData) {
var str = "";
for (i = 0; i < 4; i++) {
var count = 0;
for (j = 0; j < 16; j++) {
var pow = 1;
for (m = 15; m > j; m--) {
pow *= 2;
}
count += byteData[16 * i + j] * pow;
}
if (count != 0) {
str += String.fromCharCode(count);
}
}
return str;
}
function bt64ToHex(byteData) {
var hex = "";
for (i = 0; i < 16; i++) {
var bt = "";
for (j = 0; j < 4; j++) {
bt += byteData[i * 4 + j];
}
hex += bt4ToHex(bt);
}
return hex;
}
function hexToBt64(hex) {
var binary = "";
for (i = 0; i < 16; i++) {
binary += hexToBt4(hex.substring(i, i + 1));
}
return binary;
}
/*
* the 64 bit des core arithmetic
*/
function enc(dataByte, keyByte) {
var keys = generateKeys(keyByte);
var ipByte = initPermute(dataByte);
var ipLeft = new Array(32);
var ipRight = new Array(32);
var tempLeft = new Array(32);
var i = 0, j = 0, k = 0, m = 0, n = 0;
for (k = 0; k < 32; k++) {
ipLeft[k] = ipByte[k];
ipRight[k] = ipByte[32 + k];
}
for (i = 0; i < 16; i++) {
for (j = 0; j < 32; j++) {
tempLeft[j] = ipLeft[j];
ipLeft[j] = ipRight[j];
}
var key = new Array(48);
for (m = 0; m < 48; m++) {
key[m] = keys[i][m];
}
var tempRight = xor(pPermute(sBoxPermute(xor(expandPermute(ipRight), key))), tempLeft);
for (n = 0; n < 32; n++) {
ipRight[n] = tempRight[n];
}
}
var finalData = new Array(64);
for (i = 0; i < 32; i++) {
finalData[i] = ipRight[i];
finalData[32 + i] = ipLeft[i];
}
return finallyPermute(finalData);
}
function dec(dataByte, keyByte) {
var keys = generateKeys(keyByte);
var ipByte = initPermute(dataByte);
var ipLeft = new Array(32);
var ipRight = new Array(32);
var tempLeft = new Array(32);
var i = 0, j = 0, k = 0, m = 0, n = 0;
for (k = 0; k < 32; k++) {
ipLeft[k] = ipByte[k];
ipRight[k] = ipByte[32 + k];
}
for (i = 15; i >= 0; i--) {
for (j = 0; j < 32; j++) {
tempLeft[j] = ipLeft[j];
ipLeft[j] = ipRight[j];
}
var key = new Array(48);
for (m = 0; m < 48; m++) {
key[m] = keys[i][m];
}
var tempRight = xor(pPermute(sBoxPermute(xor(expandPermute(ipRight), key))), tempLeft);
for (n = 0; n < 32; n++) {
ipRight[n] = tempRight[n];
}
}
var finalData = new Array(64);
for (i = 0; i < 32; i++) {
finalData[i] = ipRight[i];
finalData[32 + i] = ipLeft[i];
}
return finallyPermute(finalData);
}
function initPermute(originalData) {
var ipByte = new Array(64);
for (i = 0, m = 1, n = 0; i < 4; i++, m += 2, n += 2) {
for (j = 7, k = 0; j >= 0; j--, k++) {
ipByte[i * 8 + k] = originalData[j * 8 + m];
ipByte[i * 8 + k + 32] = originalData[j * 8 + n];
}
}
return ipByte;
}
function expandPermute(rightData) {
var epByte = new Array(48);
for (i = 0; i < 8; i++) {
if (i == 0) {
epByte[i * 6 + 0] = rightData[31];
} else {
epByte[i * 6 + 0] = rightData[i * 4 - 1];
}
epByte[i * 6 + 1] = rightData[i * 4 + 0];
epByte[i * 6 + 2] = rightData[i * 4 + 1];
epByte[i * 6 + 3] = rightData[i * 4 + 2];
epByte[i * 6 + 4] = rightData[i * 4 + 3];
if (i == 7) {
epByte[i * 6 + 5] = rightData[0];
} else {
epByte[i * 6 + 5] = rightData[i * 4 + 4];
}
}
return epByte;
}
function xor(byteOne, byteTwo) {
var xorByte = new Array(byteOne.length);
for (i = 0; i < byteOne.length; i++) {
xorByte[i] = byteOne[i] ^ byteTwo[i];
}
return xorByte;
}
function sBoxPermute(expandByte) {
var sBoxByte = new Array(32);
var binary = "";
var s1 = [
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]];
/* Table - s2 */
var s2 = [
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]];
/* Table - s3 */
var s3 = [
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]];
/* Table - s4 */
var s4 = [
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]];
/* Table - s5 */
var s5 = [
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]];
/* Table - s6 */
var s6 = [
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]];
/* Table - s7 */
var s7 = [
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]];
/* Table - s8 */
var s8 = [
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]];
for (m = 0; m < 8; m++) {
var i = 0, j = 0;
i = expandByte[m * 6 + 0] * 2 + expandByte[m * 6 + 5];
j = expandByte[m * 6 + 1] * 2 * 2 * 2
+ expandByte[m * 6 + 2] * 2 * 2
+ expandByte[m * 6 + 3] * 2
+ expandByte[m * 6 + 4];
switch (m) {
case 0 :
binary = getBoxBinary(s1[i][j]);
break;
case 1 :
binary = getBoxBinary(s2[i][j]);
break;
case 2 :
binary = getBoxBinary(s3[i][j]);
break;
case 3 :
binary = getBoxBinary(s4[i][j]);
break;
case 4 :
binary = getBoxBinary(s5[i][j]);
break;
case 5 :
binary = getBoxBinary(s6[i][j]);
break;
case 6 :
binary = getBoxBinary(s7[i][j]);
break;
case 7 :
binary = getBoxBinary(s8[i][j]);
break;
}
sBoxByte[m * 4 + 0] = parseInt(binary.substring(0, 1));
sBoxByte[m * 4 + 1] = parseInt(binary.substring(1, 2));
sBoxByte[m * 4 + 2] = parseInt(binary.substring(2, 3));
sBoxByte[m * 4 + 3] = parseInt(binary.substring(3, 4));
}
return sBoxByte;
}
function pPermute(sBoxByte) {
var pBoxPermute = new Array(32);
pBoxPermute[0] = sBoxByte[15];
pBoxPermute[1] = sBoxByte[6];
pBoxPermute[2] = sBoxByte[19];
pBoxPermute[3] = sBoxByte[20];
pBoxPermute[4] = sBoxByte[28];
pBoxPermute[5] = sBoxByte[11];
pBoxPermute[6] = sBoxByte[27];
pBoxPermute[7] = sBoxByte[16];
pBoxPermute[8] = sBoxByte[0];
pBoxPermute[9] = sBoxByte[14];
pBoxPermute[10] = sBoxByte[22];
pBoxPermute[11] = sBoxByte[25];
pBoxPermute[12] = sBoxByte[4];
pBoxPermute[13] = sBoxByte[17];
pBoxPermute[14] = sBoxByte[30];
pBoxPermute[15] = sBoxByte[9];
pBoxPermute[16] = sBoxByte[1];
pBoxPermute[17] = sBoxByte[7];
pBoxPermute[18] = sBoxByte[23];
pBoxPermute[19] = sBoxByte[13];
pBoxPermute[20] = sBoxByte[31];
pBoxPermute[21] = sBoxByte[26];
pBoxPermute[22] = sBoxByte[2];
pBoxPermute[23] = sBoxByte[8];
pBoxPermute[24] = sBoxByte[18];
pBoxPermute[25] = sBoxByte[12];
pBoxPermute[26] = sBoxByte[29];
pBoxPermute[27] = sBoxByte[5];
pBoxPermute[28] = sBoxByte[21];
pBoxPermute[29] = sBoxByte[10];
pBoxPermute[30] = sBoxByte[3];
pBoxPermute[31] = sBoxByte[24];
return pBoxPermute;
}
function finallyPermute(endByte) {
var fpByte = new Array(64);
fpByte[0] = endByte[39];
fpByte[1] = endByte[7];
fpByte[2] = endByte[47];
fpByte[3] = endByte[15];
fpByte[4] = endByte[55];
fpByte[5] = endByte[23];
fpByte[6] = endByte[63];
fpByte[7] = endByte[31];
fpByte[8] = endByte[38];
fpByte[9] = endByte[6];
fpByte[10] = endByte[46];
fpByte[11] = endByte[14];
fpByte[12] = endByte[54];
fpByte[13] = endByte[22];
fpByte[14] = endByte[62];
fpByte[15] = endByte[30];
fpByte[16] = endByte[37];
fpByte[17] = endByte[5];
fpByte[18] = endByte[45];
fpByte[19] = endByte[13];
fpByte[20] = endByte[53];
fpByte[21] = endByte[21];
fpByte[22] = endByte[61];
fpByte[23] = endByte[29];
fpByte[24] = endByte[36];
fpByte[25] = endByte[4];
fpByte[26] = endByte[44];
fpByte[27] = endByte[12];
fpByte[28] = endByte[52];
fpByte[29] = endByte[20];
fpByte[30] = endByte[60];
fpByte[31] = endByte[28];
fpByte[32] = endByte[35];
fpByte[33] = endByte[3];
fpByte[34] = endByte[43];
fpByte[35] = endByte[11];
fpByte[36] = endByte[51];
fpByte[37] = endByte[19];
fpByte[38] = endByte[59];
fpByte[39] = endByte[27];
fpByte[40] = endByte[34];
fpByte[41] = endByte[2];
fpByte[42] = endByte[42];
fpByte[43] = endByte[10];
fpByte[44] = endByte[50];
fpByte[45] = endByte[18];
fpByte[46] = endByte[58];
fpByte[47] = endByte[26];
fpByte[48] = endByte[33];
fpByte[49] = endByte[1];
fpByte[50] = endByte[41];
fpByte[51] = endByte[9];
fpByte[52] = endByte[49];
fpByte[53] = endByte[17];
fpByte[54] = endByte[57];
fpByte[55] = endByte[25];
fpByte[56] = endByte[32];
fpByte[57] = endByte[0];
fpByte[58] = endByte[40];
fpByte[59] = endByte[8];
fpByte[60] = endByte[48];
fpByte[61] = endByte[16];
fpByte[62] = endByte[56];
fpByte[63] = endByte[24];
return fpByte;
}
function getBoxBinary(i) {
var binary = "";
switch (i) {
case 0 :
binary = "0000";
break;
case 1 :
binary = "0001";
break;
case 2 :
binary = "0010";
break;
case 3 :
binary = "0011";
break;
case 4 :
binary = "0100";
break;
case 5 :
binary = "0101";
break;
case 6 :
binary = "0110";
break;
case 7 :
binary = "0111";
break;
case 8 :
binary = "1000";
break;
case 9 :
binary = "1001";
break;
case 10 :
binary = "1010";
break;
case 11 :
binary = "1011";
break;
case 12 :
binary = "1100";
break;
case 13 :
binary = "1101";
break;
case 14 :
binary = "1110";
break;
case 15 :
binary = "1111";
break;
}
return binary;
}
/*
* generate 16 keys for xor
*
*/
function generateKeys(keyByte) {
var key = new Array(56);
var keys = new Array();
keys[0] = new Array();
keys[1] = new Array();
keys[2] = new Array();
keys[3] = new Array();
keys[4] = new Array();
keys[5] = new Array();
keys[6] = new Array();
keys[7] = new Array();
keys[8] = new Array();
keys[9] = new Array();
keys[10] = new Array();
keys[11] = new Array();
keys[12] = new Array();
keys[13] = new Array();
keys[14] = new Array();
keys[15] = new Array();
var loop = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1];
for (i = 0; i < 7; i++) {
for (j = 0, k = 7; j < 8; j++, k--) {
key[i * 8 + j] = keyByte[8 * k + i];
}
}
var i = 0;
for (i = 0; i < 16; i++) {
var tempLeft = 0;
var tempRight = 0;
for (j = 0; j < loop[i]; j++) {
tempLeft = key[0];
tempRight = key[28];
for (k = 0; k < 27; k++) {
key[k] = key[k + 1];
key[28 + k] = key[29 + k];
}
key[27] = tempLeft;
key[55] = tempRight;
}
var tempKey = new Array(48);
tempKey[0] = key[13];
tempKey[1] = key[16];
tempKey[2] = key[10];
tempKey[3] = key[23];
tempKey[4] = key[0];
tempKey[5] = key[4];
tempKey[6] = key[2];
tempKey[7] = key[27];
tempKey[8] = key[14];
tempKey[9] = key[5];
tempKey[10] = key[20];
tempKey[11] = key[9];
tempKey[12] = key[22];
tempKey[13] = key[18];
tempKey[14] = key[11];
tempKey[15] = key[3];
tempKey[16] = key[25];
tempKey[17] = key[7];
tempKey[18] = key[15];
tempKey[19] = key[6];
tempKey[20] = key[26];
tempKey[21] = key[19];
tempKey[22] = key[12];
tempKey[23] = key[1];
tempKey[24] = key[40];
tempKey[25] = key[51];
tempKey[26] = key[30];
tempKey[27] = key[36];
tempKey[28] = key[46];
tempKey[29] = key[54];
tempKey[30] = key[29];
tempKey[31] = key[39];
tempKey[32] = key[50];
tempKey[33] = key[44];
tempKey[34] = key[32];
tempKey[35] = key[47];
tempKey[36] = key[43];
tempKey[37] = key[48];
tempKey[38] = key[38];
tempKey[39] = key[55];
tempKey[40] = key[33];
tempKey[41] = key[52];
tempKey[42] = key[45];
tempKey[43] = key[41];
tempKey[44] = key[49];
tempKey[45] = key[35];
tempKey[46] = key[28];
tempKey[47] = key[31];
switch (i) {
case 0:
for (m = 0; m < 48; m++) {
keys[0][m] = tempKey[m];
}
break;
case 1:
for (m = 0; m < 48; m++) {
keys[1][m] = tempKey[m];
}
break;
case 2:
for (m = 0; m < 48; m++) {
keys[2][m] = tempKey[m];
}
break;
case 3:
for (m = 0; m < 48; m++) {
keys[3][m] = tempKey[m];
}
break;
case 4:
for (m = 0; m < 48; m++) {
keys[4][m] = tempKey[m];
}
break;
case 5:
for (m = 0; m < 48; m++) {
keys[5][m] = tempKey[m];
}
break;
case 6:
for (m = 0; m < 48; m++) {
keys[6][m] = tempKey[m];
}
break;
case 7:
for (m = 0; m < 48; m++) {
keys[7][m] = tempKey[m];
}
break;
case 8:
for (m = 0; m < 48; m++) {
keys[8][m] = tempKey[m];
}
break;
case 9:
for (m = 0; m < 48; m++) {
keys[9][m] = tempKey[m];
}
break;
case 10:
for (m = 0; m < 48; m++) {
keys[10][m] = tempKey[m];
}
break;
case 11:
for (m = 0; m < 48; m++) {
keys[11][m] = tempKey[m];
}
break;
case 12:
for (m = 0; m < 48; m++) {
keys[12][m] = tempKey[m];
}
break;
case 13:
for (m = 0; m < 48; m++) {
keys[13][m] = tempKey[m];
}
break;
case 14:
for (m = 0; m < 48; m++) {
keys[14][m] = tempKey[m];
}
break;
case 15:
for (m = 0; m < 48; m++) {
keys[15][m] = tempKey[m];
}
break;
}
}
return keys;
}
/*end*/'''
return file
# Script entry point: boot the wxPython GUI.
if __name__ == '__main__':
    # Hard-coded test credentials, kept for reference but disabled:
    # clientMac = '00:25:11:EB:27:98'
    # login_phone = '18303995539'
    # workNo = 'J120249'
    # login_password = '100861'

    # Module-level session state, starting empty; presumably filled in by
    # the login flow — TODO confirm against LOGIN's handlers.
    clientMac = ''
    login_phone = ''
    workNo = ''
    login_password = ''

    app = wx.App()
    # Both frames are created up front but only the login frame is shown;
    # presumably the login flow reveals the order frame later — confirm.
    login_frame = LOGIN(parent=None, id=-1)
    action_frame = YD_MAKE_ORDER(parent=None, id=-1)
    login_frame.Show()
    login_frame.Center()
    # frame = YD_MAKE_ORDER(parent=None, id=-1)
    # frame.Show()
    # frame.Center()
    app.MainLoop()
| null | uploadFiles/ymc/main_old.py | main_old.py | py | 104,427 | python | en | code | null | code-starcoder2 | 51 |
288467779 | # Copyright (c) 2021 The Toltec Contributors
# SPDX-License-Identifier: MIT
"""Build recipes and create packages."""
import shutil
from typing import (
Any,
Deque,
Dict,
Iterable,
List,
MutableMapping,
Optional,
Tuple,
)
from collections import deque
import re
import os
import logging
import textwrap
import docker
import requests
from . import bash, util, ipk, paths
from .recipe import Recipe, Package
from .version import DependencyKind
logger = logging.getLogger(__name__)
class BuildError(Exception):
    """Raised when a build step fails.

    Covers user-facing failures such as an unreachable Docker daemon,
    a non-200 response while fetching a source file, or a source
    checksum mismatch.
    """
class BuildContextAdapter(logging.LoggerAdapter):
    """Prepend the current build target to every log message.

    The ``extra`` mapping may carry a ``recipe`` name and, while a single
    package is being processed, a ``package`` name; both are folded into a
    prefix of the form ``recipe (package): message``.
    """

    # (key in self.extra, format applied to its value), in prefix order.
    _PREFIX_PARTS = (("recipe", "{}"), ("package", " ({})"))

    def process(
        self, msg: str, kwargs: MutableMapping[str, Any]
    ) -> Tuple[str, MutableMapping[str, Any]]:
        prefix = ""
        for key, template in self._PREFIX_PARTS:
            if key in self.extra:
                prefix += template.format(self.extra[key])
        return (f"{prefix}: {msg}" if prefix else msg), kwargs
class Builder:  # pylint: disable=too-few-public-methods
    """Helper class for building recipes.

    Orchestrates the whole pipeline for one recipe: fetch and verify
    sources, run the prepare/build steps (build runs inside a Docker
    container), strip binaries, run each package's packaging step, and
    archive the results as reproducible .ipk files.
    """

    # Detect non-local paths
    URL_REGEX = re.compile(r"[a-z]+://")

    # Prefix for all Toltec Docker images
    IMAGE_PREFIX = "ghcr.io/toltec-dev/"

    # Toltec Docker image used for generic tasks
    DEFAULT_IMAGE = "toolchain:v1.3.1"

    def __init__(self, work_dir: str, repo_dir: str) -> None:
        """
        Create a builder helper.

        :param work_dir: directory where packages are built
        :param repo_dir: directory where built packages are stored
        :raises BuildError: if the Docker daemon cannot be reached
        """
        self.work_dir = work_dir
        os.makedirs(work_dir, exist_ok=True)

        self.repo_dir = repo_dir
        os.makedirs(repo_dir, exist_ok=True)

        # Inline copy of the install-lib helper script, with comment lines
        # stripped; it is embedded into every generated install script.
        self.install_lib = ""
        install_lib_path = os.path.join(paths.SCRIPTS_DIR, "install-lib")

        with open(install_lib_path, "r") as file:
            for line in file:
                if not line.strip().startswith("#"):
                    self.install_lib += line

        # Shared mutable context; BuildContextAdapter reads it to prefix
        # log lines with the recipe/package currently being processed.
        self.context: Dict[str, str] = {}
        self.adapter = BuildContextAdapter(logger, self.context)

        try:
            self.docker = docker.from_env()
        except docker.errors.DockerException as err:
            raise BuildError(
                "Unable to connect to the Docker daemon. \
Please check that the service is running and that you have the necessary \
permissions."
            ) from err

    def make(
        self, recipe: Recipe, packages: Optional[Iterable[Package]] = None
    ) -> bool:
        """
        Build a recipe and create its associated packages.

        :param recipe: recipe to make
        :param packages: list of packages of the recipe to make
            (default: all of them)
        :returns: true if all packages were built correctly
        """
        self.context["recipe"] = recipe.name
        build_dir = os.path.join(self.work_dir, recipe.name)

        # Interactively resolve a leftover build directory; abort the build
        # if the user chooses to cancel.
        if not util.check_directory(
            build_dir,
            f"The build directory '{os.path.relpath(build_dir)}' for recipe \
'{recipe.name}' already exists.\nWould you like to [c]ancel, [r]emove \
that directory, or [k]eep it (not recommended)?",
        ):
            return False

        src_dir = os.path.join(build_dir, "src")
        os.makedirs(src_dir, exist_ok=True)

        base_pkg_dir = os.path.join(build_dir, "pkg")
        os.makedirs(base_pkg_dir, exist_ok=True)

        # Recipe-wide steps, shared by all of its packages.
        self._fetch_source(recipe, src_dir)
        self._prepare(recipe, src_dir)
        self._build(recipe, src_dir)
        self._strip(recipe, src_dir)

        # Per-package steps: stage files, then archive.
        for package in (
            packages if packages is not None else recipe.packages.values()
        ):
            self.context["package"] = package.name
            pkg_dir = os.path.join(base_pkg_dir, package.name)
            os.makedirs(pkg_dir, exist_ok=True)

            self._package(package, src_dir, pkg_dir)
            self._archive(package, pkg_dir)
            del self.context["package"]

        return True

    def _fetch_source(
        self,
        recipe: Recipe,
        src_dir: str,
    ) -> None:
        """Fetch and extract all source files required to build a recipe.

        :param recipe: recipe whose sources are fetched
        :param src_dir: directory sources are copied/extracted into
        :raises BuildError: on a failed download or checksum mismatch
        """
        self.adapter.info("Fetching source files")

        for source in recipe.sources:
            filename = os.path.basename(source.url)
            local_path = os.path.join(src_dir, filename)

            if self.URL_REGEX.match(source.url) is None:
                # Get source file from the recipe’s directory
                shutil.copy2(os.path.join(recipe.path, source.url), local_path)
            else:
                # Fetch source file from the network
                req = requests.get(source.url)

                if req.status_code != 200:
                    raise BuildError(
                        f"Unexpected status code while fetching \
source file '{source.url}', got {req.status_code}"
                    )

                with open(local_path, "wb") as local:
                    for chunk in req.iter_content(chunk_size=1024):
                        local.write(chunk)

            # Verify checksum ("SKIP" disables verification for a source)
            if (
                source.checksum != "SKIP"
                and util.file_sha256(local_path) != source.checksum
            ):
                raise BuildError(
                    f"Invalid checksum for source file {source.url}"
                )

            # Automatically extract source archives
            if not source.noextract:
                if not util.auto_extract(local_path, src_dir):
                    self.adapter.debug(
                        "Not extracting %s (unsupported archive type)",
                        local_path,
                    )

    def _prepare(self, recipe: Recipe, src_dir: str) -> None:
        """Prepare source files before building.

        Runs the recipe's prepare() shell function, if any, on the host.
        """
        script = recipe.functions["prepare"]

        if not script:
            self.adapter.debug("Skipping prepare (nothing to do)")
            return

        self.adapter.info("Preparing source files")
        logs = bash.run_script(
            script=script,
            variables={
                **recipe.variables,
                **recipe.custom_variables,
                "srcdir": src_dir,
            },
        )

        self._print_logs(logs, "prepare()")

    def _build(self, recipe: Recipe, src_dir: str) -> None:
        """Build artifacts for a recipe.

        Runs the recipe's build() shell function inside the recipe's Docker
        image, after installing its build-type (apt) and host-type (opkg)
        dependencies in the container.
        """
        script = recipe.functions["build"]

        if not script:
            self.adapter.debug("Skipping build (nothing to do)")
            return

        self.adapter.info("Building artifacts")

        # Set fixed atime and mtime for all the source files
        # so that builds are reproducible
        epoch = int(recipe.timestamp.timestamp())

        for filename in util.list_tree(src_dir):
            os.utime(filename, (epoch, epoch))

        mount_src = "/src"
        repo_src = "/repo"
        uid = os.getuid()
        pre_script: List[str] = []

        # Install required dependencies
        build_deps = []
        host_deps = []

        for dep in recipe.makedepends:
            if dep.kind == DependencyKind.Build:
                build_deps.append(dep.package)
            elif dep.kind == DependencyKind.Host:
                host_deps.append(dep.package)

        if build_deps:
            # Build deps are Debian packages installed inside the container
            pre_script.extend(
                (
                    "export DEBIAN_FRONTEND=noninteractive",
                    "apt-get update -qq",
                    "apt-get install -qq --no-install-recommends"
                    ' -o Dpkg::Options::="--force-confdef"'
                    ' -o Dpkg::Options::="--force-confold"'
                    " -- " + " ".join(build_deps),
                )
            )

        if host_deps:
            # Host deps are opkg packages installed into the target sysroot
            pre_script.extend(
                (
                    "opkg update --verbosity=0 --offline-root $SYSROOT",
                    "opkg install --verbosity=0 --no-install-recommends"
                    " --offline-root $SYSROOT"
                    " -- " + " ".join(host_deps),
                )
            )

        logs = bash.run_script_in_container(
            self.docker,
            image=self.IMAGE_PREFIX + recipe.image,
            mounts=[
                docker.types.Mount(
                    type="bind",
                    source=os.path.abspath(src_dir),
                    target=mount_src,
                ),
                docker.types.Mount(
                    type="bind",
                    source=os.path.abspath(self.repo_dir),
                    target=repo_src,
                ),
            ],
            variables={
                **recipe.variables,
                **recipe.custom_variables,
                "srcdir": mount_src,
            },
            script="\n".join(
                (
                    *pre_script,
                    f'cd "{mount_src}"',
                    script,
                    # Hand ownership of build outputs back to the host user
                    f'chown -R {uid}:{uid} "{mount_src}"',
                )
            ),
        )

        self._print_logs(logs, "build()")

    def _strip(self, recipe: Recipe, src_dir: str) -> None:
        """Strip all debugging symbols from binaries.

        Skipped when the recipe sets the 'nostrip' flag. Strip errors are
        ignored (`|| true`) since not every executable file is a binary.
        """
        if "nostrip" in recipe.flags:
            self.adapter.debug("Not stripping binaries (nostrip flag set)")
            return

        self.adapter.info("Stripping binaries")
        mount_src = "/src"

        logs = bash.run_script_in_container(
            self.docker,
            image=self.IMAGE_PREFIX + self.DEFAULT_IMAGE,
            mounts=[
                docker.types.Mount(
                    type="bind",
                    source=os.path.abspath(src_dir),
                    target=mount_src,
                )
            ],
            variables={},
            script="\n".join(
                (
                    # Strip binaries in the target arch
                    f'find "{mount_src}" -type f -executable -print0 \
| xargs --no-run-if-empty --null "${{CROSS_COMPILE}}strip" --strip-all || true',
                    # Strip binaries in the host arch
                    f'find "{mount_src}" -type f -executable -print0 \
| xargs --no-run-if-empty --null strip --strip-all || true',
                )
            ),
        )

        self._print_logs(logs)

    def _package(self, package: Package, src_dir: str, pkg_dir: str) -> None:
        """Make a package from a recipe’s build artifacts.

        Runs the package's package() shell function on the host and logs
        the resulting file tree at debug level.
        """
        self.adapter.info("Packaging build artifacts")
        logs = bash.run_script(
            script=package.functions["package"],
            variables={
                **package.variables,
                **package.custom_variables,
                "srcdir": src_dir,
                "pkgdir": pkg_dir,
            },
        )

        self._print_logs(logs, "package()")
        self.adapter.debug("Resulting tree:")

        for filename in util.list_tree(pkg_dir):
            self.adapter.debug(
                " - %s",
                os.path.normpath(
                    os.path.join("/", os.path.relpath(filename, pkg_dir))
                ),
            )

    def _archive(self, package: Package, pkg_dir: str) -> None:
        """Create an archive for a package.

        Converts the package's install hooks to Debian-style maintainer
        scripts and writes a bit-for-bit reproducible .ipk into the repo.
        """
        self.adapter.info("Creating archive")
        ar_path = os.path.join(paths.REPO_DIR, package.filename())

        # Inject Oxide-specific hook for reloading apps
        if os.path.exists(os.path.join(pkg_dir, "opt/usr/share/applications")):
            oxide_hook = "\nreload-oxide-apps\n"
            package.functions["configure"] += oxide_hook
            package.functions["postupgrade"] += oxide_hook
            package.functions["postremove"] += oxide_hook

        # Convert install scripts to Debian format
        scripts = {}
        # Common preamble: bash strict mode, recipe variables, custom
        # functions, and the shared install-lib helpers.
        script_header = "\n".join(
            (
                textwrap.dedent(
                    """\
                    #!/usr/bin/env bash
                    set -euo pipefail
                    """
                ),
                bash.put_variables(
                    {
                        **package.parent.variables,
                        **package.variables,
                        **package.custom_variables,
                    }
                ),
                bash.put_functions(package.custom_functions),
                self.install_lib,
            )
        )

        # preinstall()/configure() each map to one Debian script that only
        # fires for the matching action argument.
        for name, script, action in (
            ("preinstall", "preinst", "install"),
            ("configure", "postinst", "configure"),
        ):
            if package.functions[name]:
                scripts[script] = "\n".join(
                    (
                        script_header,
                        textwrap.dedent(
                            f"""\
                            if [[ $1 = {action} ]]; then
                            script() {{
                            """
                        ),
                        package.functions[name],
                        textwrap.dedent(
                            """\
                            }
                            script
                            fi
                            """
                        ),
                    )
                )

        # upgrade/remove hooks share the prerm/postrm scripts, dispatching
        # on the action argument passed by the package manager.
        for step in ("pre", "post"):
            if (
                package.functions[step + "upgrade"]
                or package.functions[step + "remove"]
            ):
                script = script_header

                for action in ("upgrade", "remove"):
                    if package.functions[step + action]:
                        script += "\n".join(
                            (
                                textwrap.dedent(
                                    f"""\
                                    if [[ $1 = {action} ]]; then
                                    script() {{
                                    """
                                ),
                                package.functions[step + action],
                                textwrap.dedent(
                                    """\
                                    }
                                    script
                                    fi
                                    """
                                ),
                            )
                        )

                scripts[step + "rm"] = script

        self.adapter.debug("Install scripts:")

        if scripts:
            for script in sorted(scripts):
                self.adapter.debug(" - %s", script)
        else:
            self.adapter.debug("(none)")

        # Fixed timestamp keeps the archive reproducible
        epoch = int(package.parent.timestamp.timestamp())

        with open(ar_path, "wb") as file:
            ipk.make_ipk(
                file,
                epoch=epoch,
                pkg_dir=pkg_dir,
                metadata=package.control_fields(),
                scripts=scripts,
            )

        # Set fixed atime and mtime for the resulting archive
        os.utime(ar_path, (epoch, epoch))

    def _print_logs(
        self,
        logs: bash.LogGenerator,
        function_name: Optional[str] = None,
        max_lines_on_fail: int = 50,
    ) -> None:
        """
        Print logs to the debug output or buffer and print the last n log lines
        if a ScriptError is caught.

        :param logs: generator of log lines
        :param function_name: calling function name
        :param max_lines_on_fail: number of context lines to print
            in non-debug mode
        :raises bash.ScriptError: re-raised after printing context lines
        """
        log_buffer: Deque[str] = deque()
        try:
            for line in logs:
                if self.adapter.getEffectiveLevel() <= logging.DEBUG:
                    self.adapter.debug(line)
                else:
                    # Keep only the most recent lines as failure context
                    if len(log_buffer) == max_lines_on_fail:
                        log_buffer.popleft()
                    log_buffer.append(line)
        except bash.ScriptError as err:
            if len(log_buffer) > 0:
                self.adapter.info(
                    f"Only showing up to {max_lines_on_fail} lines of context. "
                    + "Use --verbose for the full output."
                )

                for line in log_buffer:
                    self.adapter.error(line)

            if function_name:
                self.adapter.error(f"{function_name} failed")

            raise err
| null | scripts/toltec/builder.py | builder.py | py | 16,413 | python | en | code | null | code-starcoder2 | 51 |
73170860 | #!/usr/bin/env python
# coding: utf-8
import sys
from hashlib import md5
from six import print_
def mine(secret, zeros=5):
    """Find the smallest "AdventCoin" nonce for *secret*.

    Returns the smallest non-negative integer ``i`` such that the MD5 hex
    digest of ``secret + str(i)`` begins with *zeros* zero digits.  The
    default of 5 matches part one of Advent of Code 2015 day 4.

    Fix: the original compared raw digest bytes
    (``digest[0] == 0 and digest[1] == 0 and digest[2] <= 0x0f``), which is
    equivalent to five zero nibbles on Python 3 but loops forever on
    Python 2 (indexing a ``str`` digest yields one-character strings that
    never equal 0) even though this file imports ``six`` for 2/3
    compatibility.  Comparing against the hex digest is correct on both
    and lets the difficulty be parameterized backward-compatibly.

    :param secret: the puzzle secret key, as bytes
    :param zeros: number of leading zero hex digits required (default 5)
    :returns: the smallest qualifying integer
    """
    target = "0" * zeros
    for i in count():
        digest = md5(secret + str(i).encode("ascii")).hexdigest()
        if digest.startswith(target):
            return i
def main():
    """Mine the puzzle input and print the resulting nonce."""
    answer = mine(b"yzbqklnj")
    print_(answer)
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| null | day4-1.py | day4-1.py | py | 421 | python | en | code | null | code-starcoder2 | 51 |
522031771 | # -*- coding: utf-8 -*-
from pytorchtools import EarlyStopping
import torch
import torch as t
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
from torch_model.Deep_NMT_model import LMLoss
from torch_model.Attention_NMT import AttentionNMT
from data.iwslt_Data_Loader import iwslt_Data
import numpy as np
from tqdm import tqdm
from nltk.translate.bleu_score import corpus_bleu
import config as argumentparser
# Parse command-line configuration once at import time.  "filters" arrives
# as a comma-separated string and is converted to a list of ints.
config = argumentparser.ArgumentParser()
config.filters = list(map(int,config.filters.split(",")))
# Seed the RNG for reproducibility and pin the configured GPU when present.
torch.manual_seed(config.seed)
if torch.cuda.is_available():
    torch.cuda.set_device(config.gpu)
def get_dev_loss(model, criterion, data_iter):
    """Compute the summed, padding-masked token loss over a validation set.

    :param model: seq2seq model to evaluate (put into eval mode)
    :param criterion: per-token loss returning one value per flattened token
    :param data_iter: DataLoader yielding (source, target_input, target)
    :returns: sum over batches of the padding-masked mean token loss
    """
    model.eval()
    process_bar = tqdm(data_iter)
    loss = 0
    # Fix: evaluation previously ran with autograd tracking enabled;
    # torch.no_grad() avoids building the graph and saves memory without
    # changing the computed loss values.
    with torch.no_grad():
        for source_data, target_data_input, target_data in process_bar:
            if config.cuda and torch.cuda.is_available():
                source_data = source_data.cuda()
                target_data_input = target_data_input.cuda()
                target_data = target_data.cuda()
            else:
                source_data = torch.autograd.Variable(source_data).long()
                target_data_input = torch.autograd.Variable(target_data_input).long()
                target_data = torch.autograd.Variable(target_data).squeeze()
            out = model(source_data, target_data_input)
            # NOTE(review): 30000 is a hard-coded target vocabulary size;
            # presumably it should come from config — confirm.
            loss_now = criterion(out.view(-1, 30000), autograd.Variable(target_data.view(-1).long()))
            # Mask padding tokens (id 0) out of the per-batch average.
            weights = target_data.view(-1) != 0
            loss_now = torch.sum((loss_now * weights.float())) / torch.sum(weights.float())
            loss += loss_now.item()
    return loss
def get_test_bleu(model, target_id2word, data_iter):
    """Greedy-decode the test set and return the corpus BLEU score (0-100).

    Side effect: writes reference/prediction word pairs to
    ./data/result.txt for inspection.

    :param model: trained seq2seq model (run in eval mode with mode="test")
    :param target_id2word: mapping from target token id to word string
    :param data_iter: DataLoader yielding (source, target_input, target)
    :returns: corpus-level BLEU multiplied by 100
    """
    model.eval()
    process_bar = tqdm(data_iter)
    refs = []
    preds = []
    for source_data, target_data_input, target_data in process_bar:
        # Prime the decoder with token id 2 for every batch element —
        # presumably the start-of-sequence id; confirm against the vocab.
        target_input = torch.Tensor(np.zeros([source_data.shape[0], 1])+2)
        if config.cuda and torch.cuda.is_available():
            source_data = source_data.cuda()
            target_input = target_input.cuda().long()
        else:
            source_data = torch.autograd.Variable(source_data).long()
            target_input = torch.autograd.Variable(target_input).long()
        target_data = target_data.numpy()
        out = model(source_data, target_input, mode="test")
        # Transpose model output to (batch, time) before collecting tokens.
        out = np.array(out).T
        tmp_preds = []
        for i in range(out.shape[0]):
            tmp_preds.append([])
        for i in range(out.shape[0]):
            for j in range(out.shape[1]):
                # Token id 3 stops collection — presumably EOS; confirm.
                if out[i][j] != 3:
                    tmp_preds[i].append(out[i][j])
                else:
                    break
        preds += tmp_preds
        tmp_refs = []
        for i in range(target_data.shape[0]):
            tmp_refs.append([])
        for i in range(target_data.shape[0]):
            for j in range(target_data.shape[1]):
                # Drop ids 3 and 0 (treated as EOS and padding) from refs.
                if target_data[i][j] != 3 and target_data[i][j] != 0:
                    tmp_refs[i].append(target_data[i][j])
        # corpus_bleu expects a list of reference sentences per hypothesis.
        tmp_refs = [[x] for x in tmp_refs]
        refs += tmp_refs
    bleu = corpus_bleu(refs, preds)*100
    with open("./data/result.txt", "w") as f:
        for i in range(len(preds)):
            tmp_ref = [target_id2word[id] for id in refs[i][0]]
            tmp_pred = [target_id2word[id] for id in preds[i]]
            f.write("ref: " + " ".join(tmp_ref) + "\n")
            f.write("pred: " + " ".join(tmp_pred) + "\n")
            f.write("\n\n")
    return bleu
import config as argumentparser
# Training entry point: build the IWSLT14 de-en loaders, train the attention
# NMT model, and report BLEU/validation loss after every epoch.
if __name__ == '__main__':
    # source_vocab_size=30000,target_vocab_size=30000,embedding_size=256,
    # source_length=100,target_length=100,lstm_size=256
    config = argumentparser.ArgumentParser()
    training_set = iwslt_Data()
    training_iter = torch.utils.data.DataLoader(dataset=training_set,
                                                batch_size=config.batch_size,
                                                shuffle=True,
                                                num_workers=0)
    valid_set = iwslt_Data(source_data_name="IWSLT14.TED.dev2010.de-en.de",
                           target_data_name="IWSLT14.TED.dev2010.de-en.en")
    valid_iter = torch.utils.data.DataLoader(dataset=valid_set,
                                             batch_size=config.batch_size,
                                             shuffle=True,
                                             num_workers=0)
    test_set = iwslt_Data(source_data_name="IWSLT14.TED.tst2012.de-en.de",
                          target_data_name="IWSLT14.TED.tst2012.de-en.en")
    test_iter = torch.utils.data.DataLoader(dataset=test_set,
                                            batch_size=config.batch_size,
                                            shuffle=True,
                                            num_workers=0)
    model = AttentionNMT(config)
    criterion = LMLoss()
    if config.cuda and torch.cuda.is_available():
        model.cuda()
        criterion.cuda()
    print(torch.cuda.is_available(), config.cuda)
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    target_id2word = {idx: word for word, idx in training_set.target_word2id.items()}
    loss = -1
    for epoch in range(config.epoch):
        model.train()
        process_bar = tqdm(training_iter)
        # BUG FIX: the original nested two identical
        # `for ... in process_bar` loops, which silently discarded the
        # first batch of every epoch; a single pass is intended.
        for source_data, target_data_input, target_data in process_bar:
            if config.cuda and torch.cuda.is_available():
                source_data = source_data.cuda()
                target_data_input = target_data_input.cuda()
                target_data = target_data.cuda()
            else:
                source_data = torch.autograd.Variable(source_data).long()
                target_data_input = torch.autograd.Variable(target_data_input).long()
                target_data = torch.autograd.Variable(target_data).squeeze()
            out = model(source_data, target_data_input)
            loss_now = criterion(target_data, out)
            # Exponential moving average of the training loss for display.
            if loss == -1:
                loss = loss_now.data.item()
            else:
                loss = 0.95 * loss + 0.05 * loss_now.data.item()
            process_bar.set_postfix(loss=loss_now.data.item())
            process_bar.update()
            optimizer.zero_grad()
            loss_now.backward()
            optimizer.step()
        # BUG FIX: get_test_bleu and get_dev_loss take (model, ...) as
        # leading arguments; the original passed only the data iterator,
        # which would raise TypeError at the first epoch boundary.
        test_bleu = get_test_bleu(model, target_id2word, test_iter)
        print("test bleu is:", test_bleu)
        valid_loss = get_dev_loss(model, criterion, valid_iter)
        print("valid loss is:", valid_loss)
85523096 |
import pandas as pd
import numpy as np
import nltk
from tensorflow.keras.utils import to_categorical
from nltk import RegexpTokenizer
from nltk.corpus import stopwords, wordnet
def _ensure_nltk_resource(resource_path: str, package: str) -> None:
    """Download the NLTK *package* only if *resource_path* is not installed yet."""
    try:
        nltk.data.find(resource_path)
    except LookupError:
        nltk.download(package)


_ensure_nltk_resource('tokenizers/punkt', 'punkt')
# BUG FIX: the stopwords corpus lives under 'corpora/', not 'corpus/'; the old
# path always raised LookupError, so the presence check never worked.
_ensure_nltk_resource('corpora/stopwords', 'stopwords')
# WordNet (plus the Open Multilingual Wordnet used for the 'fra' lemmas in
# _find_synonyms below) was never ensured and would fail on a fresh machine.
_ensure_nltk_resource('corpora/wordnet', 'wordnet')
_ensure_nltk_resource('corpora/omw-1.4', 'omw-1.4')
class TextDataset:
    """Manage a labelled text dataset built from json files.

    Args:
        json_files (tuple): paths of the files to parse
        x_col (str): name of the column containing data
        y_col (str): name of the column containing labels

    Attributes:
        _data (pandas.DataFrame): DataFrame containing the full set
        _labels (numpy.ndarray): sorted unique intent labels
    """

    def __init__(self, json_files: tuple, x_col: str, y_col: str):
        # x_col / y_col are kept for interface compatibility; the column
        # names 'sentence' / 'intent' are currently hard-coded below.
        self._data = self._read_json(json_files)
        self._labels = None
        self._add_one_hot()
        print(f'Loaded {self.__len__()} rows')

    def _read_json(self, tuple_files: tuple) -> pd.DataFrame:
        """Read multiple json files and concat them in a single DataFrame.

        DataFrame.append was removed in pandas 2.0; pd.concat is the
        supported replacement (and avoids quadratic re-copying).
        """
        frames = [pd.read_json(file) for file in tuple_files]
        return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

    def _add_one_hot(self):
        """Append the one-hot encoded intent labels as extra columns."""
        self._labels, indices = np.unique(self._data['intent'], return_inverse=True)
        one_hot_values = to_categorical(indices)
        self._data = pd.concat((self._data, pd.DataFrame(one_hot_values)), axis=1)

    def _find_synonyms(self, word: str) -> list:
        """Return the French synonyms of *word* (order-preserving, deduplicated)."""
        synonyms = []
        for synset in wordnet.synsets(word):
            for syn in synset.lemma_names('fra'):
                if syn not in synonyms:
                    synonyms.append(syn)
        return synonyms

    def _synonym_replacement(self, sentence: str) -> list:
        """Build new sentences by swapping non-stopword words for synonyms."""
        toknizer = RegexpTokenizer(r'''\w'|\w+|[^\w\s]''')
        words = toknizer.tokenize(sentence)
        stoplist = stopwords.words('french')
        stoplist.append('ferret')  # domain word deliberately kept out of replacement
        n_sentence = []
        for w in words:
            if w not in stoplist:
                # Cap at 10 synonyms per word to bound the combinatorial blow-up.
                for s in self._find_synonyms(w)[:10]:
                    n_sentence.append(sentence.replace(w, s))
        return n_sentence

    def augment_data(self) -> pd.DataFrame:
        """Return an augmented DataFrame built via synonym replacement.

        Rows labelled 'irrelevant' are not augmented.
        """
        new_sentences = []
        labels = []
        one_hot_lab = []
        for index, row in self._data.iterrows():
            if row['intent'] != 'irrelevant':
                for s in self._synonym_replacement(row['sentence']):
                    new_sentences.append(s)
                    labels.append(row['intent'])
                    # One-hot width derived from the real label set instead of
                    # the previously hard-coded 8.
                    vector = np.zeros(len(self._labels))
                    vector[list(self._labels).index(row['intent'])] = 1
                    one_hot_lab.append(vector)
        new_data = pd.DataFrame({'sentence': new_sentences, 'intent': labels})
        ones = pd.DataFrame(one_hot_lab)
        return pd.concat((new_data, ones), axis=1)

    def augment_and_balance(self):
        """Augment then balance the dataset.

        The smallest per-class count (original + augmented) becomes the
        target size for every class; shortfalls are filled from the
        augmented rows.
        """
        self._data = self._data.sample(frac=1)
        augmented_data = self.augment_data().sample(frac=1)
        # Per-class counts before and after augmentation.
        count_init = self._data['intent'].value_counts()
        count_augm = augmented_data['intent'].value_counts()
        count_augm['irrelevant'] = 0
        sum_counts = count_init + count_augm
        min_value = min(sum_counts)
        # Collect per-class slices and concat once (DataFrame.append is gone
        # in pandas 2.0 and repeated appends were quadratic anyway).
        parts = []
        for cl in self._labels:
            if count_init[cl] >= min_value:
                parts.append(self._data.loc[self._data['intent'] == cl][:min_value])
            else:
                missing_data = min_value - count_init[cl]
                parts.append(self._data.loc[self._data['intent'] == cl])
                parts.append(augmented_data.loc[augmented_data['intent'] == cl][:missing_data])
        n_df = pd.concat(parts, ignore_index=True)
        balanced_data = n_df.sample(frac=1)
        self._data = balanced_data
        print(f'Dataset contains now {self.__len__()} rows')

    def split_data(self, frac=0.2) -> tuple:
        """Split into (train, test) DataFrames; *frac* is the test fraction."""
        df = self._data.sample(frac=1)
        size_train = int((1 - frac) * self.__len__())
        return df[:size_train], df[size_train:]

    @property
    def data(self):
        """The full underlying DataFrame."""
        return self._data

    def __len__(self):
        return len(self._data)
| null | src/TextDataset.py | TextDataset.py | py | 5,582 | python | en | code | null | code-starcoder2 | 51 |
431667763 | # Jeff Austin
# 7/16/2019
# Portland State University
# CS350
# Daniel LeBlanche
# HW3
# Merge sort code in pyhton 3
import sys
import random
import time
import math
# merge sort algorithm
# taken from D. LeBlanche's slides: http://web.cecs.pdx.edu/~dleblanc/cs350/sorting.pdf
def merge_sort(A):
    """Return a new sorted list built from A via top-down merge sort.

    Adapted from D. LeBlanche's slides:
    http://web.cecs.pdx.edu/~dleblanc/cs350/sorting.pdf
    """
    # Base cases: empty and one-element lists are already sorted.
    if len(A) == 0:
        return []
    if len(A) == 1:
        return A
    # Python's // already floors integer division, so math.floor is redundant;
    # slicing replaces the manual copy loops that were commented out here
    # (they previously caused infinite recursion).
    mid = len(A) // 2
    B = merge_sort(A[:mid])
    C = merge_sort(A[mid:])
    return merge_lists(B, C)
# merge two lists
# taken from D. LeBlanche's slides: http://web.cecs.pdx.edu/~dleblanc/cs350/sorting.pdf
def merge_lists(B, C):
    """Merge two individually sorted lists B and C into one sorted list.

    Stable: on ties the element from B is taken first. Inputs are not mutated.
    """
    merged = []
    bi = ci = 0
    while bi < len(B) and ci < len(C):
        if B[bi] <= C[ci]:
            merged.append(B[bi])
            bi += 1
        else:
            merged.append(C[ci])
            ci += 1
    # One side is exhausted; the remainder of the other is already sorted.
    merged.extend(B[bi:])
    merged.extend(C[ci:])
    return merged
# generate list
def gen_list(size):
    """Return a list of `size` random integers drawn uniformly from [0, 100]."""
    random.seed()  # reseed from system entropy, as the original did
    return [random.randint(0, 100) for _ in range(size)]
def main():
    """Benchmark merge sort on an already-sorted random list sized by argv[1]."""
    # BUG FIX: sys.argv always contains at least the script name, so the old
    # "len(sys.argv) < 1" check could never fire; a missing size argument
    # means fewer than two entries.
    if len(sys.argv) < 2:
        print("Error. Not enough arguments")
        return
    size = int(sys.argv[1])

    ticks1 = time.time()
    values = gen_list(size)  # renamed from "list" to stop shadowing the builtin
    ticks2 = time.time()
    values.sort()  # pre-sort so merge_sort below runs on already-sorted input
    # formatting idea came from: https://stackoverflow.com/questions/8595973/truncate-to-three-decimals-in-python
    print("Total time to generate list in seconds: " + str('%.3f'%(ticks2 - ticks1)) + '.')

    ticks1 = time.time()
    values = merge_sort(values)
    ticks2 = time.time()
    print("Total merge sort time taken in seconds: " + str('%.3f'%(ticks2 - ticks1)) + '.')
    return


if __name__ == '__main__':
    main()
| null | already_sorted_merge_sort.py | already_sorted_merge_sort.py | py | 2,051 | python | en | code | null | code-starcoder2 | 51 |
460143201 | #Apresentação
def binario_para_decimal(numero: int) -> int:
    """Convert an int whose decimal digits are binary digits (max 4) to decimal.

    E.g. 1011 -> 11. Digits are read positionally; like the original script,
    no validation is performed on non-binary digits or on negative input.
    """
    decimal = 0
    for posicao in range(4):
        digito = (numero // 10 ** posicao) % 10
        decimal += digito * 2 ** posicao
    return decimal


if __name__ == '__main__':
    # Interactive flow kept identical to the original script; guarded so
    # importing this module does not block on input().
    print('Conversor de binario para decimal')
    print('---------------------------------')
    x = int(input('Digite o numero binario com no maximo 4 digitos: '))
    print('O valor desse numero em decimal é igual a:', binario_para_decimal(x))
| null | Python/Fabio lista 1/Fabio_01_Q31.py | Fabio_01_Q31.py | py | 707 | python | en | code | null | code-starcoder2 | 50 |
508787170 | from pulp import *
import time as time
import numpy as np
a = 10  # NOTE(review): unused module-level constant -- candidate for removal

# Column indexes into each node spec: [cpu, memory, pod capacity].
NODE_CPU_INDEX = 0
NODE_MEMORY_INDEX = 1
NODE_POD_SPACE_INDEX = 2
# Column indexes into each pod spec: [cpu, memory].
POD_CPU_INDEX = 0
POD_MEMORY_INDEX = 1

# Sample data: three pods requesting (cpu, memory)...
podList = [
    [10, 3],
    [10, 1],
    [10, 3]
]
# ...and two nodes offering (cpu, memory, pod slots).
nodeList = [
    [30, 5, 9],
    [40, 3, 7]
]
def schedule_solve(podList, nodeList, VERBOSE = False):
    """Bin-pack pods onto nodes with an ILP minimising the number of nodes used.

    Returns a nodeNum x podNum 0/1 matrix where entry [r][c] == 1 means pod c
    is assigned to node r.
    """
    print("====python func=====")
    podNum = len(podList)
    nodeNum = len(nodeList)
    print(podList)
    print(nodeList)
    nRow = list(range(nodeNum))
    pCol = list(range(podNum))
    # choices[r][c] == 1 iff pod c is placed on node r; the "choice" prefix
    # is parsed back out of the variable names below, so it must not change.
    choices = LpVariable.matrix("choice", (nRow, pCol), 0, 1, LpInteger)
    # nodeOccupation[r] == 1 iff node r hosts at least one pod.
    nodeOccupation = LpVariable.matrix("node", nRow, 0, 1, LpInteger)
    prob = LpProblem("lp", LpMinimize)
    # Objective: use as few nodes as possible.
    prob += lpSum(nodeOccupation), "objective function"
    # Every pod must land on exactly one node.
    for c in pCol:
        prob += lpSum([choices[r][c] for r in nRow]) == 1, ""
    for r in nRow:
        # Any pod on node r forces nodeOccupation[r] to 1.
        for c in pCol:
            prob += choices[r][c] <= nodeOccupation[r], ""
        # Capacity constraints: CPU, memory, and pod-slot count.
        prob += lpSum([podList[c][POD_CPU_INDEX] * choices[r][c] for c in pCol]) <= nodeList[r][NODE_CPU_INDEX], ""
        prob += lpSum([podList[c][POD_MEMORY_INDEX] * choices[r][c] for c in pCol]) <= nodeList[r][NODE_MEMORY_INDEX], ""
        prob += lpSum([choices[r][c] for c in pCol]) <= nodeList[r][NODE_POD_SPACE_INDEX]
    prob.solve()
    print(LpStatus[prob.status])
    print("objective:",value(prob.objective))
    if VERBOSE:
        print(prob)
    # Decode solver values back into an assignment matrix via the
    # "choice_<row>_<col>" variable-name convention.
    result = [[0] * podNum for _ in range(nodeNum)]
    for v in prob.variables():
        parts = v.name.split('_')
        if parts[0] == 'choice':
            result[int(parts[1])][int(parts[2])] = v.varValue
    print(result)
    print("====python func=====")
    return result
# start = time.time()
# podList = np.random.rand(2, 2)
# nodeList = np.random.rand(1, 3)
# schedule_solve(podList, nodeList)
# print(time.time() - start)
| null | lp-solver.py | lp-solver.py | py | 2,294 | python | en | code | null | code-starcoder2 | 50 |
585383765 | import tarfile
import numpy as np
from glob import glob
from .utilities import *
# PARSING UTILITY FUNCTIONS ====================================================
def get_files(path, name):
    """Return all <name>*.tar.xz then <name>*.json files found under *path*."""
    patterns = (f"{path}**/{name}*.tar.xz", f"{path}**/{name}*.json")
    matches = []
    for pattern in patterns:
        matches.extend(glob(pattern))
    return matches
def get_coords(R):
    """Return every hexagonal [u, v, w] coordinate with u + v + w == 0.

    Each component ranges over (-R, R) exclusive of the bounds +/-R.
    """
    span = range(-R + 1, R)
    coords = []
    # For each (u, v) there is at most one valid w, namely -(u + v).
    for u in span:
        for v in span:
            w = -(u + v)
            if w in span:
                coords.append([u, v, w])
    return coords
def get_struct(c):
    """Convert a raw cell record into a (pop, type, volume, cycle) tuple.

    The cycle field is the rounded mean of c[-1]; -1 marks an empty cycle list.
    """
    pop, cell_type, volume = c[1], c[2], np.round(c[4])
    cycles = c[-1]
    cycle = np.round(np.mean(cycles)) if cycles else -1
    return (pop, cell_type, volume, cycle)
def extract_setup_fields(jsn):
    """Pull radius, height, timepoint times, population names, and type ids
    from a simulation-setup JSON dict."""
    size = jsn["config"]["size"]
    R = size["radius"]
    H = size["height"]
    time = [tp["time"] for tp in jsn["timepoints"]]
    pops = [p[0] for p in jsn["config"]["pops"]]
    types = list(range(7))  # the seven fixed cell-type ids
    return R, H, time, pops, types
def extract_agents_fields(lst, coords, H, N):
    """Build a (z, location, cell-slot) structured array of cell features.

    lst    : list of (coord, cells) pairs; coord is [u, v, w, z] and each
             cell's index 3 is its slot within the location (assumed < N --
             TODO confirm against the simulation format).
    coords : list of [u, v, w] offsets used to index the second axis.
    H, N   : lattice height and maximum cells per location.
    Unoccupied slots are left at -1.
    """
    container = np.empty((2*H - 1, len(coords), N),
        dtype = {
            'names': ['pop', 'type', 'volume', 'cycle'],
            'formats': [np.int8, np.int8, np.int16, np.int16]
        })
    # Sentinel for "no cell here".
    container[:] = -1
    # ndarray.itemset() was removed in NumPy 2.0; indexed assignment of a
    # tuple into a structured array is the supported equivalent.
    for coord, cells in lst:
        z = coord[-1] + H - 1          # shift z so the axis starts at 0
        c = coords.index(coord[0:-1])  # location index within the lattice
        for cell in cells:
            container[z, c, cell[3]] = get_struct(cell)
    return container
# GENERAL PARSING ==============================================================
def parse_simulations(name, data_path, result_path, exclude):
    """Parses simulation files.

    Walks every <name>*.tar.xz / <name>*.json under data_path, parses each
    simulation found, and pickles the combined result mirrored into
    result_path. For archives, replicate seeds listed in *exclude* are
    skipped. Relies on is_tar/load_json (and, presumably, pickle) coming
    from the star import of .utilities above -- confirm.
    """
    for file in get_files(data_path, name):
        # Create empty arrays.
        container = {
            "agents": [],
            "environments": {
                "glucose": [],
                "oxygen": [],
                "tgfa": []
            }
        }

        if is_tar(file):
            # Parse .tar.xz file.
            tar = tarfile.open(file, "r:xz")

            # Iterate through all members of the tar.
            for i, member in enumerate(tar.getmembers()):
                # The replicate seed is encoded as the trailing _<seed>
                # of the member file name.
                seed = int(member.name.replace(".json", "").split("_")[-1])

                # Skip if seed is in exclude list.
                if seed in exclude:
                    continue

                print(f"  > {member.name}")
                parse_simulation(load_json(member, tar=tar), container)
        else:
            # Parse .json file
            parse_simulation(load_json(file), container)

        # Compile data. float16 keeps the molecule grids compact.
        # NOTE(review): container["setup"] is written by parse_simulation, so
        # an input file with no simulations would raise KeyError here.
        data = {
            "agents": np.array(container['agents']),
            "environments": { x: np.array(container['environments'][x], dtype=np.float16)
                for x in container["environments"].keys() },
            "setup": container["setup"]
        }

        # Pickle results next to the input, with the data_path prefix
        # swapped for result_path and the extension swapped for .pkl.
        save_path = file.replace(".tar.xz", ".pkl").replace(".json", ".pkl").replace(data_path, result_path)
        pickle.dump(data, open(save_path, "wb"), protocol=4)
def parse_simulation(jsn, container):
    """Append one simulation's agent and molecule data to the shared container."""
    R, H, time, pops, types = extract_setup_fields(jsn)
    coords = get_coords(R)
    N = 6  # maximum cells tracked per lattice location
    timepoints = jsn["timepoints"]
    # Agents: one structured array per timepoint.
    container["agents"].append(
        [extract_agents_fields(tp["cells"], coords, H, N) for tp in timepoints])
    # Environments: raw molecule grids per timepoint.
    envs = container["environments"]
    for molecule in ("glucose", "oxygen", "tgfa"):
        envs[molecule].append([tp["molecules"][molecule] for tp in timepoints])
    # Record the setup once, from the first simulation parsed.
    if "setup" not in container:
        container["setup"] = {
            "radius": R,
            "height": H,
            "time": time,
            "pops": pops,
            "types": types,
            "coords": coords
        }
| null | scripts/parse.py | parse.py | py | 4,195 | python | en | code | null | code-starcoder2 | 50 |
40251915 | #%% Project Euler Problem 6
# Justin Kim
# Difference between the square of the sum and the sum of the squares
def problem6(n):
    """Difference between the square of the sum and the sum of the squares
    of the integers 0..n (Project Euler problem 6)."""
    numbers = range(n + 1)
    square_of_sum = sum(numbers) ** 2
    sum_of_squares = sum(i * i for i in numbers)
    return square_of_sum - sum_of_squares


ans = problem6(100)
417326244 | # Solved by Sunghyun Cho on August 25th, 2018.
def centroid(points):
    """Return (mean x, mean y) of a non-empty sequence of (x, y) pairs."""
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    return sum(xs) / len(xs), sum(ys) / len(ys)


if __name__ == "__main__":
    # Same stdin protocol as before: a count, then one "x y" pair per line.
    # Guarded so importing this module does not block on input().
    houseNum = int(input())
    points = []
    for _ in range(houseNum):
        arr = input().split()
        points.append((float(arr[0]), float(arr[1])))
    cx, cy = centroid(points)
    # Output format unchanged: the two means separated by a space.
    print(cx, cy)
# ????? | null | 4. 우물왕 김배찌/Q4.py | Q4.py | py | 263 | python | en | code | null | code-starcoder2 | 50 |
355276877 | import argparse
import datetime
import os
import pickle
import uuid
import torch
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from moviepy.editor import VideoFileClip
from torch.autograd import Variable
from torchvision import transforms
from .model import EncoderCNN, DecoderRNN
def to_var(x, volatile=False):
    """Move tensor *x* to the GPU when one is available and return it.

    `volatile` is kept only for signature compatibility: torch.autograd.Variable
    and the volatile flag have been deprecated no-ops since PyTorch 0.4, so the
    tensor is now returned directly instead of being wrapped.
    """
    if torch.cuda.is_available():
        x = x.cuda()
    return x
def transform_image(image, transform=None):
    """Resize a PIL image to 224x224; optionally apply *transform* and add a
    leading batch dimension."""
    resized = image.resize([224, 224], Image.LANCZOS)
    if transform is None:
        return resized
    return transform(resized).unsqueeze(0)
def load_model(vocab_path, embed_size, hidden_size, num_layers, encoder_path, decoder_path):
    """Load the vocabulary and trained encoder/decoder weights.

    Returns (encoder, decoder, vocab, transform) where transform is the
    evaluation-time image normalisation pipeline (ImageNet statistics).
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))])

    # Load vocabulary wrapper
    with open(vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    # Build models in evaluation mode (BN uses moving mean/variance,
    # dropout disabled).
    encoder = EncoderCNN(embed_size)
    encoder.eval()
    decoder = DecoderRNN(embed_size, hidden_size, len(vocab), num_layers)
    # BUG FIX: the decoder was previously left in train mode during inference.
    decoder.eval()

    # Load the trained parameters; remap to CPU when no GPU is present so
    # checkpoints saved on GPU machines still load.
    map_location = None if torch.cuda.is_available() else 'cpu'
    encoder.load_state_dict(torch.load(encoder_path, map_location=map_location))
    decoder.load_state_dict(torch.load(decoder_path, map_location=map_location))
    return encoder, decoder, vocab, transform
def caption_video(encoder, decoder, vocab, transform, video, fps=0.1, save=False, image_dir=None):
    """Caption frames sampled from *video* at *fps*; returns [(timestamp, caption), ...].

    encoder/decoder: trained EncoderCNN / DecoderRNN models.
    vocab: vocabulary wrapper exposing idx2word.
    video: a moviepy clip (anything with iter_frames).
    save: NOTE(review) compared against the string 'true' below, not a bool --
          presumably fed from an argparse string flag; confirm with callers.
    image_dir: directory for annotated frames when save == 'true'.
    """
    # Image preprocessing
    report = []
    for i, frame in enumerate(video.iter_frames(fps=fps)):
        # Frame index -> wall-clock offset within the video.
        time_stamp = datetime.timedelta(seconds=i / fps)
        image = Image.fromarray(frame)
        image = transform_image(image, transform)
        # volatile= has been a deprecated no-op since PyTorch 0.4.
        image_tensor = to_var(image, volatile=True)

        # If use gpu
        if torch.cuda.is_available():
            encoder.cuda()
            decoder.cuda()

        # Generate caption from image
        feature = encoder(image_tensor)
        sampled_ids = decoder.sample(feature)
        sampled_ids = sampled_ids.cpu().data.numpy()

        # Decode word_ids to words, dropping the <start>/<end> sentinels and
        # stopping at the first <end>.
        sampled_caption = []
        for word_id in sampled_ids:
            word = vocab.idx2word[word_id]
            if word != '<start>' and word != '<end>':
                sampled_caption.append(word)
            if word == '<end>':
                break
        sentence = ' '.join(sampled_caption)
        report.append((str(time_stamp), sentence))
        print(time_stamp, sentence)

        # Print out image and generated caption
        if save == 'true' and image_dir:
            plt.axis('off')
            plt.imshow(frame)
            plt.title(sentence)
            # NOTE(review): 'bbox_images' is likely a typo for
            # bbox_inches='tight' -- confirm before relying on tight crops.
            plt.savefig(os.path.join(image_dir, str(uuid.uuid4()) + str(i)), bbox_images='tight')
    return report
| null | lib/caption.py | caption.py | py | 2,904 | python | en | code | null | code-starcoder2 | 51 |
228051984 | #WebGen for Windows by Liam Platt
import os
def GetSiteName():
    '''Prompt repeatedly for a site name until the user confirms; returns it lower-cased.'''
    while True:
        SiteName = input("Enter name of web site: ").lower()
        print()
        confirm = input("You entered '"+SiteName.title()+"', is this correct? [Y/N] ").lower()
        print()
        if confirm == "y":
            return SiteName
def MakeFolder(FolderName):
    '''Creates the site directory and its assets subfolder.

    exist_ok=True keeps a re-run on an existing site name from crashing
    with FileExistsError (the previous behaviour).
    '''
    os.makedirs(FolderName, exist_ok=True)
    os.makedirs(FolderName+"/assets", exist_ok=True)
    if os.path.exists(FolderName):
        print("["+FolderName+"] Successfully created folder")
    if os.path.exists(FolderName+"/assets"):
        print("["+FolderName+"/assets] Successfully created folder")
    print()
    return
def GetHTMLVer():
    '''Ask whether to use HTML5; returns "5" for yes, "4" for no.

    Re-prompts on invalid input: the old version returned None, which made
    MakeIndex write no doctype yet still report success.
    '''
    while True:
        answer = input("Would you like to use HTML5 (recommended)? [Y/N] ").lower()
        print()
        if answer == "y":
            return "5"
        if answer == "n":
            return "4"
def MakeIndex(FolderName, HTMLVer):
    '''Creates index.html with the doctype for the chosen HTML version ("5" or "4").'''
    # "with" guarantees the handle is closed on every path (the original
    # leaked it when HTMLVer was neither "5" nor "4").
    with open(FolderName+"/index.html", "w") as Index:
        if HTMLVer == "5":
            Index.write("<!DOCTYPE html>")
        elif HTMLVer == "4":
            Index.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">')
    print("["+FolderName+"/index.html] Successfully created file")
    print("["+FolderName+"/index.html] Successfully updated file with HTML doctype")
    return
def PopulateIndex(FolderName):
    '''Appends the base HTML skeleton to index.html.'''
    # BUG FIX: the original opened the file for append and never closed it,
    # so the markup could sit in an unflushed buffer; "with" closes it.
    with open(FolderName+"/index.html", "a") as Index:
        Index.write('''
<html>
<head>
<meta charset="utf-8" />
<link rel="stylesheet" type="text/css" href="assets/styles.css">
<title>Home | '''+FolderName.title()+'''</title>
</head>
<body>
<h1>Site Successfully Generated!</h1>
<p>You can alter this page by editing the <code>'''+FolderName.lower()+'''/index.html</code> file.</p>
</body>
</html>''')
    print("["+FolderName+"/index.html] Successfully updated file with standard HTML tags")
    return
def MakeCSS(FolderName):
    '''Creates an empty styles.css inside the assets folder.'''
    # Opening in "w" mode truncates/creates the file; nothing is written.
    with open(FolderName+"/assets/styles.css", "w"):
        pass
    print("["+FolderName+"/assets/styles.css] Successfully created file")
    return
if __name__ == '__main__':
    # Interactive generator flow: ask for a site name, build the folder tree,
    # pick a doctype, then write index.html and an empty stylesheet.
    Site_Name = GetSiteName()
    MakeFolder(Site_Name)
    HTML_Ver = GetHTMLVer()
    MakeIndex(Site_Name,HTML_Ver)
    PopulateIndex(Site_Name)
    MakeCSS(Site_Name)
    print("\nCompleted!")
| null | WebGen.py | WebGen.py | py | 2,713 | python | en | code | null | code-starcoder2 | 51 |
310143459 | from django.conf.urls import url
from mysite.blog.views import post_list, post_detail, post_new, post_edit
# URL routes for the blog app.
# NOTE(review): django.conf.urls.url was deprecated in Django 2.0 and removed
# in Django 4.0 -- migrate to django.urls.re_path (or path) when upgrading.
urlpatterns = [
    url(r'^$', post_list),                                             # index: list of posts
    url(r'^post/(?P<pk>[0-9]+)/$', post_detail),                       # post detail by primary key
    url(r'^post/new/$', post_new, name='post_new'),                    # create a new post
    url(r'^post/(?P<pk>[0-9]+)/edit/$', post_edit, name='post_edit'),  # edit an existing post
]
| null | mysite/blog/urls.py | urls.py | py | 325 | python | en | code | null | code-starcoder2 | 51 |
351697920 | from pytorch.finetune.imports import *
from system.imports import *
from pytorch.finetune.level_9_transforms_main import prototype_transforms
class prototype_schedulers(prototype_transforms):
    '''
    Learning-rate scheduler setters for the pytorch backend.

    Each lr_* method records the chosen scheduler in self.system_dict and
    prints a short summary. The summary block that was previously duplicated
    verbatim in every method is factored into _print_scheduler_summary().

    Args:
        verbose (int): verbosity level (0 = silent, 1 = print details)
    '''
    @accepts("self", verbose=int, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def __init__(self, verbose=1):
        super().__init__(verbose=verbose)

    #@TraceFunction(trace_args=True, trace_rv=True)
    def _print_scheduler_summary(self):
        '''
        Print the name and params of the scheduler currently in system_dict.
        '''
        scheduler = self.system_dict["hyper-parameters"]["learning_rate_scheduler"]
        self.custom_print("Learning rate scheduler")
        self.custom_print("    Name: {}".format(scheduler["name"]))
        self.custom_print("    Params: {}".format(scheduler["params"]))
        self.custom_print("")

    ###########################################################################
    @accepts("self", post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def lr_fixed(self):
        '''
        Keep the learning rate fixed.

        Returns:
            None
        '''
        self.system_dict = scheduler_fixed(self.system_dict)
        self._print_scheduler_summary()

    ###########################################################################
    @warning_checks(None, None, gamma=["gt", 0.01, "lt", 1], last_epoch=None, post_trace=False)
    @error_checks(None, ["gt", 0], gamma=["gt", 0], last_epoch=None, post_trace=False)
    @accepts("self", int, gamma=float, last_epoch=int, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def lr_step_decrease(self, step_size, gamma=0.1, last_epoch=-1):
        '''
        Decrease the learning rate in regular steps.

        Args:
            step_size (int): Epoch interval between decreases
            gamma (float): Multiplier applied at every step
            last_epoch (int): Epoch after which the rate stops decreasing

        Returns:
            None
        '''
        self.system_dict = scheduler_step(self.system_dict, step_size, gamma=gamma, last_epoch=last_epoch)
        self._print_scheduler_summary()

    ###########################################################################
    @warning_checks(None, None, gamma=["gt", 0.01, "lt", 1], last_epoch=None, post_trace=False)
    @error_checks(None, ["inc", None], gamma=["gt", 0], last_epoch=None, post_trace=False)
    @accepts("self", [list, int], gamma=float, last_epoch=int, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def lr_multistep_decrease(self, milestones, gamma=0.1, last_epoch=-1):
        '''
        Decrease the learning rate at irregular, user-chosen epochs.

        Args:
            milestones (list): Epochs at which the learning rate is decreased
            gamma (float): Multiplier applied at every milestone
            last_epoch (int): Dummy variable

        Returns:
            None
        '''
        self.system_dict = scheduler_multistep(self.system_dict, milestones, gamma=gamma, last_epoch=last_epoch)
        self._print_scheduler_summary()

    ###########################################################################
    @warning_checks(None, ["gt", 0.01, "lt", 1], last_epoch=None, post_trace=False)
    @error_checks(None, ["gt", 0], last_epoch=None, post_trace=False)
    @accepts("self", [float, int], last_epoch=int, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def lr_exponential_decrease(self, gamma, last_epoch=-1):
        '''
        Decrease the learning rate exponentially every step.

        Args:
            gamma (float): Multiplier applied at every step
            last_epoch (int): Epoch after which the rate stops decreasing

        Returns:
            None
        '''
        self.system_dict = scheduler_exponential(self.system_dict, gamma, last_epoch=last_epoch)
        self._print_scheduler_summary()

    ###########################################################################
    @warning_checks(None, mode=None, factor=["gt", 0.01, "lt", 1], patience=["lt", 20], verbose=None, threshold=None,
        threshold_mode=None, cooldown=None, min_lr=None, epsilon=["lt", 0.0001], post_trace=False)
    @error_checks(None, mode=["in", ["min", "max"]], factor=["gt", 0], patience=["gt", 0], verbose=None, threshold=["gte", 0],
        threshold_mode=["in", ["rel", "abs"]], cooldown=["gte", 0], min_lr=["gte", 0], epsilon=["gte", 0], post_trace=False)
    @accepts("self", mode=str, factor=[float, int], patience=int, verbose=bool, threshold=[float, int],
        threshold_mode=str, cooldown=int, min_lr=[float, list, int], epsilon=float, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def lr_plateau_decrease(self, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, \
        threshold_mode='rel', cooldown=0, min_lr=0, epsilon=1e-08):
        '''
        Decrease the learning rate when a monitored metric plateaus.

        Args:
            mode (str): 'min' reduces when the monitored quantity (loss) stops
                decreasing; 'max' when it (accuracy) stops increasing.
            factor (float): Multiplier applied at every reduction
            patience (int): Epochs to wait before reducing
            verbose (bool): If True, print computations and wait times
            threshold (float): Significance threshold for metric changes
            threshold_mode (str): 'rel' or 'abs' threshold interpretation
            cooldown (int): Epochs to wait after a reduction before resuming
            min_lr (float): Floor below which the rate is never reduced
            epsilon (float): Small value to avoid division by zero

        Returns:
            None
        '''
        self.system_dict = scheduler_plateau(self.system_dict, mode=mode, factor=factor, patience=patience, verbose=verbose,
            threshold=threshold, threshold_mode=threshold_mode, cooldown=cooldown, min_lr=min_lr, epsilon=epsilon)
        self._print_scheduler_summary()
    ###########################################################################
95110167 | import numpy as np
import time
import pygame
from functools import reduce
import random as ra
from random import randint as ri
import math as ma
from pygame.locals import *
from oop_phy_pygame import *
# initialize pygame
pygame.init()
# scale (pixels per unit)
p = 1.91
scax = scay = 50 #40*p#87.5*p
# view shift, in % of the whole image
indx, indy = 0, 0 # percent
# mass of each body
m1 = -1 #ra.randint(3, 7)
m2 = 1*10**0.5 #ra.randint(3, 7)
# body positions
xp1, yp1 = 0, 0 #ra.randint(-3, 3), ra.randint(-3, 3) -2.5
xp2, yp2 = 0, 3 #ra.randint(-3, 3), ra.randint(-3, 3)
# initial velocities
xv1, yv1 = 0, 0 #ra.randint(-3, 3)*10**-4, ra.randint(-3, 3)*10**-4 5.3153
xv2, yv2 = 4, 0 #ra.randint(-3, 3)*10**-4, ra.randint(-3, 3)*10**-4
# integration time step
step = 1*10**-6.75
# world boundaries ((0, 0) = unbounded)
border = (0, 0) #(16, 8)
# whether each body reacts to the other bodies
react1 = 1
react2 = 1 #
# whether the other bodies react to this body
reall1 = 1
reall2 = 1
# body colors (RGB)
col1 = (0, 0, 255)
col2 = (255, 0, 0)
# radius of the drawn trail points
rpath = 1
# draw radius of the bodies
r1 = r2 = r3 = r4 = r_n = 10
# draw-body flags
draw1 = 1
draw2 = 1 #
draw_n = 1
# maximum number of points kept in the trail array
# NOTE(review): "max" shadows the builtin; renaming would touch code, so left as-is
max = 750
# whether to connect the trail points with lines
conn = bool( 1 )
# panning speed / toggle
ind_n = 0.005
ind_c = 1
# zooming speed / toggle
sca_n = 0.001
sca_c = 1
# draw-velocity-vector flags
dr_vec1 = 1 #
dr_vec2 = 1
dr_vec_n = 1
# line width of the initial-velocity vector
# when creating a new body
st_vec_r = 6
# draw frequency (trail vs. bodies)
dr_fr_path = 50 #+ 4*52
dr_fr_body = 300
# load background image, set up the screen
scr = (1540, 801) #(1080, 2340)
path, bgr = main_relise("space2.jpg", scr)
star = img_imp("star2.png", 50, (255, 255, 255))
# text rendering setup
dr_txt = bool( 1 )
f_siz = 30
num_symol = 6
st_point = (15, 15)
fram_c = (127, 127, 127)
font, bla, black = font_rel(f_siz, num_symol, 1, fram_c)
# parameters for the "change draw frequency" show
cha = False
conv_n = [True for _ in range(3)]
end_n = [True for _ in range(2)]
conv_v = 5.125
end_v = 20.5
i_conv = i_end = end_in = 0
# create body instances
a = body(m1, [xp1, yp1], [xv1, yv1], (step, border, react1, reall1), (col1, rpath, r1, draw1, dr_vec1, max, conn))
b = body(m2, [xp2, yp2], [xv2, yv2], (step, border, react2, reall2), (col2, rpath, r2, draw2, dr_vec2, max, conn), model=star)
# list of all bodies that
# will be used in the simulation
all_bodies = [a, b]
# pack the configuration into tuples for main_f
txt = dr_txt, st_point, font, bla, black
draw = scr, path, bgr, dr_fr_path, dr_fr_body, max, conn
correction = scax, scay, indx, indy, ind_n, ind_c, sca_n, sca_c
show = cha, conv_n, end_n, conv_v, end_v, i_conv
phy = step, border, rpath, r_n, draw_n, dr_vec_n, st_vec_r
main_f(all_bodies, phy, draw, txt, show, correction)
| null | oop_phy_pyg_values.py | oop_phy_pyg_values.py | py | 3,205 | python | en | code | null | code-starcoder2 | 51 |
6415726 | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from pathlib import Path
from datadog_checks.dev.tooling.constants import get_root, set_root
from datadog_checks.dev.tooling.datastructures import JSONDict
from datadog_checks.dev.tooling.manifest_validator import get_all_validators
def test_manifest_ok():
    """Run every v1 manifest validator over a known-good manifest; expect no failures or fixes."""
    # A complete, valid v1 manifest for the Active Directory integration.
    manifest = JSONDict(
        {
            "categories": ["os & system", "log collection"],
            "creates_events": False,
            "description": "Collect and graph Microsoft Active Directory metrics",
            "display_name": "Active Directory",
            "guid": "ba667ff3-cf6a-458c-aa4b-1172f33de562",
            "is_public": True,
            "maintainer": "help@datadoghq.com",
            "manifest_version": "1.0.0",
            "metric_prefix": "active_directory.",
            "metric_to_check": "active_directory.dra.inbound.objects.persec",
            "name": "active_directory",
            "public_title": "Datadog-Active Directory Integration",
            "short_description": "Collect and graph Microsoft Active Directory metrics",
            "support": "core",
            "supported_os": ["windows"],
            "type": "check",
            "integration_id": "active-directory",
            "assets": {
                "configuration": {"spec": "assets/configuration/spec.yaml"},
                "monitors": {},
                "dashboards": {"Active Directory": "assets/dashboards/active_directory.json"},
                "service_checks": "assets/service_checks.json",
                "logs": {"source": "ruby"},
                "metrics_metadata": "metadata.csv",
            },
        }
    )

    # The repo root is five directories above this test file.
    root = Path(os.path.realpath(__file__)).parent.parent.parent.parent.parent.absolute()
    current_root = get_root()
    set_root(str(root))
    try:
        validators = get_all_validators(False, "1.0.0")
        for validator in validators:
            validator.validate('active_directory', manifest, False)
            # A good manifest must neither fail nor require auto-fixes.
            assert not validator.result.failed, validator.result
            assert not validator.result.fixed
    finally:
        # Always restore the previous root so other tests are unaffected.
        set_root(current_root)
308466528 | # A test file for HTML reporting by coverage.
def one(x):
    # Fixture: one(1) takes the if-branch, so the else arm is never executed.
    if x < 2:
        a = 3
    else:
        a = 4

one(1)

def two(x):
    # Fixture: the implicit else of this if branches straight to function exit.
    if x:
        a = 5

two(1)

def three_way():
    # Fixture: for-else is a three-way branch (loop body / break / exhaust-else).
    for i in range(10):
        if i == 3:
            break
    else:
        return 23
    return 17

three_way()
| null | test/farm/html/src/b.py | b.py | py | 434 | python | en | code | null | code-starcoder2 | 51 |
359433116 | # whenever you import a file, python runs the code in that file, thats
# why __name__ variable will be the the name of the file and not __main__
# now if python is running a file directly __name__ == '__main__'
# This way you can run code you only want to run if the file is being ran directly
#
import asyncio_playground

async def main():
    # NOTE(review): `main` is defined but never scheduled in this file;
    # presumably the importer runs it via asyncio.run(main()) — confirm.
    result = await asyncio_playground.find_divisibles(100, 99)
    print (result)
# Purpose: read two real numbers, then compute and print the larger one.
# Programmer: Hugo Leça Ribeiro
# Written: 2019-10-24

def Pmaior(n1, n2):
    # Prints which of the two values is larger (messages stay in Portuguese,
    # as they are user-facing runtime output).
    if (n1 > n2):
        print("O maior número entre os dois é: ", n1)
    elif n2 > n1:
        print("O maior número entre os dois é: ", n2)
    else:
        print("Os números são iguais")

n1 = float(input("Digite aqui o valor do primeiro número: "))
n2 = float(input("Digite aqui o valor do segundo número: "))
Pmaior(n1, n2)
| null | LT01_EstMod19.py | LT01_EstMod19.py | py | 503 | python | en | code | null | code-starcoder2 | 51 |
633401312 | # -*- coding: utf-8 -*-
# Author: Ji Yang <jiyang.py@gmail.com>
# License: MIT
import random
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
# One of three random geometries is picked per sample (see RandomChoice below).
# 1) Reflect-pad then rotate/shear and crop back to 192px.
padding = transforms.Compose([transforms.Resize(160),
                              transforms.Pad(30, padding_mode='reflect'),
                              transforms.RandomRotation((-8, 8)),
                              transforms.RandomApply([transforms.RandomAffine(0, shear=8)]),
                              transforms.RandomCrop(192)])

# 2) Plain upscale to 192px with mild shear/rotation.
rescaling = transforms.Compose([transforms.Resize(192),
                                transforms.RandomApply([transforms.RandomAffine(0, shear=8)]),
                                transforms.RandomRotation((-8, 8))])

# 3) Zoom-in: take an 84px crop, blow it up, reflect-pad, re-crop to 192px.
crop_rescaling = transforms.Compose([transforms.RandomCrop(84),
                                     transforms.Resize(160),
                                     transforms.Pad(30, padding_mode='reflect'),
                                     transforms.RandomRotation((-6, 6)),
                                     transforms.RandomCrop(192)])

# Full training pipeline: random geometry + flip + colour jitter + ImageNet
# mean/std normalisation.
strong_augmentation_transform = transforms.Compose([transforms.RandomChoice([padding, rescaling, crop_rescaling]),
                                                    transforms.RandomHorizontalFlip(p=0.5),
                                                    transforms.RandomApply([transforms.ColorJitter(brightness=0.1,
                                                                                                   contrast=0.1,
                                                                                                   saturation=0.1,
                                                                                                   hue=0.1)]),
                                                    transforms.ToTensor(),
                                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                                         [0.229, 0.224, 0.225])
                                                    ])

# Geometry + flip only; note this one does NOT normalise.
basic_augmentation_transform = transforms.Compose([transforms.RandomChoice([padding, rescaling]),
                                                   transforms.RandomHorizontalFlip(p=0.5),
                                                   transforms.ToTensor()
                                                   ])

# Deterministic resize + normalisation for validation/test samples.
val_test_transform = transforms.Compose([transforms.Resize(192),
                                         transforms.ToTensor(),
                                         transforms.Normalize([0.485, 0.456, 0.406],
                                                              [0.229, 0.224, 0.225])
                                         ])
class SaltDataset(Dataset):
    """TGS Salt dataset yielding 192px image (and mask) tensors.

    Expects `image` (and `mask` when training) as numpy arrays with one
    sample per leading index; image values appear to be scaled to [0, 1]
    (they are multiplied by 255 before PIL conversion) — confirm upstream.
    """
    def __init__(self, image, mask=None, transform=strong_augmentation_transform, is_train=True):
        self.image = image          # numpy array of input images
        self.mask = mask            # segmentation masks; None at inference
        self.transform = transform  # torchvision transform, or None for val/test
        self.is_train = is_train    # when True, __getitem__ also returns a mask

    def __len__(self):
        return self.image.shape[0]

    def __getitem__(self, idx):
        image = Image.fromarray(np.uint8(self.image[idx] * 255))
        # Seed the RNG once, then re-seed with the same value before the mask
        # transform below so image and mask receive identical random
        # augmentations and remain spatially aligned.
        seed = random.randint(6, 6 ** 6)
        random.seed(seed)
        if self.transform is not None:
            image = self.transform(image)
        else:
            image = val_test_transform(image)
        if self.is_train:
            mask = Image.fromarray(np.uint8(self.mask[idx]))
            random.seed(seed)
            if self.transform is not None:
                mask = self.transform(mask)
            else:
                mask = val_test_transform(mask)
            mask = (mask > 0.5).float()  # round resize artifact
            return {'image': image, 'mask': mask}
        return {'image': image}
| null | salt_dataset_192.py | salt_dataset_192.py | py | 3,801 | python | en | code | null | code-starcoder2 | 51 |
# SWEA 1232: evaluate an arithmetic expression tree read from stdin.
for tc in range(1, 11):
    n = int(input())
    tree = [[0]]  # 1-based node table; index 0 is a dummy entry
    for _ in range(n):
        # Leaf: [idx, value]; internal node: [idx, op, left_idx, right_idx]
        tree.append(list(input().split()))
    # Evaluate bottom-up: children always have larger indices than parents,
    # so iterating from the last node downward resolves operands first.
    for i in range(len(tree) - 1, 0, -1):
        if len(tree[i]) == 4:
            left = int(tree[int(tree[i][2])][1])
            right = int(tree[int(tree[i][3])][1])
            if tree[i][1] == '-':
                tree[i] = (i, left - right)
            elif tree[i][1] == '+':
                tree[i] = (i, left + right)
            elif tree[i][1] == '*':
                tree[i] = (i, left * right)
            else:
                tree[i] = (i, left / right)
    # Root result lives at index 1; truncate the float from '/' to int.
    print("#{} {}".format(tc, int(tree[1][1])))
| null | SWEA/1232-사칙연산.py | 1232-사칙연산.py | py | 647 | python | en | code | null | code-starcoder2 | 51 |
383096748 | import numpy as np
import matplotlib.pyplot as plt
import bicycledataprocessor as bdp
import canonical_system_id as csi
# This gives the proportion of the lateral force which should be added to the
# steer torque and roll torque equations in the canonical equations.
F = {}
for rider in ['Charlie', 'Jason', 'Luke']:
F[rider] = csi.whipple_state_space(rider, 1.0)[2][2:]
# find the runs that we want to id
dataset = bdp.DataSet()
dataset.open()
table = dataset.database.root.runTable
runs = []
for row in table.iterrows():
con = []
con.append(row['Rider'] in ['Jason', 'Charlie', 'Luke'])
con.append(row['Maneuver'] in ['Balance',
'Track Straight Line',
'Balance With Disturbance',
'Track Straight Line With Disturbance'])
con.append(row['Environment'] == 'Horse Treadmill')
con.append(row['corrupt'] is not True)
con.append(int(row['RunID']) > 100)
if False not in con:
runs.append(row['RunID'])
dataset.close()
idMassMats = np.zeros((len(runs), 2, 2))
idDampMats = np.zeros((len(runs), 2, 2))
idStifMats = np.zeros((len(runs), 2, 2))
speeds = np.nan * np.ones(len(runs))
thetaDelta = ['m21', 'm22', 'c21', 'c22', 'k21', 'k22']
errors = []
for i, r in enumerate(runs):
try:
trial = bdp.Run(r, dataset, filterFreq=15.)
except bdp.bdpexceptions.TimeShiftError:
errors.append(r)
except IndexError:
errors.append(r)
else:
if trial.metadata['Maneuver'].endswith('Disturbance'):
thetaPhi = ['m11', 'm12', 'c11', 'c12', 'k11', 'k12']
else:
thetaPhi = ['c11', 'c12', 'k11', 'k12']
v = trial.taskSignals['ForwardSpeed'].mean()
speeds[i] = v
g = trial.bicycleRiderParameters['g']
M, C1, K0, K2 = trial.bicycle.canonical(nominal=True)
C = C1 * v
K = K0 * g + K2 * v**2
canon = (M, C, K)
timeSeries = csi.time_series(trial, F)
M_id, C_id, K_id = csi.compute_unknowns(thetaPhi, thetaDelta,
timeSeries, canon)
idMassMats[i] = M_id
idDampMats[i] = C_id
idStifMats[i] = K_id
#forces_id = np.dot(M_id, accels) + np.dot(C_id, rates) + np.dot(K_id,
#coordinates)
#
#time = trial.taskSignals['ForwardSpeed'].time()
#fig = plt.figure()
#for i in range(2):
#ax = fig.add_subplot(2, 1, i + 1)
#ax.plot(time, forces[i], time, forces_id[i])
#ax.legend(('Experimental', 'Identified'))
#fig.show()
fig = plt.figure()
for i in range(2):
ax = fig.add_subplot(2, 6, 1 + i * 6)
ax.plot(speeds, idMassMats[:, i, 0], '.')
ax = fig.add_subplot(2, 6, 2 + i * 6)
ax.plot(speeds, idMassMats[:, i, 1], '.')
ax = fig.add_subplot(2, 6, 3 + i * 6)
ax.plot(speeds, idDampMats[:, i, 0], '.')
ax = fig.add_subplot(2, 6, 4 + i * 6)
ax.plot(speeds, idDampMats[:, i, 1], '.')
ax = fig.add_subplot(2, 6, 5 + i * 6)
ax.plot(speeds, idStifMats[:, i, 0], '.')
ax = fig.add_subplot(2, 6, 6 + i * 6)
ax.plot(speeds, idStifMats[:, i, 1], '.')
fig.show()
| null | scripts/canonicalid/fit_canonical.py | fit_canonical.py | py | 3,197 | python | en | code | null | code-starcoder2 | 51 |
156326706 | # coding:utf-8
# --author-- lanhua.zhou
import maya.cmds as cmds
import zfused_maya.node.core.check as check
import zfused_maya.node.core.clear as clear
import zfused_maya.widgets.checkwidget as checkwidget
import zfused_maya.tool.modeling.materialcheck as materialcheck
class ShadingCheck(checkwidget.CheckWidget):
def __init__(self):
super(ShadingCheck, self).__init__()
self._init()
self._check_all()
self.recheck_button.clicked.connect(self._check_all)
@classmethod
def Reset(cls):
cls.value = False
def _check_all(self):
_is_ok = True
for widget in self.allCheckWidget:
if self.auto_clear():
widget.clear()
value = widget.check()
if not value:
_is_ok = False
widget.setHidden(False)
else:
if not self.show_all():
widget.setHidden(True)
else:
widget.setHidden(False)
checkwidget.CheckWidget.value = _is_ok
print(checkwidget.CheckWidget.value)
check.Check.value = _is_ok
if _is_ok:
self.close()
def show(self):
import zfused_maya.core.restricted as restricted
import maya.cmds as cmds
_has_per, _info = restricted.restricted()
if not _has_per:
cmds.confirmDialog(message = _info)
return
super(ShadingCheck, self).show()
def _init(self):
self.set_title_name(u"材质文件检查")
#check file name
widget = checkwidget.ItemWidget(u"检查文件名", check.file_name, None)
self.add_widget(widget)
#check transform attr
widget = checkwidget.ItemWidget(u"检查通道属性值", _check_attr, None)
self.add_widget(widget)
#check rendering hierarchy
widget = checkwidget.ItemWidget(u"检查文件结构", _check_hierarchy, None, False)
self.add_widget(widget)
#check history
widget = checkwidget.ItemWidget(u"检查模型历史", _check_history, None, False)
self.add_widget(widget)
#check equal widget
widget = checkwidget.ItemWidget(u"检查相同模型", _check_equalmesh, None)
self.add_widget(widget)
#check reference
widget = checkwidget.ItemWidget(u"检查动画层", check.animation_layer, clear.animation_layer)
self.add_widget(widget)
widget = checkwidget.ItemWidget(u"检查未知节点", check.unknown_node, clear.unknown_node)
self.add_widget(widget)
#check un exists files
widget = checkwidget.ItemWidget(u"检查贴图文件是否不存在", check.file_node, None, False)
self.add_widget(widget)
widget = checkwidget.ItemWidget(u"检查摄像机", check.camera, clear.camera)
self.add_widget(widget)
widget = checkwidget.ItemWidget(u"检查灯光文件", check.light, clear.light)
self.add_widget(widget)
widget = checkwidget.ItemWidget(u"检查动画曲线", check.anim_curve, clear.anim_curve)
self.add_widget(widget)
widget = checkwidget.ItemWidget(u"检查显示层", check.display_layer, clear.display_layer)
self.add_widget(widget)
widget = checkwidget.ItemWidget(u"检查渲染层", check.render_layer, clear.render_layer)
self.add_widget(widget)
widget = checkwidget.ItemWidget(u"检查命名空间", check.namespace, clear.namespace)
self.add_widget(widget)
widget = checkwidget.ItemWidget(u"检查重命名", check.repeat, None, False)
self.add_widget(widget)
#check texture path
widget = checkwidget.ItemWidget(u"检查贴图路径", check.texture_path, None, False)
self.add_widget(widget)
widget = checkwidget.ItemWidget(u"检查材质命名", _check_material, materialcheck.CheckShader().repair, False)
self.add_widget(widget)
widget = checkwidget.ItemWidget(u"检查贴图命名", _check_tex_name, None, False)
self.add_widget(widget)
def _check_attr():
#get all transform
_un = ["front","persp","side","top"]
_all_trans = cmds.ls(type = "transform")
_use_tans = list(set(_all_trans) - set(_un))
_de = []
for _tans in _use_tans:
_t = cmds.getAttr("%s.translate"%_tans)
_r = cmds.getAttr("%s.rotate"%_tans)
_s = cmds.getAttr("%s.scale"%_tans)
_child = cmds.listRelatives(_tans, c = True, type = "mesh")
if _child:
if _t != [(0.0, 0.0, 0.0)] or _r != [(0.0, 0.0, 0.0)] or _s != [(1.0, 1.0, 1.0)]:
_de.append(_tans)
if _de:
info = u"通道属性值不为空\n"
for child in _de:
info += "{}\n".format(child)
return False,info
return True, None
def _check_history():
import pymel.core as pm
_history = []
allDags = pm.ls(dag = 1)
for dag in allDags:
_his = dag.history()
#_his = [n for n in dag.history(il=1, pdo = True)]
_his = [n for n in dag.history(il=1, pdo = True) if n.type() != "shadingEngine"]
if _his and dag.type() == "mesh":
_history.append(dag)
if _history:
_history = list(set(_history))
info = u"错误:部分模型存在历史记录\n"
for child in _history:
info += u"%s\n"%child
return False,info
else:
return True, None
def _check_hierarchy():
rendering = []
allDags = cmds.ls(dag = True)
for dag in allDags:
#print dag
#get
if cmds.objExists("%s.rendering"%dag):
value = cmds.getAttr("%s.rendering"%dag)
if value:
rendering.append(dag)
#return rendering
if not rendering:
info = u"文件组织结构错误,请用分组工具分组整合文件\n"
return False,info
else:
return True, None
def _check_equalmesh():
import maya.api.OpenMaya as om
_info = []
_error_meshs = []
_top_dags = cmds.ls(type = "mesh")
for _top_dag in _top_dags:
#get dag hierarchy
allDags = cmds.ls(_top_dag, dag = True, ni = True, type = "mesh")
# print allDags
for dag in allDags:
selectionList = om.MSelectionList()
selectionList.add( dag)
node = selectionList.getDependNode(0)
fnMesh = om.MFnMesh(node)
dag_info = ""
dag_info += " %s"%(fnMesh.numVertices)
dag_info += " %s"%(fnMesh.numEdges)
dag_info += " %s"%(fnMesh.numPolygons)
#_info.append(dag_info)
if dag_info in _info:
_error_meshs.append(fnMesh.name())
else:
_info.append(dag_info)
if _error_meshs:
_info = u"场景存在相同模型\n"
for _mesh in _error_meshs:
_info += "{}\n".format(_mesh)
return False, _info
return True, None
def _check_material():
_check = materialcheck.CheckShader()
_info = _check.check_shader()
if _info:
info = u"材质命名错误(无法修复的请检查是否是默认材质)\n"
info += "".join(sorted(_info))
return False, info
return True, None
def _check_tex_name():
_check = materialcheck.CheckShader()
_info = _check.check_texture()
if _info:
info = u"贴图命名错误,请手动检查\n"
info += "".join(sorted(_info))
return False, info
return True, None | null | zfused_maya/zfused_maya/tool/shading/shadingcheck.py | shadingcheck.py | py | 7,569 | python | en | code | null | code-starcoder2 | 51 |
def multiplesOf3and5(num):
    """Return the sum of all natural numbers below *num* divisible by 3 or 5.

    Project Euler problem 1. ``num`` itself is excluded from the sum.

    Args:
        num: exclusive upper bound (an int).

    Returns:
        The integer sum; 0 when ``num`` <= 1.
    """
    # Idiomatic replacement for the original manual while-loop/accumulator.
    return sum(i for i in range(1, num) if i % 3 == 0 or i % 5 == 0)

print(multiplesOf3and5(1000))
| null | Python/Problem 1: Multiples of 3 and 5.py | Problem 1: Multiples of 3 and 5.py | py | 182 | python | en | code | null | code-starcoder2 | 51 |
285790298 |
# Simple word store: reads n commands, each either "add <word>" or
# "find prefix <pref>" (print matches and a count).
n = int(input("enter the no oftimes u want run the operation"))

name = list()   # all words added so far
found = list()  # kept for backward compatibility; no longer used (see below)


def addword(name1):
    """Record a new word in the global word list."""
    name.append(name1)


def findprefix(pref):
    """Print every stored word starting with *pref*, then the match count.

    Bug fix: matches are collected in a per-call list. The original code
    appended into the module-level ``found`` without clearing it, so every
    query after the first also reported all previous queries' matches.
    """
    matches = [w for w in name if w.startswith(pref)]
    for x in matches:
        print(x, end=" ")
    print(f"\ntotal-> {len(matches)}")


for i in range(0, n):
    word = input()
    if word.startswith('add '):
        addword(word[4:])
    if word.startswith('find prefix '):
        findprefix(word[12:])
| null | dictionaryProb.py | dictionaryProb.py | py | 586 | python | en | code | null | code-starcoder2 | 51 |
77461459 | from Tkconstants import LEFT, BOTH, BOTTOM, RIGHT
import Helper
import Menu
import Tkinter as tk
import yahoo_finance as yf
class Stocks(tk.Frame):
    """Tkinter frame showing live NZX share quotes fetched via yahoo_finance.

    Gainers are rendered in green, losers in red. Data is loaded lazily:
    nothing is fetched until refreshStockData() marks the frame visible.
    """

    def __init__(self, parent, controller):
        self.frame = tk.Frame
        self.frame.__init__(self, parent, background='red')
        self.parent = parent
        self.controller = controller
        self.stocksFrame = tk.Frame(self, background='black')
        self.stocksFrame.pack(fill=BOTH, expand=True, side=BOTTOM)
        self.shares = []        # watched ticker symbols; populated on fetch
        self.visible = False    # guards against fetching before first display
        self.helper = Helper.ButtonHelper()
        self.initUI()

    def initUI(self):
        """Build the navigation buttons and the column headings."""
        menuButton = self.helper.menuButton(self, 'Back', lambda: self.controller.show_frame(Menu.MainMenu))
        # Bug fix: refreshStockData() takes no arguments besides self; the
        # original lambda passed self.stocksFrame, raising TypeError on click.
        refreshButton = self.helper.menuButton(self, 'Refresh', lambda: self.refreshStockData())
        menuButton.pack(side=LEFT)
        refreshButton.pack(side=RIGHT)

        parameters = ['Symbol', 'Price', 'Change', 'Volume']
        for i in range(len(parameters)):
            self.stocksFrame.columnconfigure(i, weight=1)
        for i in range(len(self.shares)):
            self.stocksFrame.rowconfigure(i, weight=1)

        for i, p in enumerate(parameters):
            heading = self.helper.label16(self.stocksFrame, p)
            heading.grid(row=0, column=i, ipadx=10)

    def setStockData(self):
        """Fetch quotes for the watched symbols and populate the grid.

        Does nothing until the frame has been made visible via
        refreshStockData().
        """
        if not self.visible:
            return
        print('Getting data...')
        self.shares = ['AIA.NZ', 'AIR.NZ', 'ANZ.NZ', 'GNE.NZ', 'RYM.NZ']
        self.shares.sort()
        for row, s in enumerate(self.shares, start=1):
            share = yf.Share(s)
            label = share.get_info()['symbol']
            print(label)
            price = share.get_price()
            change = share.get_change()
            volume = share.get_volume()
            parameters = [label, price, change, volume]
            # A leading '+' in the daily change means a gain -> green.
            colour = 'green' if change[0] == '+' else 'red'
            for col, p in enumerate(parameters):
                item = self.helper.label8(self.stocksFrame, p, colour)
                item.grid(row=row, column=col, ipadx=10)

    def refreshStockData(self):
        """Mark the frame visible and (re)load the quote grid."""
        self.visible = True
        self.setStockData()
81257666 | from Crypto.Cipher import AES
from Crypto.Random import random
from Crypto.Util.number import long_to_bytes,bytes_to_long
# CTF helper: the ciphertext is split into 32-byte chunks; each chunk is
# used as the AES key that decrypts the *next* chunk (a chained-key CBC-like
# scheme), so we walk the chunks pairwise.
with open("flag_cipher","r") as f:
    c = f.read()
    f.close()
c = [c[i:i+32] for i in range(0, len(c), 32)]
for i in range(1, len(c)-1):
    # NOTE(review): the third argument (an IV) is meaningless for ECB mode
    # and is rejected by modern PyCryptodome — presumably written against
    # legacy PyCrypto; confirm before running on a current install.
    cipher = AES.new(c[i], AES.MODE_ECB, "")
    print(cipher.decrypt(c[i+1]))
# ArcGIS script tool: rasterize a protected-area polygon layer onto a fixed
# Web-Mercator grid at the zoom-15 cell size.
import arcpy

#CONSTANT DECLARATIONS
PRIORITY_FIELDNAME = "wdpaid" # this is a bit of a hack but basically we can use this as the priority field to ensure that if the feature in question overlaps the cell by less than 50% then it will be selected

#ENVIRONMENT VARIABLES
arcpy.env.overwriteOutput = True
arcpy.env.outputCoordinateSystem = "Coordinate Systems/Projected Coordinate Systems/World/WGS 1984 Web Mercator.prj"

#PARAMETERS
paFL = arcpy.GetParameterAsText(0)       # input protected-area feature layer
outputFile = arcpy.GetParameterAsText(1) # output raster path

#LOGIC
# Snapping to a fixed grid keeps output cells aligned across separate runs.
arcpy.env.snapRaster = r"E:\cottaan\My Documents\ArcGIS\Default.gdb\SnapGridWebMercator"
arcpy.PolygonToRaster_conversion(paFL,"wdpaid", outputFile, "MAXIMUM_AREA", PRIORITY_FIELDNAME,"1222.9924525618553") # 1222.9924525618553 is the grid size for zoom level 15 in Web Mercator
253514360 | import logging
import MySQLdb
from common import ItemContainsNull
class MySqlPipeline(object):
    """Scrapy item pipeline that persists scraped product items to MySQL."""

    def open_spider(self, spider):
        """Open the database connection when the spider starts."""
        # NOTE(review): connection parameters are hard-coded placeholders;
        # load real credentials from Scrapy settings or the environment.
        self.conn = MySQLdb.connect('IP', 'USERNAME', 'PASSWORD', 'TABLENAME', charset="utf8", use_unicode=True)
        self.cursor = self.conn.cursor()

    def close_spider(self, spider):
        """Close the database connection when the spider finishes."""
        self.conn.close()

    def process_item(self, item, spider):
        """Insert each non-null field group of *item* as a Product row."""
        for i in item.iteritems():
            if not ItemContainsNull(i):
                # Parameterized query: values are bound, not string-formatted.
                self.cursor.execute("""INSERT INTO Product (Type, Price, Date, Quantity, Store, Name)
                       VALUES (%s, %s, %s, %s, %s, %s)""",
                                    (i[1]['itemType'],
                                     i[1]['price'],
                                     i[1]['date'],
                                     i[1]['quantity'],
                                     i[1]['store'],
                                     i[1]['name']))
                self.conn.commit()
        # Bug fix: Scrapy pipelines must return the item (or raise DropItem)
        # so that subsequent pipeline stages keep receiving it.
        return item
357031212 | from random import randint as age
prove = print  # playful alias: "proving" something here just prints it

class Phil:
    # Resume timeline keyed by year (or a descriptive life phase).
    In = {
        2002: "I graduated from EE, TKU, and served in the Army.",
        2004: "I entered an IC design house writing ATE programs.",
        2005: "I enrolled in EE, NTNU for a master's degree.",
        'the present': "I've become a teacher, sysadmin and developer.",
        'the future': "I am starting up into the IoT."
    }

class Me(Phil):
    def __init__(self):
        # Remaining years: from 36 up to a random endpoint in [37, 99].
        self.life = range(36, age(37,99))

def hack(it):
    try:
        prove(it)
    except:
        # NOTE(review): `die` is undefined anywhere in this file; if prove()
        # ever raised, this would itself raise NameError (part of the joke).
        die()

new = Me()
for everything in new.life:
    hack(everything)
| null | PP.py | PP.py | py | 620 | python | en | code | null | code-starcoder2 | 51 |
614471316 | # -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import optparse
import os
import platform
import re
import sys
from pyversion import is_python3
if is_python3():
import urllib.parse
else:
import imp
import urlparse
urllib = imp.new_module('urllib')
urllib.parse = urlparse
from color import Coloring
from command import InteractiveCommand, MirrorSafeCommand
from error import ManifestParseError
from project import SyncBuffer
from git_config import GitConfig
from git_command import git_require, MIN_GIT_VERSION_SOFT, MIN_GIT_VERSION_HARD
import platform_utils
from wrapper import Wrapper
class Init(InteractiveCommand, MirrorSafeCommand):
common = True
helpSummary = "Initialize repo in the current directory"
helpUsage = """
%prog [options]
"""
helpDescription = """
The '%prog' command is run once to install and initialize repo.
The latest repo source code and manifest collection is downloaded
from the server and is installed in the .repo/ directory in the
current working directory.
The optional -b argument can be used to select the manifest branch
to checkout and use. If no branch is specified, the remote's default
branch is used.
The optional -m argument can be used to specify an alternate manifest
to be used. If no manifest is specified, the manifest default.xml
will be used.
The --reference option can be used to point to a directory that
has the content of a --mirror sync. This will make the working
directory use as much data as possible from the local reference
directory when fetching from the server. This will make the sync
go a lot faster by reducing data traffic on the network.
The --dissociate option can be used to borrow the objects from
the directory specified with the --reference option only to reduce
network transfer, and stop borrowing from them after a first clone
is made by making necessary local copies of borrowed objects.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumeable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
# Switching Manifest Branches
To switch to another manifest branch, `repo init -b otherbranch`
may be used in an existing client. However, as this only updates the
manifest, a subsequent `repo sync` (or `repo sync -d`) is necessary
to update the working directory files.
"""
  def _Options(self, p, gitc_init=False):
    """Register init's command-line options on parser *p*.

    When *gitc_init* is True, the `-c` short flag is left free because the
    gitc-init subcommand allocates it for itself.
    """
    # Logging
    g = p.add_option_group('Logging options')
    g.add_option('-v', '--verbose',
                 dest='output_mode', action='store_true',
                 help='show all output')
    g.add_option('-q', '--quiet',
                 dest='output_mode', action='store_false',
                 help='only show errors')

    # Manifest
    g = p.add_option_group('Manifest options')
    g.add_option('-u', '--manifest-url',
                 dest='manifest_url',
                 help='manifest repository location', metavar='URL')
    g.add_option('-b', '--manifest-branch',
                 dest='manifest_branch',
                 help='manifest branch or revision', metavar='REVISION')
    cbr_opts = ['--current-branch']
    # The gitc-init subcommand allocates -c itself, but a lot of init users
    # want -c, so try to satisfy both as best we can.
    if not gitc_init:
      cbr_opts += ['-c']
    g.add_option(*cbr_opts,
                 dest='current_branch_only', action='store_true',
                 help='fetch only current manifest branch from server')
    g.add_option('-m', '--manifest-name',
                 dest='manifest_name', default='default.xml',
                 help='initial manifest file', metavar='NAME.xml')
    g.add_option('--mirror',
                 dest='mirror', action='store_true',
                 help='create a replica of the remote repositories '
                      'rather than a client working directory')
    g.add_option('--reference',
                 dest='reference',
                 help='location of mirror directory', metavar='DIR')
    g.add_option('--dissociate',
                 dest='dissociate', action='store_true',
                 help='dissociate from reference mirrors after clone')
    g.add_option('--depth', type='int', default=None,
                 dest='depth',
                 help='create a shallow clone with given depth; see git clone')
    g.add_option('--partial-clone', action='store_true',
                 dest='partial_clone',
                 help='perform partial clone (https://git-scm.com/'
                      'docs/gitrepository-layout#_code_partialclone_code)')
    g.add_option('--clone-filter', action='store', default='blob:none',
                 dest='clone_filter',
                 help='filter for use with --partial-clone [default: %default]')
    # TODO(vapier): Expose option with real help text once this has been in the
    # wild for a while w/out significant bug reports. Goal is by ~Sep 2020.
    g.add_option('--worktree', action='store_true',
                 help=optparse.SUPPRESS_HELP)
    g.add_option('--archive',
                 dest='archive', action='store_true',
                 help='checkout an archive instead of a git repository for '
                      'each project. See git archive.')
    g.add_option('--submodules',
                 dest='submodules', action='store_true',
                 help='sync any submodules associated with the manifest repo')
    g.add_option('-g', '--groups',
                 dest='groups', default='default',
                 help='restrict manifest projects to ones with specified '
                      'group(s) [default|all|G1,G2,G3|G4,-G5,-G6]',
                 metavar='GROUP')
    g.add_option('-p', '--platform',
                 dest='platform', default='auto',
                 help='restrict manifest projects to ones with a specified '
                      'platform group [auto|all|none|linux|darwin|...]',
                 metavar='PLATFORM')
    g.add_option('--clone-bundle', action='store_true',
                 help='force use of /clone.bundle on HTTP/HTTPS (default if not --partial-clone)')
    g.add_option('--no-clone-bundle',
                 dest='clone_bundle', action='store_false',
                 help='disable use of /clone.bundle on HTTP/HTTPS (default if --partial-clone)')
    g.add_option('--no-tags',
                 dest='tags', default=True, action='store_false',
                 help="don't fetch tags in the manifest")

    # Tool
    g = p.add_option_group('repo Version options')
    g.add_option('--repo-url',
                 dest='repo_url',
                 help='repo repository location', metavar='URL')
    g.add_option('--repo-rev', metavar='REV',
                 help='repo branch or revision')
    # Deprecated spelling of --repo-rev, kept hidden for compatibility.
    g.add_option('--repo-branch', dest='repo_rev',
                 help=optparse.SUPPRESS_HELP)
    g.add_option('--no-repo-verify',
                 dest='repo_verify', default=True, action='store_false',
                 help='do not verify repo source code')

    # Other
    g = p.add_option_group('Other options')
    g.add_option('--config-name',
                 dest='config_name', action="store_true", default=False,
                 help='Always prompt for name/e-mail')
def _RegisteredEnvironmentOptions(self):
return {'REPO_MANIFEST_URL': 'manifest_url',
'REPO_MIRROR_LOCATION': 'reference'}
  def _SyncManifest(self, opt):
    """Clone or update the manifest project and record init-time settings.

    Exits the process (sys.exit(1)) on any fatal condition: missing URL on a
    fresh init, mutually-incompatible flags, or a failed network sync.
    Statement order matters: the remote URL must be saved before branch
    resolution, and all config must be written before Sync_NetworkHalf.
    """
    m = self.manifest.manifestProject
    is_new = not m.Exists

    if is_new:
      if not opt.manifest_url:
        print('fatal: manifest url (-u) is required.', file=sys.stderr)
        sys.exit(1)

      if not opt.quiet:
        print('Downloading manifest from %s' %
              (GitConfig.ForUser().UrlInsteadOf(opt.manifest_url),),
              file=sys.stderr)

      # The manifest project object doesn't keep track of the path on the
      # server where this git is located, so let's save that here.
      mirrored_manifest_git = None
      if opt.reference:
        manifest_git_path = urllib.parse.urlparse(opt.manifest_url).path[1:]
        mirrored_manifest_git = os.path.join(opt.reference, manifest_git_path)
        if not mirrored_manifest_git.endswith(".git"):
          mirrored_manifest_git += ".git"
        if not os.path.exists(mirrored_manifest_git):
          mirrored_manifest_git = os.path.join(opt.reference,
                                               '.repo/manifests.git')

      m._InitGitDir(mirror_git=mirrored_manifest_git)

    self._ConfigureDepth(opt)

    # Set the remote URL before the remote branch as we might need it below.
    if opt.manifest_url:
      r = m.GetRemote(m.remote.name)
      r.url = opt.manifest_url
      r.ResetFetch()
      r.Save()

    if opt.manifest_branch:
      m.revisionExpr = opt.manifest_branch
    else:
      if is_new:
        default_branch = m.ResolveRemoteHead()
        if default_branch is None:
          # If the remote doesn't have HEAD configured, default to master.
          default_branch = 'refs/heads/master'
        m.revisionExpr = default_branch
      else:
        m.PreSync()

    # Expand the requested group list, optionally adding the host platform.
    groups = re.split(r'[,\s]+', opt.groups)
    all_platforms = ['linux', 'darwin', 'windows']
    platformize = lambda x: 'platform-' + x
    if opt.platform == 'auto':
      if (not opt.mirror and
          not m.config.GetString('repo.mirror') == 'true'):
        groups.append(platformize(platform.system().lower()))
    elif opt.platform == 'all':
      groups.extend(map(platformize, all_platforms))
    elif opt.platform in all_platforms:
      groups.append(platformize(opt.platform))
    elif opt.platform != 'none':
      print('fatal: invalid platform flag', file=sys.stderr)
      sys.exit(1)

    groups = [x for x in groups if x]
    groupstr = ','.join(groups)
    # The default auto selection is not persisted (None clears the setting).
    if opt.platform == 'auto' and groupstr == 'default,platform-' + platform.system().lower():
      groupstr = None
    m.config.SetString('manifest.groups', groupstr)

    if opt.reference:
      m.config.SetString('repo.reference', opt.reference)

    if opt.dissociate:
      m.config.SetString('repo.dissociate', 'true')

    if opt.worktree:
      if opt.mirror:
        print('fatal: --mirror and --worktree are incompatible',
              file=sys.stderr)
        sys.exit(1)
      if opt.submodules:
        print('fatal: --submodules and --worktree are incompatible',
              file=sys.stderr)
        sys.exit(1)
      m.config.SetString('repo.worktree', 'true')
      if is_new:
        m.use_git_worktrees = True
      print('warning: --worktree is experimental!', file=sys.stderr)

    if opt.archive:
      if is_new:
        m.config.SetString('repo.archive', 'true')
      else:
        print('fatal: --archive is only supported when initializing a new '
              'workspace.', file=sys.stderr)
        print('Either delete the .repo folder in this workspace, or initialize '
              'in another location.', file=sys.stderr)
        sys.exit(1)

    if opt.mirror:
      if is_new:
        m.config.SetString('repo.mirror', 'true')
      else:
        print('fatal: --mirror is only supported when initializing a new '
              'workspace.', file=sys.stderr)
        print('Either delete the .repo folder in this workspace, or initialize '
              'in another location.', file=sys.stderr)
        sys.exit(1)

    if opt.partial_clone:
      if opt.mirror:
        print('fatal: --mirror and --partial-clone are mutually exclusive',
              file=sys.stderr)
        sys.exit(1)
      m.config.SetString('repo.partialclone', 'true')
      if opt.clone_filter:
        m.config.SetString('repo.clonefilter', opt.clone_filter)
    else:
      opt.clone_filter = None

    # --clone-bundle default depends on --partial-clone when not given.
    if opt.clone_bundle is None:
      opt.clone_bundle = False if opt.partial_clone else True
    else:
      m.config.SetString('repo.clonebundle', 'true' if opt.clone_bundle else 'false')

    if opt.submodules:
      m.config.SetString('repo.submodules', 'true')

    if not m.Sync_NetworkHalf(is_new=is_new, quiet=opt.quiet, verbose=opt.verbose,
                              clone_bundle=opt.clone_bundle,
                              current_branch_only=opt.current_branch_only,
                              tags=opt.tags, submodules=opt.submodules,
                              clone_filter=opt.clone_filter):
      r = m.GetRemote(m.remote.name)
      print('fatal: cannot obtain manifest %s' % r.url, file=sys.stderr)

      # Better delete the manifest git dir if we created it; otherwise next
      # time (when user fixes problems) we won't go through the "is_new" logic.
      if is_new:
        platform_utils.rmtree(m.gitdir)
      sys.exit(1)

    if opt.manifest_branch:
      m.MetaBranchSwitch(submodules=opt.submodules)

    syncbuf = SyncBuffer(m.config)
    m.Sync_LocalHalf(syncbuf, submodules=opt.submodules)
    syncbuf.Finish()

    if is_new or m.CurrentBranch is None:
      if not m.StartBranch('default'):
        print('fatal: cannot create default in manifest', file=sys.stderr)
        sys.exit(1)
def _LinkManifest(self, name):
if not name:
print('fatal: manifest name (-m) is required.', file=sys.stderr)
sys.exit(1)
try:
self.manifest.Link(name)
except ManifestParseError as e:
print("fatal: manifest '%s' not available" % name, file=sys.stderr)
print('fatal: %s' % str(e), file=sys.stderr)
sys.exit(1)
def _Prompt(self, prompt, value):
print('%-10s [%s]: ' % (prompt, value), end='')
# TODO: When we require Python 3, use flush=True w/print above.
sys.stdout.flush()
a = sys.stdin.readline().strip()
if a == '':
return value
return a
def _ShouldConfigureUser(self, opt):
gc = self.client.globalConfig
mp = self.manifest.manifestProject
# If we don't have local settings, get from global.
if not mp.config.Has('user.name') or not mp.config.Has('user.email'):
if not gc.Has('user.name') or not gc.Has('user.email'):
return True
mp.config.SetString('user.name', gc.GetString('user.name'))
mp.config.SetString('user.email', gc.GetString('user.email'))
if not opt.quiet:
print()
print('Your identity is: %s <%s>' % (mp.config.GetString('user.name'),
mp.config.GetString('user.email')))
print("If you want to change this, please re-run 'repo init' with --config-name")
return False
  def _ConfigureUser(self, opt):
    """Interactively prompt for name/email until the user confirms them."""
    mp = self.manifest.manifestProject

    while True:
      if not opt.quiet:
        print()
      name = self._Prompt('Your Name', mp.UserName)
      email = self._Prompt('Your Email', mp.UserEmail)

      if not opt.quiet:
        print()
      print('Your identity is: %s <%s>' % (name, email))
      print('is this correct [y/N]? ', end='')
      # TODO: When we require Python 3, use flush=True w/print above.
      sys.stdout.flush()
      a = sys.stdin.readline().strip().lower()
      if a in ('yes', 'y', 't', 'true'):
        break

    # Only persist values that actually changed from the current config.
    if name != mp.UserName:
      mp.config.SetString('user.name', name)
    if email != mp.UserEmail:
      mp.config.SetString('user.email', email)
def _HasColorSet(self, gc):
for n in ['ui', 'diff', 'status']:
if gc.Has('color.%s' % n):
return True
return False
  def _ConfigureColor(self):
    """Offer to enable colorized output, showing a sample palette first.

    No-op when any color.* setting already exists in the global config.
    """
    gc = self.client.globalConfig
    if self._HasColorSet(gc):
      return

    # Minimal Coloring subclass with color forced on, so the samples below
    # render even before the user has opted in.
    class _Test(Coloring):
      def __init__(self):
        Coloring.__init__(self, gc, 'test color display')
        self._on = True
    out = _Test()

    print()
    print("Testing colorized output (for 'repo diff', 'repo status'):")

    for c in ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan']:
      out.write(' ')
      out.printer(fg=c)(' %-6s ', c)
    out.write(' ')
    out.printer(fg='white', bg='black')(' %s ' % 'white')
    out.nl()

    for c in ['bold', 'dim', 'ul', 'reverse']:
      out.write(' ')
      out.printer(fg='black', attr=c)(' %-6s ', c)
    out.nl()

    print('Enable color display in this user account (y/N)? ', end='')
    # TODO: When we require Python 3, use flush=True w/print above.
    sys.stdout.flush()
    a = sys.stdin.readline().strip().lower()
    if a in ('y', 'yes', 't', 'true', 'on'):
      gc.SetString('color.ui', 'auto')
def _ConfigureDepth(self, opt):
"""Configure the depth we'll sync down.
Args:
opt: Options from optparse. We care about opt.depth.
"""
# Opt.depth will be non-None if user actually passed --depth to repo init.
if opt.depth is not None:
if opt.depth > 0:
# Positive values will set the depth.
depth = str(opt.depth)
else:
# Negative numbers will clear the depth; passing None to SetString
# will do that.
depth = None
# We store the depth in the main manifest project.
self.manifest.manifestProject.config.SetString('repo.depth', depth)
  def _DisplayResult(self, opt):
    """Print a summary telling the user where repo was initialized.

    Also warns with undo instructions when the current working directory is
    not the manifest top directory.
    """
    if self.manifest.IsMirror:
      init_type = 'mirror '
    else:
      init_type = ''
    if not opt.quiet:
      print()
      print('repo %shas been initialized in %s' %
            (init_type, self.manifest.topdir))
      current_dir = os.getcwd()
      if current_dir != self.manifest.topdir:
        print('If this is not the directory in which you want to initialize '
              'repo, please run:')
        print('   rm -r %s/.repo' % self.manifest.topdir)
        print('and try again.')
  def ValidateOptions(self, opt, args):
    """Sanity-check command line options before Execute runs."""
    if opt.reference:
      opt.reference = os.path.expanduser(opt.reference)
    # Check this here, else manifest will be tagged "not new" and init won't be
    # possible anymore without removing the .repo/manifests directory.
    if opt.archive and opt.mirror:
      self.OptionParser.error('--mirror and --archive cannot be used together.')
    if args:
      self.OptionParser.error('init takes no arguments')
  def Execute(self, opt, args):
    """Run `repo init`: verify git, sync the manifest, configure the client."""
    git_require(MIN_GIT_VERSION_HARD, fail=True)
    if not git_require(MIN_GIT_VERSION_SOFT):
      print('repo: warning: git-%s+ will soon be required; please upgrade your '
            'version of git to maintain support.'
            % ('.'.join(str(x) for x in MIN_GIT_VERSION_SOFT),),
            file=sys.stderr)
    # output_mode is a tri-state: False => quiet, True => verbose, None => default.
    opt.quiet = opt.output_mode is False
    opt.verbose = opt.output_mode is True
    rp = self.manifest.repoProject
    # Handle new --repo-url requests.
    if opt.repo_url:
      remote = rp.GetRemote('origin')
      remote.url = opt.repo_url
      remote.Save()
    # Handle new --repo-rev requests.
    if opt.repo_rev:
      wrapper = Wrapper()
      remote_ref, rev = wrapper.check_repo_rev(
          rp.gitdir, opt.repo_rev, repo_verify=opt.repo_verify, quiet=opt.quiet)
      branch = rp.GetBranch('default')
      branch.merge = remote_ref
      rp.work_git.reset('--hard', rev)
      branch.Save()
    if opt.worktree:
      # Older versions of git supported worktree, but had dangerous gc bugs.
      git_require((2, 15, 0), fail=True, msg='git gc worktree corruption')
    self._SyncManifest(opt)
    self._LinkManifest(opt.manifest_name)
    # Only prompt for identity/color when attached to a real terminal.
    if os.isatty(0) and os.isatty(1) and not self.manifest.IsMirror:
      if opt.config_name or self._ShouldConfigureUser(opt):
        self._ConfigureUser(opt)
      self._ConfigureColor()
    self._DisplayResult(opt)
| null | subcmds/init.py | init.py | py | 20,026 | python | en | code | null | code-starcoder2 | 51 |
# This code is mainly excerpted from openai baseline code.
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
from abc import ABC,abstractmethod
from multiprocessing import Process, Pipe
from monitor import Monitor
class EpisodicLifeEnv(gym.Wrapper):
    """Wrapper that terminates an episode whenever a life is lost.

    A real reset of the wrapped env happens only on true game over; after a
    mere life loss, reset() just advances one no-op step instead.
    """
    def __init__(self, env=None):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        super(EpisodicLifeEnv, self).__init__(env)
        self.lives = 0               # life count seen on the previous step
        self.was_real_done = True    # True only when the wrapped env really ended
        self.was_real_reset = False  # True when the last reset was a full env.reset()
    def _step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert somtimes we stay in lives == 0 condtion for a few frames
            # so its important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def _reset(self):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset()
            self.was_real_reset = True
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
            self.was_real_reset = False
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class NoopResetEnv(gym.Wrapper):
    """Start each episode with a random number (1..noop_max) of no-op actions."""
    def __init__(self, env=None, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        super(NoopResetEnv, self).__init__(env)
        self.noop_max = noop_max
        # May be set externally to force a deterministic number of no-ops.
        self.override_num_noops = None
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def _reset(self):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset()
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = np.random.randint(1, self.noop_max + 1)
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(0)
            if done:
                # Episode ended during the no-ops; restart and keep stepping.
                obs = self.env.reset()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    """Repeat each action `skip` times, returning the pixel-wise max of the
    two most recent raw frames (which removes Atari sprite flicker)."""
    def __init__(self, env=None, skip=4):
        """Return only every `skip`-th frame"""
        super(MaxAndSkipEnv, self).__init__(env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = deque(maxlen=2)
        self._skip = skip
    def _step(self, action):
        total_reward = 0.0
        done = None
        for _ in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            self._obs_buffer.append(obs)
            total_reward += reward  # rewards are summed over the skipped frames
            if done:
                break
        # Element-wise max over the (up to) two newest frames.
        max_frame = np.max(np.stack(self._obs_buffer), axis=0)
        return max_frame, total_reward, done, info
    def _reset(self):
        """Clear past frame buffer and init. to first obs. from inner env."""
        self._obs_buffer.clear()
        obs = self.env.reset()
        self._obs_buffer.append(obs)
        return obs
class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def _reset(self):
        """Reset, then take actions 1 (FIRE) and 2 so the game actually starts.

        If either step ends the episode, the env is reset again first.
        """
        self.env.reset()
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset()
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset()
        return obs
class ProcessFrame84(gym.ObservationWrapper):
    """Convert raw Atari RGB frames into 84x84x1 grayscale uint8 images."""
    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1))
    def _observation(self, obs):
        return ProcessFrame84.process(obs)
    @staticmethod
    def process(frame):
        # Only the two standard Atari frame resolutions are supported.
        if frame.size == 210 * 160 * 3:
            img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
        elif frame.size == 250 * 160 * 3:
            img = np.reshape(frame, [250, 160, 3]).astype(np.float32)
        else:
            assert False, "Unknown resolution."
        # ITU-R BT.601 luminance weights: RGB -> grayscale.
        img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
        resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_AREA)
        # Crop the 110-row image to rows 18..101, leaving an 84x84 square.
        x_t = resized_screen[18:102, :]
        x_t = np.reshape(x_t, [84, 84, 1])
        return x_t.astype(np.uint8)
class ImageToPyTorch(gym.ObservationWrapper):
    """
    Change image shape to CWH
    """
    def __init__(self, env):
        super(ImageToPyTorch, self).__init__(env)
        old_shape = self.observation_space.shape
        # NOTE(review): the declared shape is (C, H, W) but swapaxes(2, 0)
        # below actually produces (C, W, H).  Harmless for square 84x84
        # frames, but confirm before using non-square observations.
        self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(old_shape[-1], old_shape[0], old_shape[1]))
    def _observation(self, observation):
        # Move the channel axis to the front, as PyTorch convolutions expect.
        return np.swapaxes(observation, 2, 0)
class ClippedRewardsWrapper(gym.RewardWrapper):
    """Clip every reward to its sign (-1, 0 or +1)."""
    def _reward(self, reward):
        """Change all the positive rewards to 1, negative to -1 and keep zero."""
        return np.sign(reward)
class LazyFrames(object):
    """Lazily-concatenated stack of frames sharing storage with its sources.

    Frames referenced by several stacked observations are stored only once,
    which greatly reduces the memory footprint of large replay buffers.
    Convert to a real array (np.array / np.asarray) only right before the
    data is fed to the model.
    """
    def __init__(self, frames):
        self._frames = frames
    def __array__(self, dtype=None):
        stacked = np.concatenate(self._frames, axis=0)
        if dtype is None:
            return stacked
        return stacked.astype(dtype)
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # Frames are stacked along the first (channel) axis.
        self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0]*k, shp[1], shp[2]))
    def _reset(self):
        # Fill the whole buffer with copies of the first observation.
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()
    def _step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info
    def _get_ob(self):
        """Return the current k-frame stack as a memory-sharing LazyFrames."""
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class VecEnv(ABC):
    """Abstract base class for asynchronous, vectorized environments."""
    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space
    """
    An abstract asynchronous, vectorized environment.
    """
    @abstractmethod
    def reset(self):
        """
        Reset all the environments and return an array of
        observations.
        If step_async is still doing work, that work will
        be cancelled and step_wait() should not be called
        until step_async() is invoked again.
        """
        pass
    @abstractmethod
    def step_async(self, actions):
        """
        Tell all the environments to start taking a step
        with the given actions.
        Call step_wait() to get the results of the step.
        You should not call this if a step_async run is
        already pending.
        """
        pass
    @abstractmethod
    def step_wait(self):
        """
        Wait for the step taken with step_async().
        Returns (obs, rews, dones, infos):
         - obs: an array of observations
         - rews: an array of rewards
         - dones: an array of "episode done" booleans
         - infos: an array of info objects
        """
        pass
    @abstractmethod
    def close(self):
        """
        Clean up the environments' resources.
        """
        pass
    def step(self, actions):
        # Synchronous convenience wrapper: step_async followed by step_wait.
        self.step_async(actions)
        return self.step_wait()
    def render(self):
        # NOTE(review): `logger` is not defined or imported anywhere in this
        # module, so calling render() would raise NameError — confirm where
        # the logger was meant to come from (likely baselines.logger).
        logger.warn('Render not defined for %s'%self)
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop: build one env and serve commands arriving on `remote`.

    Supported commands: 'step' (auto-resets when the episode ends), 'reset',
    'reset_task', 'close' (terminates the loop), 'get_spaces'.
    """
    parent_remote.close()  # child only talks through its own end of the pipe
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                # Reset transparently so the parent always receives a live obs.
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            ob = env.reset()
            remote.send(ob)
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        else:
            raise NotImplementedError
class CloudpickleWrapper(object):
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """
    def __init__(self, x):
        self.x = x
    def __getstate__(self):
        # Serialize with cloudpickle so lambdas/closures survive pickling.
        import cloudpickle
        return cloudpickle.dumps(self.x)
    def __setstate__(self, ob):
        # cloudpickle output is readable by the plain pickle module.
        import pickle
        self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
    """VecEnv that runs each environment in its own worker subprocess."""
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False  # True while step results are still in flight
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()
        # Query one child for the (shared) observation/action spaces.
        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    def step_async(self, actions):
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        if self.closed:
            return
        if self.waiting:
            # Drain any in-flight step results before shutting down.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
def wrap(env):
    """Apply a common set of wrappers for Atari games."""
    # Requires a NoFrameskip variant: frame skipping is done by MaxAndSkipEnv.
    assert 'NoFrameskip' in env.spec.id
    env = EpisodicLifeEnv(env)
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    # FIRE handling is only needed by games that wait for the fire button.
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = ProcessFrame84(env)
    env = ImageToPyTorch(env)
    env = FrameStack(env, 4)
    return env
def wrap_cover(env_name):
    """Return a zero-argument factory that builds a fully wrapped `env_name` env.

    Compared to wrap(), this also records episode statistics (Monitor, written
    to './') and clips rewards.  The factory form is what SubprocVecEnv's
    worker processes expect.
    """
    def wrap_():
        """Apply a common set of wrappers for Atari games."""
        env = gym.make(env_name)
        env = Monitor(env, './')
        assert 'NoFrameskip' in env.spec.id
        env = EpisodicLifeEnv(env)
        env = NoopResetEnv(env, noop_max=30)
        env = MaxAndSkipEnv(env, skip=4)
        if 'FIRE' in env.unwrapped.get_action_meanings():
            env = FireResetEnv(env)
        env = ProcessFrame84(env)
        env = ImageToPyTorch(env)
        env = FrameStack(env, 4)
        env = ClippedRewardsWrapper(env)
        return env
    return wrap_
import argparse
import tokenizer
def read_data_from_file(file_path):
    """Return the entire contents of *file_path* as a string.

    Uses a context manager so the file handle is closed even when read()
    raises (the original leaked the handle on error).
    """
    with open(file_path, 'r') as f:
        return f.read()
def _iter_tokens(path):
    """Yield every token produced by tokenizing the file at *path*."""
    tokens = tokenizer.Tokenizer(read_data_from_file(path))
    cur = tokens.next_token()
    while cur is not None:
        yield cur
        cur = tokens.next_token()

def main(args):
    """Print every token of args.file whose text is absent from args.dict.

    The first token field (tok[0]) is treated as the word; the dictionary
    file is loaded into a set once for O(1) membership tests.  (The two
    manual next_token() loops of the original are deduplicated into the
    _iter_tokens helper above.)
    """
    dict_set = {tok[0] for tok in _iter_tokens(args.dict)}
    for tok in _iter_tokens(args.file):
        if tok[0] not in dict_set:
            print(tok)
if __name__ == '__main__':
    # CLI: first positional arg is the dictionary file, second the file to check.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'dict'
    )
    parser.add_argument(
        'file'
    )
    main(parser.parse_args())
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 09:33, 16/03/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import concurrent.futures as parallel
from functools import partial
import numpy as np
from mealpy.optimizer import Optimizer
class BaseGA(Optimizer):
    """
    Genetic Algorithm (GA)
    Link:
        https://blog.sicara.com/getting-started-genetic-algorithms-python-tutorial-81ffa1dd72f9
        https://www.tutorialspoint.com/genetic_algorithms/genetic_algorithms_quick_guide.htm
        https://www.analyticsvidhya.com/blog/2017/07/introduction-to-genetic-algorithm/
    """
    def __init__(self, problem, epoch=10000, pop_size=100, pc=0.95, pm=0.025, **kwargs):
        """
        Args:
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
            pc (float): cross-over probability, default = 0.95
            pm (float): mutation probability, default = 0.025
        """
        # NOTE(review): kwargs is passed positionally (not unpacked); this is
        # presumably mealpy's Optimizer(problem, kwargs) signature — confirm.
        super().__init__(problem, kwargs)
        # Two candidate children are evaluated per population slot per epoch.
        self.nfe_per_epoch = 2 * pop_size
        self.sort_flag = False
        self.epoch = epoch
        self.pop_size = pop_size
        self.pc = pc
        self.pm = pm
    def create_child(self, agent_i, pop_copy, list_fitness):
        """Create one offspring: roulette-wheel parent selection, arithmetic
        crossover with probability pc, per-gene uniform mutation with
        probability pm; one of the two children is kept at random.
        (`agent_i` is unused — the signature matches the map() call pattern.)
        """
        ### Selection
        # c1, c2 = self._get_parents_kway_tournament_selection__(pop, k_way=0.2)
        id_c1 = self.get_index_roulette_wheel_selection(list_fitness)
        id_c2 = self.get_index_roulette_wheel_selection(list_fitness)
        w1 = pop_copy[id_c1][self.ID_POS]
        w2 = pop_copy[id_c2][self.ID_POS]
        ### Crossover
        if np.random.uniform() < self.pc:
            w1, w2 = self.crossover_arthmetic_recombination(w1, w2)
        ### Mutation, remove third loop here
        w1 = np.where(np.random.uniform(0, 1, self.problem.n_dims) < self.pm, np.random.uniform(self.problem.lb, self.problem.ub), w1)
        w2 = np.where(np.random.uniform(0, 1, self.problem.n_dims) < self.pm, np.random.uniform(self.problem.lb, self.problem.ub), w2)
        if np.random.uniform() < 0.5:
            return [w1.copy(), self.get_fitness_position(w1)]
        else:
            return [w2.copy(), self.get_fitness_position(w2)]
    def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
        """
        Args:
            mode (str): 'sequential', 'thread', 'process'
                + 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
                + 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
                + 'process': recommended for hard and big task (> 2 minutes for calculating objective)
        Returns:
            [position, fitness value]
        """
        # c1, c2 = self._get_parents_kway_tournament_selection__(pop, k_way=0.2)
        list_fitness = np.array([agent[self.ID_FIT][self.ID_TAR] for agent in pop])
        # Shallow copy: parents are read from the snapshot while the new
        # population is assembled.
        pop_copy = pop.copy()
        if mode == "thread":
            with parallel.ThreadPoolExecutor() as executor:
                pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, list_fitness=list_fitness), pop)
                pop = [x for x in pop_child]
        elif mode == "process":
            with parallel.ProcessPoolExecutor() as executor:
                pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, list_fitness=list_fitness), pop)
                pop = [x for x in pop_child]
        else:
            pop = [self.create_child(agent, pop_copy, list_fitness) for agent in pop]
        return pop
| null | mealpy/evolutionary_based/GA.py | GA.py | py | 4,291 | python | en | code | null | code-starcoder2 | 51 |
from flask import Flask,request,jsonify
import telebot
import json
# SECURITY: this Telegram bot token is hard-coded and committed to source
# control.  It should be revoked and loaded from an environment variable.
token="781229574:AAGC6K39EQ1VNcf2RTOlLpXg_KWoHPAZTI"
app = Flask(__name__)
bot=telebot.TeleBot(token)
@app.route('/',methods=["POST","GET"])
def hello_world():
    """Telegram webhook endpoint: GET is a health check, POST handles updates.

    NOTE(review): set_webhook is re-registered on *every* request; it likely
    only needs to run once at startup — confirm before changing.
    """
    bot.set_webhook("https://weatherbetabot.herokuapp.com/")
    if request.method == "POST":
        r=json.loads(request.data)
        # Reply to the chat that the incoming update originated from.
        chat_id=r['message']['chat']['id']
        bot.send_message(chat_id,'it work')
        return (jsonify(r),200)
    else:
        return ('Hi',200,None)
| null | app/__init__.py | __init__.py | py | 522 | python | en | code | null | code-starcoder2 | 51 |
#import gevent.monkey
#gevent.monkey.patch_all()
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, render_template, request, Response
from flask_socketio import SocketIO, join_room, emit
import game
from game import RequestDenied
# initialize Flask
from pylti.flask import lti
VERSION = '0.0.1'
app = Flask(__name__)
app.config.from_object('config')
socketio = SocketIO(app)
ROOMS = {} # dict to track active rooms
db = SQLAlchemy(app)
class User(db.Model):
    """Application user linked to an LTI identity."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True, nullable=False)
    firstname = db.Column(db.String(80), nullable=False)
    lastname = db.Column(db.String(80), nullable=False)
    # Opaque user id supplied by the LTI consumer; used for user lookup.
    lti_user_id = db.Column(db.String(255), unique=True, nullable=False)
    def __repr__(self):
        return '<User %r>' % self.username
    def to_dict(self):
        """Return a JSON-serializable dict of all user fields."""
        return({ 'id': self.id,
                 'username': self.username,
                 'firstname': self.firstname,
                 'lastname': self.lastname,
                 'lti_user_id': self.lti_user_id })
# Passed to @lti(error=...) as the authorization-failure handler.
def error(exception=None):
    """ render error page
    :param exception: optional exception
    :return: the error.html template rendered
    """
    return render_template('error.html')
import io
import random
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_svg import FigureCanvasSVG
from matplotlib.figure import Figure
from matplotlib.patches import Ellipse
@app.route("/mapping_diagram-<int:N>-<int:seed>.svg")
def mapping_diagram(N=5, seed=0):
""" renders the plot on the fly.
"""
fig = Figure()
random.seed(int(seed))
x = [random.randint(-10,10) for i in range(N)]
y = [random.randint(-10,10) for i in range(N)]
app.logger.error(x)
app.logger.error(y)
inputs = list(set(x))
outputs = list(set(y))
n = max(len(inputs),len(outputs))
axis = fig.add_subplot(1, 1, 1)
ells = [Ellipse((0, -float(n-1)/2), n, 2, 90), Ellipse((3, -float(n-1)/2), n, 2, 90)]
for e in ells:
axis.add_artist(e)
axis.axis('off')
axis.scatter([-1,4,4],[1,-len(inputs),-len(outputs)],marker=",",alpha=0)
for i in range(len(inputs)):
#axis.annotate(str(inputs[i]),(-i,0))
axis.text(0,-i,str(inputs[i]))
for j in range(len(outputs)):
#axis.annotate(str(outputs[j]),(-j,3))
axis.text(3,-j,str(outputs[j]))
for x_,y_ in zip(x,y):
i = inputs.index(x_)
j = outputs.index(y_)
axis.arrow(0.2,-i, 2.7, -(j-i), head_width=0.1, head_length=0.1, fc='k', ec='k')
output = io.BytesIO()
FigureCanvasSVG(fig).print_svg(output)
return Response(output.getvalue(), mimetype="image/svg+xml")
@app.route("/graph-<int:N>-<int:seed>.svg")
def plot_svg(N=50, seed=0):
""" renders the plot on the fly.
"""
fig = Figure()
random.seed(int(seed))
x = [random.randint(-10,10) for i in range(N)]
y = [random.randint(-10,10) for i in range(N)]
axis = fig.add_subplot(1, 1, 1)
axis.scatter(x, y)
output = io.BytesIO()
FigureCanvasSVG(fig).print_svg(output)
return Response(output.getvalue(), mimetype="image/svg+xml")
@app.route('/memory_lti/', methods=['GET', 'POST'])
@lti(request='initial', error=error, app=app)
def memory_init(lti=lti):
    """ initial access page to the lti provider. This page provides
    authorization for the user.
    :param lti: the `lti` object from `pylti`
    :return: index page for lti provider
    """
    # Known users go straight to the game; unknown users must register first.
    user = db.session.query(User).filter_by(lti_user_id=lti.name).first()
    if user:
        #return render_template('memory.html')
        return render_template('connect4.html')
        #return render_template('index.html', user=user)
    else:
        # NOTE(review): UserInfoForm is not defined or imported in this
        # module — confirm it is provided elsewhere, otherwise this branch
        # raises NameError.
        form = UserInfoForm()
        return render_template('GetUserInfo.html', lti=lti, form=form)
@app.route('/memory')
@lti(request='session', error=error, app=app)
def memory():
    """Serve the index HTML"""
    # Requires an authorized LTI session (established via /memory_lti/).
    return render_template('memory.html')
@socketio.on('create')
@lti(request='session', error=error, app=app)
def on_create(data, lti=lti):
    """Create a game lobby"""
    # Build a new game, register it under its room code and join the creator.
    #username = data['username']
    #gm = game.Game(deck_name='RelationDiagrams')
    #gm = game.MemoryGame(deck_name='clt1')
    gm = game.ConnectFourGame(deck_name='clt1')
    room = gm.room
    ROOMS[room] = gm
    data['room'] = room
    on_join(data)
    #join_room(room)
    #emit('join_room', {'room': room})
@socketio.on('disconnect')
@lti(request='session', error=error, app=app)
def disconnect(lti=lti):
    """Remove the disconnecting client from every room it plays in and
    broadcast a reset to those rooms."""
    for room in ROOMS:
        player = ROOMS[room].get_player(request.sid)
        if player:
            ROOMS[room].remove_player(player)
            reset_game(room)
@socketio.on('join')
@lti(request='session', error=error, app=app)
def on_join(data, lti=lti):
    print("joining room")
    """Join a game lobby"""
    # (The string above is not a real docstring: the print precedes it.)
    #username = data['username']
    room = data['room']
    print(lti)
    # Look up the application user for the current LTI identity.
    user = db.session.query(User).filter_by(lti_user_id=lti.name).first()
    if room in ROOMS:
        # add player and rebroadcast game object
        try:
            ROOMS[room].add_player(request.sid, user)
        except RequestDenied as err:
            emit('error', {'error': 'Unable to join room. {:s}'.format(err.message)})
        join_room(room)
        #send(ROOMS[room].to_json(), room=room)
        emit('join_room', {'room': room})
        reset_game(room)
    else:
        emit('error', {'error': 'Unable to join room. Room does not exist.'})
@socketio.on('input')
@lti(request='session', error=error, app=app)
def input(data, lti=lti):
    # NOTE(review): this handler shadows the builtin input(); only Socket.IO
    # dispatches to it by event name, but a rename (e.g. on_input) is safer.
    print("receiving input")
    """submit response and rebroadcast game object"""
    room = data['room']
    response = data['response']
    player = ROOMS[room].get_player(request.sid)
    try:
        # The update_game callback broadcasts state if the input is accepted.
        ROOMS[room].input(player, response, update_game)
    except RequestDenied as err:
        print(err.message)
def update_game(room):
    """Broadcast the full current state of `room` to all of its clients."""
    print("updating game")
    emit('update_game', {'flipped_cards': [card.to_dict() for card in ROOMS[room].flipped_cards], 'players': [player.to_dict() for player in ROOMS[room].players], 'active_player': ROOMS[room].active_player, 'dice': ROOMS[room].dice, 'selectable_cards': [card.to_dict() for card in ROOMS[room].selectable_cards]}, room=room)
def reset_game(room):
    """Broadcast a 'reset_game' event carrying the current state of `room`."""
    print("reseting game")
    emit('reset_game', {'flipped_cards': [card.to_dict() for card in ROOMS[room].flipped_cards], 'players': [player.to_dict() for player in ROOMS[room].players], 'active_player': ROOMS[room].active_player, 'dice': ROOMS[room].dice, 'selectable_cards': [card.to_dict() for card in ROOMS[room].selectable_cards]}, room=room)
@socketio.on('roll')
@lti(request='session', error=error, app=app)
def on_roll(data, lti=lti):
    """flip card and rebroadcast game object"""
    print("flipping card")
    room = data['room']
    player = ROOMS[room].get_player(request.sid)
    try:
        assert player is not None
    except AssertionError:
        # NOTE(review): execution falls through after emitting the error, so
        # roll() below is still attempted with player=None — confirm intended.
        emit('error', {'error': 'Unable to flip card. Player {:s} not in game'.format(request.sid)})
    try:
        # The callback prompts only this client to select a cell (x, y).
        ROOMS[room].roll(player, lambda x,y: emit('select', { 'player': player.to_dict(), 'x': x, 'y': y }, room=request.sid))
        update_game(room)
    except RequestDenied as err:
        print(err.message)
    #send(ROOMS[room].to_json(), room=room)
@socketio.on('flip_card')
@lti(request='session', error=error, app=app)
def on_flip_card(data, lti=lti):
    """flip card and rebroadcast game object"""
    print("flipping card")
    room = data['room']
    card = int(data['card'])
    player = ROOMS[room].get_player(request.sid)
    try:
        assert player is not None
    except AssertionError:
        # NOTE(review): falls through after emitting the error; the flip below
        # is still attempted with player=None — confirm intended.
        emit('error', {'error': 'Unable to flip card. Player {:s} not in game'.format(request.sid)})
    try:
        # The callback prompts only this client for an answer when required.
        ROOMS[room].select_card(player, card, lambda: emit('prompt', { 'player': player.to_dict() }, room=request.sid))
        update_game(room)
    except RequestDenied as err:
        print(err.message)
    #send(ROOMS[room].to_json(), room=room)
#@socketio.on('submit_answer')
#def on_submit_answer(data):
# """flip card and rebroadcast game object"""
# room = data['room']
# answer = data['answer']
# ROOMS[room].flip_card(card)
# send(ROOMS[room].to_json(), room=room)
if __name__ == '__main__':
    # Run the Socket.IO dev server on all interfaces, in debug mode.
    socketio.run(app, debug=True, host='0.0.0.0')
import operator
def calculate_distance(self):
    """Compute Manhattan distances from every house to every battery.

    For each house, a dict {battery_index: distance} is built (indices follow
    the iteration order of self.batteries.values()), sorted ascending by
    distance, stored on the house as `dists`, and collected into the returned
    list — one dict per house, in house iteration order.
    """
    all_distances = []
    for house in self.houses.values():
        diffs = {}
        for index, battery in enumerate(self.batteries.values()):
            diffs[index] = abs(battery.x - house.x) + abs(battery.y - house.y)
        ordered = dict(sorted(diffs.items(), key=operator.itemgetter(1)))
        all_distances.append(ordered)
        # NOTE: stored as `dists`; other code in this project reads `diffs` —
        # assumed to be set elsewhere.
        house.dists = ordered
    return all_distances
def sort_linked_houses(self, battery):
    """Build the battery's relocation options, sorted by weighted distance.

    Every (house, candidate-battery) pair becomes a 4-item list
    [battery_index, distance * 50 / house.output, house, house.output];
    the combined list is returned in ascending weighted-distance order.
    """
    options = []
    for house in battery.linked_houses:
        # Cheaper (low-output) houses get their distances inflated, so
        # high-output houses are preferred at equal raw distance.
        scale = 50 / house.output
        for batt_index, raw_diff in house.diffs.items():
            options.append([batt_index, raw_diff * scale, house, house.output])
    return sorted(options, key=operator.itemgetter(1))
def find_best(self, list, status):
    """Pick the best (house, battery) relocation candidate from *list*.

    Each entry of *list* is [battery_index, weighted_distance, house, output]
    (the parameter name shadows the builtin `list`; kept for compatibility).

    "strict" mode: walk the (distance-sorted) options and return the first
    one whose target battery can absorb the house's output without ending up
    with free capacity in the awkward 7..35 band.  Any other mode: prefer
    the house with the smallest output whose *current* battery stays under
    capacity after the house leaves.  Returns None implicitly when no option
    qualifies.
    """
    # BUGFIX: was `status is "strict"` — identity comparison against a string
    # literal only works by accident of CPython interning; use equality.
    if status == "strict":
        for option in list:
            a = self.batteries[option[0]].filled() + option[2].output
            b = self.batteries[option[0]].capacity
            c = b - a
            if a <= b and not 7 < c < 35:
                return option[2], self.batteries[option[0]]
    else:
        # Sort by the house output instead of distance (output weighting).
        list = sorted(list, key=operator.itemgetter(3))
        for option in list:
            if (option[2].link.filled() - option[2].output) < option[2].link.capacity:
                return option[2], self.batteries[option[0]]
# TODO: add a condition so that houses cannot be assigned to a battery beyond a maximum distance
# TODO: add a condition so that a house is not moved if that would leave the battery only just under full
def swap_houses(self, house, current_batt, next_batt, changes):
    """Move *house* from *current_batt* to *next_batt* and log the move.

    Updates the house's `link` and both batteries' `linked_houses` lists in
    place; `changes` is only echoed in the log output.
    """
    current_batt.linked_houses.remove(house)
    next_batt.linked_houses.append(house)
    house.link = next_batt
    print(f"house at x{house.x}/y{house.y} changed from battery at x{current_batt.x}/y{current_batt.y} to battery at x{next_batt.x}/y{next_batt.y}")
    print(f"house capacity = {house.output}")
    print(f"capacity = {current_batt.filled()}")
    print(f"changes = {changes}")
def switch_houses(self, house1, house2):
    """Exchange the battery links of two houses in place."""
    first_link = house1.link
    house1.link = house2.link
    house2.link = first_link
| null | test_scripts/test_area/hill_test/4th Time(ex-slowdown)/helpers.py | helpers.py | py | 3,652 | python | en | code | null | code-starcoder2 | 51 |
# -*- coding: utf-8 -*-
from plone import api
from ftw.testbrowser import browsing
from ftw.testbrowser.pages import factoriesmenu
from opengever.testing import IntegrationTestCase
from zope.annotation.interfaces import IAnnotations
class TestCreateDocFromOneoffixxTemplate(IntegrationTestCase):
    """Integration tests for creating a document from a OneOffixx template."""
    def setUp(self):
        super(TestCreateDocFromOneoffixxTemplate, self).setUp()
        # Both feature flags are required for the oneoffixx factory entry.
        self.activate_feature("officeconnector-checkout")
        self.activate_feature("oneoffixx")
    @browsing
    def test_document_creation_from_oneoffixx_template_creates_shadow_doc(self, browser):
        self.login(self.regular_user, browser)
        browser.open(self.dossier)
        factoriesmenu.add('document_with_oneoffixx_template')
        # The widget id suffix is the fixture template's UID.
        node = browser.css("#form-widgets-template-2574d08d-95ea-4639-beab-3103fe4c3bc7").first
        browser.fill({'Title': 'A doc'})
        browser.fill({'Template': node.get("title")})
        browser.find('Save').click()
        # The newly created document must be in the shadow state.
        self.assertEqual('document-state-shadow',
                         api.content.get_state(browser.context))
        self.assertTrue(browser.context.is_shadow_document())
    @browsing
    def test_template_id_stored_in_annotations(self, browser):
        self.login(self.regular_user, browser)
        browser.open(self.dossier)
        factoriesmenu.add('document_with_oneoffixx_template')
        node = browser.css("#form-widgets-template-2574d08d-95ea-4639-beab-3103fe4c3bc7").first
        browser.fill({'Title': 'A doc'})
        browser.fill({'Template': node.get("title")})
        browser.find('Save').click()
        # The chosen template id must be persisted on the new document.
        annotations = IAnnotations(browser.context)
        self.assertEqual(node.get("value"), annotations['template-id'])
class TestOneOffixxTemplateFeature(IntegrationTestCase):
    """Checks that the oneoffixx factory menu entry is feature-flag gated."""
    @browsing
    def test_doc_from_oneoffixx_template_available_if_oneoffixxtemplate_feature_enabled(self, browser):
        self.activate_feature("officeconnector-checkout")
        self.login(self.manager, browser)
        browser.open(self.dossier)
        # Without the oneoffixx feature the menu entry must be absent.
        self.assertEquals(
            ['Document',
             'document_with_template',
             'Task',
             'Add task from template',
             'Subdossier',
             'Participant'],
            factoriesmenu.addable_types())
        self.activate_feature("oneoffixx")
        browser.open(self.dossier)
        # With the feature enabled the oneoffixx entry appears in the menu.
        self.assertEquals(
            ['Document',
             'document_with_template',
             'document_with_oneoffixx_template',
             'Task',
             'Add task from template',
             'Subdossier',
             'Participant'],
            factoriesmenu.addable_types())
| null | opengever/oneoffixx/tests/test_oneoffixx.py | test_oneoffixx.py | py | 2,659 | python | en | code | null | code-starcoder2 | 51 |
3002260 |
# coding: utf-8
# Fine-tune a pre-trained Xception network for cats-vs-dogs classification:
# first train only the new top layers, then unfreeze part of the base model
# and train again, finally predict the test set and write a submission CSV.
# In[1]:
import helper
import matplotlib.pyplot as plt
from keras.applications import *
from keras.callbacks import EarlyStopping
import os
# In[2]:
# Configuration: data paths, input size and fine-tuning hyper-parameters.
train_path = ['./data/train2/cat', './data/train2/dog']
test_path ='./data/test1/test1'
img_size =(299,299)
# Number of base-model layers to keep frozen during the second training pass.
layer_num = 125
model_image ='./models/model_image_fine_tuning_xception_0403_02.png'
model_weights_file = './models/weights_fine_tuning_xception_no_outliers_0403_02.h5'
template_csv_path = './predicts/sample_submission.csv'
target_csv_path = './predicts/pred_fine_tuning_xception_no_outliers_0403_02.csv'
MODEL = xception.Xception
preprocess_func = xception.preprocess_input
# In[3]:
# Load the training data (images are resized and preprocessed for Xception).
X_train, y_train, image_files= helper.get_train_input_from_folder_with_subclasses(train_path, img_size, lambda_func=preprocess_func)
print("finished")
# In[4]:
# Build the model with every base-model layer frozen.
model = helper.get_fine_tuning_first_model(MODEL)
# Render the model architecture diagram to disk.
helper.visualize_model(model, model_image)
print("finished")
# In[5]:
print("start")
# First pass: train only the newly added top layers.
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')
model.fit(X_train, y_train, batch_size=128, epochs=8, validation_split=0.2, callbacks=[early_stopping])
print("finished")
# In[6]:
print("start")
# Second pass: unfreeze the layers above `layer_num` and train again.
model = helper.get_fine_tuning_second_model(model, layer_num)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')
model.fit(X_train, y_train, batch_size=128, epochs=60, validation_split=0.2, callbacks=[early_stopping])
print("finished")
# In[7]:
# Save the trained weights and free the training data.
model.save_weights(model_weights_file)
del X_train
del y_train
print("finished")
# In[8]:
print("start")
# Load the test data together with the corresponding file paths.
X_test, test_files = helper.get_input_from_folder_with_image_files(test_path, img_size, lambda_func=preprocess_func)
# Keep only the file basenames for the submission CSV.
image_file_names = [os.path.basename(path) for path in test_files]
# Predict and write the results into a copy of the sample submission CSV.
helper.predict_and_update_to_csv(model, X_test, image_file_names, template_csv_path, target_csv_path)
print("finished")
| null | p6_p7/py/fine_tuning_xception_no_outliers_final.py | fine_tuning_xception_no_outliers_final.py | py | 2,225 | python | en | code | null | code-starcoder2 | 51 |
126437936 | from core.exceptions.exceptions import OptionValidationError
class Option(object):
    """Descriptor base class for a single configurable module option.

    Subclasses implement ``__set__`` to validate and coerce the raw value,
    keeping both ``value`` (the typed value) and ``display_value`` (the
    user-facing string form) in sync.
    """

    def __init__(self, default, advanced=False):
        self.label = None
        try:
            self.advanced = bool(advanced)
        except ValueError:
            raise OptionValidationError("Invalid value. Cannot cast '{}' to bool".format(advanced))
        # `default == 0` is checked explicitly because 0 is falsy but is a
        # legitimate default that must still go through __set__ validation.
        if default or default == 0:
            self.__set__("", default)
        elif isinstance(self, OptList):
            # NOTE(review): lists are initialised as dicts here -- confirm
            # this matches what OptList.__add__ expects (it calls .extend()).
            self.value = {}
            self.display_value = {}
        else:
            self.display_value = ""
            self.value = ""

    def __get__(self, instance, owner):
        # Attribute access on the owning object yields the typed value.
        return self.value
class OptPort(Option):
    """Option holding a TCP/UDP port number (1-65535).

    Accepts anything ``int()`` can parse; rejects values outside the valid
    port range.
    """

    def __set__(self, instance, value):
        try:
            value = int(value)
            if 0 < value <= 65535:  # max port number is 65535
                self.display_value = str(value)
                self.value = value
            else:
                # Bug fix: the message previously claimed the range was
                # "between 0 and 65536", which contradicted the check above.
                raise OptionValidationError("Invalid option. Port value should be between 1 and 65535.")
        except ValueError:
            raise OptionValidationError("Invalid option. Cannot cast '{}' to integer.".format(value))
class OptBool(Option):
    """Option holding a boolean, exposed to the user as "true"/"false"."""

    def __init__(self, default, advanced=False):
        # Deliberately does not call Option.__init__; the fields are
        # mirrored directly so the display value is always "true"/"false".
        self.display_value = "true" if default else "false"
        self.value = default
        try:
            self.advanced = bool(advanced)
        except ValueError:
            raise OptionValidationError("Invalid value. Cannot cast '{}' to boolean.".format(advanced))

    def __set__(self, instance, value):
        # Table-driven parse: only the exact strings "true"/"false" are valid.
        allowed = {"true": True, "false": False}
        if value not in allowed:
            raise OptionValidationError("Invalid value. It should be true or false.")
        self.value = allowed[value]
        self.display_value = value
class OptInteger(Option):
    """Option holding an integer; also accepts hexadecimal string input."""

    def __set__(self, instance, value):
        try:
            self.display_value = str(value)
            self.value = int(value)
        except ValueError:
            # Decimal parse failed -- fall back to base-16 (e.g. "ff", "0xff")
            # before rejecting the value.
            try:
                self.value = int(value, 16)
            except ValueError:
                raise OptionValidationError("Invalid option. Cannot cast '{}' to integer.".format(value))
class OptFloat(Option):
    """Option holding a floating-point number."""

    def __set__(self, instance, value):
        try:
            # display_value is assigned first, so it may already be set even
            # when float() fails below -- preserved existing behavior.
            self.display_value = str(value)
            self.value = float(value)
        except ValueError:
            raise OptionValidationError("Invalid option. Cannot cast '{}' to float.".format(value))
class OptString(Option):
    """Option holding a free-form string value."""

    def __set__(self, instance, value):
        try:
            text = str(value)
        except ValueError:
            raise OptionValidationError("Invalid option. Cannot cast '{}' to string.".format(value))
        # For strings the typed value and the display value are identical.
        self.value = text
        self.display_value = text
class OptList(Option):
    """Option holding a list of values, extendable with the ``+`` operator."""

    def __add__(self, other):
        """Append *other*'s items in place and return ``self``.

        Bug fixes over the original:
        - ``__add__`` returned ``None``, so ``opt + other`` evaluated to
          ``None`` even when the extension succeeded; it now returns ``self``
          (the in-place mutation is unchanged, so existing callers that
          ignored the return value are unaffected).
        - ``list.extend`` raises ``TypeError`` (not ``ValueError``) for a
          non-iterable argument, so the old handler could never fire; both
          are now converted to the intended ``OptionValidationError``.
        """
        try:
            self.value.extend(other)
            self.display_value.extend(other)
        except (ValueError, TypeError):
            raise OptionValidationError("Invalid option. Cannot append '{}' to array".format(other))
        return self
628289279 | #!/usr/bin/env python3
import tensorflow
import tensorflow.compat.v1 as tf
from IPython import embed
class Net(object):
    '''CNN base class (TensorFlow 1.x graph style).

    Owns the input placeholders, builds the graph via ``init_structure``
    (overridden by subclasses), and provides train/evaluate/save/load
    helpers around the resulting operations.
    '''
    # Graph inputs and hyper-parameters, populated in __init__ /
    # init_structure (class-level None values act as declarations).
    _X = None                    # input image placeholder
    _y = None                    # integer label placeholder
    _num_labels = None           # number of output classes
    _one_hot_y = None            # one-hot encoded labels
    _mean = None                 # weight-init normal mean
    _stddev = None               # weight-init normal stddev
    _saver = None                # tf.train.Saver for checkpoints
    _learn_rate = None           # optimizer learning rate
    _dropout = None              # keep-probability placeholder
    _accuracy_operation = None   # mean-accuracy op
    _training_operation = None   # optimizer minimize op

    def __init__(self, X: tf.Variable, y: tf.Variable, num_labels: int, mean: float=0.0, stddev: float=0.1):
        self._X = X
        self._y = y
        self._num_labels = num_labels
        self._one_hot_y = tf.one_hot(self._y, self._num_labels)
        self._mean = mean
        self._stddev = stddev
        self._learn_rate = 0.001
        self._dropout = tf.placeholder(tf.float32, name='dropout')  # dropout rate
        self.init_structure()

    def init_structure(self):
        """Build the network graph; subclasses override this."""
        pass

    def _get_initialized_var(self, shape):
        """Truncated-normal initializer tensor for a weight of `shape`."""
        return tf.truncated_normal(shape=shape, mean=self._mean, stddev=self._stddev)

    def _create_conv_layer(self, input_var, num_output_features, ksize, strides, name, use_subsample=True):
        """Conv + ReLU (+ optional 2x2 max-pool); returns (output, weights)."""
        num_input_features = input_var.shape[-1].value
        conv_W = tf.Variable(self._get_initialized_var(shape=(ksize, ksize, num_input_features, num_output_features)))
        conv_b = tf.Variable(tf.zeros(num_output_features))
        conv = tf.nn.conv2d(input_var, conv_W, strides=[1, strides, strides, 1], padding='VALID') + conv_b
        conv = tf.nn.relu(conv)
        print('shape=%s before max_pool' % (conv.shape))
        if use_subsample:
            conv = tf.nn.max_pool(conv, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='VALID', name=name)
        return conv, conv_W

    def _create_full_connected_layer(self, input_var, num_output, name):
        """Fully connected layer (no activation); returns (output, weights)."""
        num_input = input_var.shape[-1].value
        fc_W = tf.Variable(self._get_initialized_var(shape=(num_input, num_output)), name=name + '_W')
        fc_b = tf.Variable(tf.zeros(num_output), name=name + '_b')
        fc = tf.nn.xw_plus_b(input_var, fc_W, fc_b)
        return fc, fc_W

    @property
    def X(self):
        return self._X

    @property
    def y(self):
        return self._y

    @property
    def dropout(self):
        return self._dropout

    @property
    def logits(self):
        return self._logits

    @property
    def accuracy_operation(self):
        return self._accuracy_operation

    @property
    def train_operation(self):
        return self._training_operation

    @property
    def saver(self):
        return self._saver

    def __call__(self, session, X_data, y_data, keep_prob: float, batch_size: int):
        """Calling the net evaluates accuracy on the given data."""
        return self.evaluate(session, X_data, y_data, keep_prob, batch_size)

    def train(self, session, X_data, y_data, keep_prob):
        """Run one optimizer step on a batch."""
        session.run(self._training_operation,
                    feed_dict={self.X: X_data, self.y: y_data, self.dropout: keep_prob})

    def evaluate(self, session, X_data, y_data, keep_prob, batch_size: int):
        """Return mean accuracy over the dataset, evaluated in batches."""
        num_examples = len(X_data)
        total_accuracy = 0
        for offset in range(0, num_examples, batch_size):
            batch_x, batch_y = X_data[offset: offset + batch_size], y_data[offset: offset + batch_size]
            accuracy = session.run(self.accuracy_operation,
                                   feed_dict={self.X: batch_x, self.y: batch_y, self.dropout: keep_prob})
            # Weight each batch by its size (last batch may be smaller).
            total_accuracy += (accuracy * len(batch_x))
        return total_accuracy / num_examples

    def save(self, session):
        """Checkpoint the model weights to ./<net name>.ckpt."""
        self.saver.save(session, './%s.ckpt' % self)

    def load(self, session):
        """Restore the model weights from ./<net name>.ckpt."""
        self.saver.restore(session, './%s.ckpt' % self)
class LeNet5(Net):
    """Classic LeNet-5 architecture: two conv/pool stages and three FC layers."""

    def __str__(self):
        return 'LeNet5'

    def init_structure(self):
        # Feature extractor: conv(5x5,6) -> pool -> conv(5x5,16) -> pool.
        self._conv1, conv1_W = self._create_conv_layer(self._X, num_output_features=6, ksize=5, strides=1, name='conv1')
        self._conv2, conv2_W = self._create_conv_layer(self._conv1, num_output_features=16, ksize=5, strides=1, name='conv2')
        # Classifier head: flatten -> FC 120 -> FC 84 -> FC num_labels.
        self._fc0 = tf.layers.Flatten()(self._conv2)
        self._fc1, fc1_W = self._create_full_connected_layer(self._fc0, 120, 'fc1')
        self._fc1 = tf.nn.relu(self._fc1)
        self._fc2, fc2_W = self._create_full_connected_layer(self._fc1, 84, 'fc2')
        self._fc2 = tf.nn.relu(self._fc2)
        self._fc3, fc3_W = self._create_full_connected_layer(self._fc2, self._num_labels, 'fc3')
        self._logits = self._fc3
        # Convert logits int0 proper probablity expression
        self._cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self._one_hot_y, logits=self._logits)
        self._loss_operation = tf.reduce_mean(self._cross_entropy)
        self._optimizer = tf.train.AdamOptimizer(learning_rate=self._learn_rate)
        self._training_operation = self._optimizer.minimize(self._loss_operation)
        # correct prediction for evaluation
        self._correct_prediction = tf.equal(tf.argmax(self._logits, 1), tf.argmax(self._one_hot_y, 1))
        self._accuracy_operation = tf.reduce_mean(tf.cast(self._correct_prediction, tf.float32))
        # Saver must be created after all variables exist.
        self._saver = tf.train.Saver()

    def __repr__(self):
        return '%s\n%s\n%s\n%s\n%s\n%s\n%s\n' % (self._X, self._conv1, self._conv2, self._fc0, self._fc1, self._fc2, self._fc3)
class MyNet(Net):
    """Custom net: multi-scale features (conv2 + conv3 concatenated),
    dropout, L2 weight regularization, and a single FC classifier."""

    def __str__(self):
        return 'MyNet'

    def init_structure(self):
        #
        # Net definition
        #
        self._conv1, conv1_W = self._create_conv_layer(self._X, num_output_features=6, ksize=5, strides=1, name='conv1')
        self._conv2, conv2_W = self._create_conv_layer(self._conv1, num_output_features=16, ksize=5, strides=1, name='conv2')
        # conv3 has no pooling so its flattened features keep a finer scale.
        self._conv3, conv3_W = self._create_conv_layer(self._conv2, num_output_features=400, ksize=5, strides=1, name='conv3', use_subsample=False)
        # Concatenate mid-level (conv2) and high-level (conv3) features.
        self._fc0 = tf.concat([tf.layers.Flatten()(self._conv2), tf.layers.Flatten()(self._conv3)], 1)
        self._fc0 = tf.nn.dropout(self._fc0, keep_prob=self._dropout)
        self._fc1, fc1_W = self._create_full_connected_layer(self._fc0, self._num_labels, name='fc1')
        self._logits = self._fc1
        #
        # Training pipeline
        #
        beta = 0.0001  # L2 regularization strength
        # Add all weights to regularization term
        reg_term = tf.nn.l2_loss(conv1_W) + \
                   tf.nn.l2_loss(conv2_W) + \
                   tf.nn.l2_loss(conv3_W) + \
                   tf.nn.l2_loss(fc1_W)
        # Convert logits into proper probablity expression
        self._optimizer = tf.train.AdamOptimizer(learning_rate=self._learn_rate)
        self._cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self._one_hot_y, logits=self._logits)
        self._loss_operation = tf.reduce_mean(self._cross_entropy + beta * reg_term)
        self._training_operation = self._optimizer.minimize(self._loss_operation)
        #
        # Evaluation pipeline
        #
        # correct prediction for evaluation
        self._correct_prediction = tf.equal(tf.argmax(self._logits, 1), tf.argmax(self._one_hot_y, 1))
        self._accuracy_operation = tf.reduce_mean(tf.cast(self._correct_prediction, tf.float32))
        self._saver = tf.train.Saver()

    def __repr__(self):
        return '%s\n%s\n%s\n%s\n%s\n' % (self._X, self._conv1, self._conv2, self._fc0, self._fc1)
| null | python/model.py | model.py | py | 7,311 | python | en | code | null | code-starcoder2 | 51 |
def run(level):
    """Compare our scores against the reference scores for one difficulty
    level and write a ranked CSV of the largest discrepancies.

    Reads ``output/<level>.txt`` (our scores) and ``result/<level>.normal``
    (reference scores), one integer per line, and writes
    ``analyse/<level>.csv`` with rows ``index, reference, ours, ratio``
    sorted by the max/min ratio, largest mismatch first.
    """
    # Robustness fix: int() tolerates surrounding whitespace, so parsing no
    # longer breaks when the final line lacks a trailing newline (the old
    # code chopped the last character off every line with x[:-1]).
    with open(f'output/{level}.txt', 'r') as f:
        ours = [int(line) for line in f if line.strip()]
    with open(f'result/{level}.normal', 'r') as f:
        reference = [int(line) for line in f if line.strip()]
    # Ratio of larger to smaller score; +0.1 in the denominator avoids
    # division by zero when the smaller score is 0.
    ranked = [(max(reference[idx], ours[idx]) / (min(reference[idx], ours[idx]) + 1e-1), idx)
              for idx in range(len(reference))]
    ranked.sort(reverse=True)
    with open(f'analyse/{level}.csv', 'w') as f:
        for ratio, idx in ranked:
            print(f'{idx+1}, {reference[idx]}, {ours[idx]}, {ratio}', file=f)
if __name__ == "__main__":
    # Analyse every difficulty level in turn.
    for level in ("easy", "middle", "hard"):
        run(level)
407870143 | from django import template
from django.templatetags.static import static
register = template.Library()
# A Django inclusion tag provides an elegant way to separate the Bootstrap
# template logic from the app templates; that separation is needed to theme
# the project types.
# Pass the elements used to fill the cards via kwargs.
# Please note that all templates are contained in cards.
# You are free to arrange them in grids or other elements.
@register.inclusion_tag('includes/merge_html.html')
def merge_html(*args, **kwargs):
    """Merge several dicts of HTML fragments into a single context dict for
    the ``includes/merge_html.html`` template (later pieces override earlier
    ones on key clashes)."""
    merged = {}
    for piece in args:
        merged.update(piece)
    return {"html_text": merged}
| null | core/templatetags/general_tags.py | general_tags.py | py | 640 | python | en | code | null | code-starcoder2 | 51 |
407203490 | import sys
import os
import argparse
# Add parent directory to path to import general.py
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Import ../general.py
from general import *
TEST_NAME = "network"
CLIENT_NAME = "client-ngtcp2"
CLIENT_IMPLEMENTATION = "ngtcp2"
QUIC_RESULTS_DIR = "/root/quic-results"
def run_test_client(client_container_id, client_name, server_name, network_setting, resource):
    """Run the network test client inside its container against `server_name`."""
    print("starting test client to test " + server_name)
    if client_container_id is None:
        print("cannot run test client, no container")
        exit(-1)
    # Assemble the docker exec invocation for the client-side test script.
    command = (
        "docker exec -i {cid} python /scripts/network/network-client-test.py"
        " --client {client} --server {server}"
        " --network_setting {setting} --resource {resource}"
    ).format(cid=client_container_id, client=client_name, server=server_name,
             setting=network_setting, resource=resource)
    print("test client command: " + command)
    run_call_command(command)
def run_test_server(container_id, server_name, network_setting, resource):
    """Start the network test server inside its container (detached)."""
    print("starting test server to test " + server_name)
    if container_id is None:
        print("cannot run server, no container")
        exit(-1)
    # `-d` detaches so the server keeps running while the client is tested.
    command = (
        "docker exec -i -d {cid} python -u /scripts/network/network-emu-server-test.py"
        " --server {server} --network_setting {setting} --resource {resource}"
    ).format(cid=container_id, server=server_name,
             setting=network_setting, resource=resource)
    print("test server command: " + command)
    run_subprocess_command(command)
def main():
    """Run the full network-emulation test matrix.

    For each repetition, iterate over every (resource, implementation,
    network setting) combination: start a fresh server container, restart
    the shared test client container, run server and client test scripts,
    then tear the server container down.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--amount_of_runs', help='Amount of times the compliance tests need to be run',
                        nargs='?', const=1, type=int, default=1)
    args = parser.parse_args()
    # QUIC implementations under test.
    implementations = [
        "ngtcp2",
        "quicker",
        "quant"
    ]
    # Emulated network profiles.
    network_settings = [
        "wifi",
        "wifi_transatlantic_loss",
        "4g",
        "2g_loss"
    ]
    # Resources requested during each test run.
    resources = [
        #"index.html",
        "large-text.txt",
        "image.jpg"
    ]
    # Start from a clean slate: remove leftover containers from earlier runs.
    remove_containers()
    client_container_id = None
    for x in range(0, args.amount_of_runs):
        update_start_time()
        for resource in resources:
            for implementation in implementations:
                for network_setting in network_settings:
                    container_id = create_server_container(TEST_NAME,
                                                          implementation)
                    # The client container is reused (restarted) across runs.
                    client_container_id = restart_test_client(TEST_NAME,
                                                              CLIENT_IMPLEMENTATION, CLIENT_NAME, client_container_id, implementation)
                    run_test_server(container_id, implementation,
                                    network_setting, resource)
                    run_test_client(client_container_id, CLIENT_IMPLEMENTATION,
                                    implementation, network_setting, resource)
                    # Server containers are single-use; remove after each run.
                    remove_container(container_id)
    remove_container(client_container_id)
    print("network test done")


if __name__ == "__main__":
    main()
606423008 |
import numpy as np
import paddle
from tqdm import tqdm
from .abc_interpreter import Interpreter
from ..data_processor.readers import preprocess_inputs, preprocess_save_path
from ..data_processor.visualizer import explanation_to_vis, show_vis_explanation, save_image
class SmoothGradInterpreter(Interpreter):
    """
    Smooth Gradients Interpreter.
    Smooth Gradients method solves the problem of meaningless local variations in partial derivatives
    by adding random noise to the inputs multiple times and take the average of the
    gradients.
    More details regarding the Smooth Gradients method can be found in the original paper:
    http://arxiv.org/pdf/1706.03825.pdf
    """

    def __init__(self,
                 paddle_model,
                 use_cuda=True,
                 model_input_shape=[3, 224, 224]):
        """
        Initialize the SmoothGradInterpreter.
        Args:
            paddle_model (callable): A paddle model that outputs predictions.
            use_cuda (bool, optional): Whether or not to use cuda. Default: True
            model_input_shape (list, optional): The input shape of the model. Default: [3, 224, 224]
        """
        # NOTE(review): mutable default argument (model_input_shape) -- safe
        # only as long as callers never mutate it.
        Interpreter.__init__(self)
        self.paddle_model = paddle_model
        self.model_input_shape = model_input_shape
        self.data_type = 'float32'
        # predict_fn is built lazily on first use (see _paddle_prepare).
        self.paddle_prepared = False
        self.use_cuda = use_cuda
        if not paddle.is_compiled_with_cuda():
            self.use_cuda = False

    def interpret(self,
                  inputs,
                  labels=None,
                  noise_amount=0.1,
                  n_samples=50,
                  visual=True,
                  save_path=None):
        """
        Main function of the interpreter.
        Args:
            inputs (str or list of strs or numpy.ndarray): The input image filepath or a list of filepaths or numpy array of read images.
            labels (list or tuple or numpy.ndarray, optional): The target labels to analyze. The number of labels should be equal to the number of images. If None, the most likely label for each image will be used. Default: None
            noise_amount (float, optional): Noise level of added noise to the image.
                The std of Guassian random noise is noise_amount * (x_max - x_min). Default: 0.1
            n_samples (int, optional): The number of new images generated by adding noise. Default: 50
            visual (bool, optional): Whether or not to visualize the processed image. Default: True
            save_path (str or list of strs or None, optional): The filepath(s) to save the processed image(s). If None, the image will not be saved. Default: None
        :return: interpretations/gradients for each image
        :rtype: numpy.ndarray
        """
        imgs, data = preprocess_inputs(inputs, self.model_input_shape)
        bsz = len(data)
        save_path = preprocess_save_path(save_path, bsz)
        data_type = np.array(data).dtype
        self.data_type = data_type
        if not self.paddle_prepared:
            self._paddle_prepare()
        # When no labels are given, use the model's own top-1 predictions.
        if labels is None:
            _, preds = self.predict_fn(data, None)
            labels = preds
        labels = np.array(labels).reshape((len(imgs), 1))
        # Per-image noise std: noise_amount * (max - min) over all non-batch axes.
        max_axis = tuple(np.arange(1, data.ndim))
        stds = noise_amount * (
            np.max(data, axis=max_axis) - np.min(data, axis=max_axis))
        # Average gradients over n_samples noisy copies of the input.
        total_gradients = np.zeros_like(data)
        for i in tqdm(range(n_samples)):
            noise = np.concatenate([
                np.float32(
                    np.random.normal(0.0, stds[j], (1, ) + tuple(d.shape)))
                for j, d in enumerate(data)
            ])
            data_noised = data + noise
            gradients, _ = self.predict_fn(data_noised, labels)
            total_gradients += gradients
        avg_gradients = total_gradients / n_samples
        # visualization and save image.
        for i in range(len(imgs)):
            # NOTE(review): debug print left in -- candidate for removal.
            print(imgs[i].shape, avg_gradients[i].shape)
            vis_explanation = explanation_to_vis(imgs[i], np.abs(avg_gradients[i]).sum(0), style='overlay_grayscale')
            if visual:
                show_vis_explanation(vis_explanation)
            if save_path[i] is not None:
                save_image(save_path[i], vis_explanation)
        return avg_gradients

    def _paddle_prepare(self, predict_fn=None):
        """Build and cache self.predict_fn (gradients + predicted labels)."""
        if predict_fn is None:
            paddle.set_device('gpu:0' if self.use_cuda else 'cpu')
            # to get gradients, the ``train`` mode must be set.
            self.paddle_model.train()
            # Freeze batch-norm statistics and disable dropout so train mode
            # only affects gradient availability, not the forward pass.
            for n, v in self.paddle_model.named_sublayers():
                if "batchnorm" in v.__class__.__name__.lower():
                    v._use_global_stats = True
                if "dropout" in v.__class__.__name__.lower():
                    v.p = 0

            def predict_fn(data, labels):
                data = paddle.to_tensor(data)
                data.stop_gradient = False
                out = self.paddle_model(data)
                out = paddle.nn.functional.softmax(out, axis=1)
                preds = paddle.argmax(out, axis=1)
                if labels is None:
                    labels = preds.numpy()
                # Select the probability of the target class per sample.
                labels_onehot = paddle.nn.functional.one_hot(
                    paddle.to_tensor(labels), num_classes=out.shape[1])
                target = paddle.sum(out * labels_onehot, axis=1)
                # gradients = paddle.grad(outputs=[target], inputs=[data])[0]
                target.backward()
                gradients = data.grad
                if isinstance(gradients, paddle.Tensor):
                    gradients = gradients.numpy()
                return gradients, labels

        self.predict_fn = predict_fn
        self.paddle_prepared = True
| null | interpretdl/interpreter/smooth_grad.py | smooth_grad.py | py | 5,840 | python | en | code | null | code-starcoder2 | 51 |
367475446 | import xml.etree.ElementTree as ET
import cv2
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from pathlib import Path
# Padding / drawing radius (pixels) used throughout this module.
global radius  # NOTE(review): `global` at module scope is a no-op -- candidate for removal
radius = 5


def visualize_hsv(flow, name):
    """Visualize a 2-channel flow field as an HSV image and save it to `name`.

    Hue encodes the flow direction, value encodes the (normalized) magnitude.
    """
    flow = flow.astype("float32")
    hsv = np.zeros((flow.shape[0], flow.shape[1], 3))
    hsv[..., 1] = 255
    # Compute magnitude and angle of the flow vectors
    mag, ang = cv2.cartToPolar(flow[..., 1], flow[..., 0])
    # OpenCV represents hue in the range 0-180
    hsv[..., 0] = ang * 180 / np.pi / 2
    # Normalize the magnitude into 0-255
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    # cvtColor requires uint8 input, otherwise it errors
    hsv = hsv.astype("uint8")
    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    cv2.imwrite(name, rgb)
# Returns, per pixel, the number of overlapping vectors and their summed components.
def compute_vector(black, pre, nxt, result, result_y, result_x, sgm):
    """Rasterize the motion vector from `pre` (frame t) to `nxt` (frame t+1)
    into the accumulator images.

    `result` counts how many vectors cover each pixel; `result_y`/`result_x`
    accumulate the (unit) vector components, to be averaged by the caller.
    `sgm` is the half-width of the painted band around the segment.
    """
    # v' = p(t+1) - p(t)
    # Normalize to a unit vector (v = v'/|v'|)
    v = nxt - pre
    if np.linalg.norm(v) != 0:
        v = v / np.linalg.norm(v)
    # Define up/down normal vectors and scale them by sgm (a hyperparameter)
    up = np.array([-v[1], v[0]]) * sgm
    dw = np.array([v[1], -v[0]]) * sgm
    # Sum of (p(t) or p(t+1) coordinates) with each of the two normals
    # (+radius compensates for the padded accumulator images).
    v1 = up + nxt + radius
    v2 = dw + nxt + radius
    v3 = up + pre + radius
    v4 = dw + pre + radius
    # Four corners of the band enclosing the segment from p(t) to p(t+1)
    points = np.round(
        np.array([[v1[0], v1[1]], [v2[0], v2[1]], [v4[0], v4[1]], [v3[0], v3[1]]])
    )
    img_t = black.copy()
    img_y = black.copy()
    img_x = black.copy()
    img_z = black.copy()
    # Fill the quadrilateral defined by `points` with 1
    img_t = cv2.fillPoly(img=img_t, pts=np.int32([points]), color=1)
    # img_t = cv2.circle(img_t, (pre[0] + radius, pre[1] + radius), radius, (1), thickness=-1, lineType=cv2.LINE_4)
    # img_t = cv2.circle(img_t, (nxt[0] + radius, nxt[1] + radius), radius, (1), thickness=-1, lineType=cv2.LINE_4)
    # v = nxt - pre
    # v = np.append(v, 1)
    # v = v / np.linalg.norm(v)
    img_y[img_t != 0] = v[1]
    img_x[img_t != 0] = v[0]
    # img_z[img_t != 0] = v[2]
    # Accumulate the per-pixel vector count
    result = result + img_t
    # Summing the vectors is fine for now (they are averaged later)
    result_x = result_x + img_x
    result_y = result_y + img_y
    # result_z = result_z + img_z
    return result, result_y, result_x
def generate_flow(track_let, save_path, itv=1, height=1040, width=1392):
    """Build a cell-motion-flow (CMF) image from a tracklet table and save it.

    `track_let` rows are [frame, cell_id, x, y, parent_id]; vectors are drawn
    between frame i and frame i+itv, averaged where they overlap, then saved
    both as an HSV visualization (.png) and as a float16 .npy array.
    """
    track_let = track_let.astype(int)
    i = np.unique(track_let[:, 0])[0]
    ids = np.unique(track_let[:, 1])
    output = []
    # All-black (zero) image reused as the accumulator template (padded by radius)
    black = np.zeros((height + radius * 2, width + radius * 2, 1))
    par_id = -1
    # `result` counts how many vectors cover each pixel (2, 3, ... on overlap)
    # `result_y`, `result_x` accumulate the output vector components
    result = black.copy()
    result_y = black.copy()
    result_x = black.copy()
    for j in ids:
        # Presence of cell j in frame i and in frame i+itv
        index_check = len(track_let[(track_let[:, 0] == i) & (track_let[:, 1] == j)])
        index_chnxt = len(
            track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)]
        )
        if index_chnxt != 0:
            par_id = track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][
                0, -1
            ]
        # Cell present in both frames (data = frame t coords, dnxt = frame t+itv coords)
        if (index_check != 0) & (index_chnxt != 0):
            data = track_let[(track_let[:, 0] == i) & (track_let[:, 1] == j)][0]
            dnxt = track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][0]
            pre = data[2:-1]
            nxt = dnxt[2:-1]
            result, result_y, result_x = compute_vector(
                black, pre, nxt, result, result_y, result_x, SGM
            )
        # Not present in the previous frame, but has a parent (cell division)
        elif (index_check == 0) & (index_chnxt != 0) & (par_id != -1):
            # Parent cell's coordinates at frame t
            if (
                len(track_let[(track_let[:, 0] == i) & (track_let[:, 1] == par_id)])
                != 0
            ):
                data = track_let[(track_let[:, 0] == i) & (track_let[:, 1] == par_id)][
                    0
                ]
                dnxt = track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][
                    0
                ]
                pre = data[2:-1]
                nxt = dnxt[2:-1]
                result, result_y, result_x = compute_vector(
                    black, pre, nxt, result, result_y, result_x, SGM
                )
            else:
                print(
                    track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][0]
                )
    # Remove the padding
    result = result[radius:-radius, radius:-radius]
    print(i, "to", i + itv, result.max())
    # Avoid division by zero: replace 0 counts with 1
    result_org = result.copy()
    result[result == 0] = 1
    # Remove the padding
    result_y = result_y[radius:-radius, radius:-radius]
    result_x = result_x[radius:-radius, radius:-radius]
    # result_z = result_z[radius:-radius, radius:-radius]
    # Average the accumulated vectors by the per-pixel count
    result_x = result_x / result
    result_y = result_y / result
    # result_z = (result_z / result)
    result_vector = np.concatenate((result_y, result_x), axis=-1)
    visualize_hsv(
        result_vector, str(save_path.parent.joinpath(save_path.name + ".png"))
    )
    # save_npy = save_path + '/{0:03d}.npy'.format(i)
    # np.save(save_npy, result_vector.astype('float16'))
    output.append(result_vector)
    np_output = np.array(output).astype("float16")
    np.save(str(save_path), np_output)
############################################################################################
SGM = 5  # half-width of the CMF band
############################################################################################
if __name__ == "__main__":
    # Generate CMF images and exclusion masks for each sequence / frame interval.
    seqs = [13]
    time_lates = [1, 5, 9]
    for time_late in time_lates:
        for seq in seqs:
            save_CMP_path = Path(f"/home/kazuya/main/weakly_tracking/images/sequ{seq}/CMF_6_{time_late}")
            save_mask_path = save_CMP_path.parent.joinpath(f"mask_{time_late}")
            save_CMP_path.mkdir(parents=True, exist_ok=True)
            save_mask_path.mkdir(parents=True, exist_ok=True)
            root_path = Path(f"../output/association/C2C12_9_{time_late}/sequ{seq}")
            pred1_paths = sorted(root_path.glob("*/*_1.txt"))
            pred2_paths = sorted(root_path.glob("*/*_2.txt"))
            for frame, pred_path in enumerate(zip(pred1_paths, pred2_paths)):
                # [x, y, cell_id, state]
                pred1 = np.loadtxt(str(pred_path[0]), delimiter=",", skiprows=1)
                # [x, y, cell_id, state]
                pred2 = np.loadtxt(str(pred_path[1]), delimiter=",", skiprows=1)
                # Build a tracklet table: [frame, cell_id, x, y, parent_id]
                track_let = np.zeros(((pred1.shape[0] + pred2.shape[0], 5)))
                track_let[pred2.shape[0]:, 0] = 2
                track_let[: pred2.shape[0], 0] = 1
                track_let[pred2.shape[0]:, 2:4] = pred1[:, :2]
                track_let[: pred2.shape[0], 2:4] = pred2[:, :2]
                track_let[:, -1] = -1
                track_let[:, 1] = -1
                # Link detections between the two frames via the association column.
                for index, pre in enumerate(pred1):
                    track_let[int(pred2.shape[0] + index), 1] = index
                    if pre[3] != -1:
                        track_let[int(pre[2]), 1] = index
                track_let = track_let[track_let[:, 1] != -1]
                # Mask out regions around unmatched cells in frame 1 (state == 2).
                exclude_cells = pred1[pred1[:, 3] == 2]
                mask = np.zeros((512, 512))
                for exclude_cell in exclude_cells:
                    mask = cv2.circle(
                        mask,
                        (int(exclude_cell[1]), int(exclude_cell[2])),
                        SGM * 3,
                        255,
                        -1,
                    )
                # Also mask unmatched cells in frame 2 (state == 0).
                exclude_cells = pred2[pred2[:, 3] == 0]
                for exclude_cell in exclude_cells:
                    mask = cv2.circle(
                        mask,
                        (int(exclude_cell[1]), int(exclude_cell[2])),
                        SGM * 3,
                        255,
                        -1,
                    )
                cv2.imwrite(
                    str(save_mask_path.joinpath(f"{frame:05d}.tif")),
                    mask.astype(np.uint8),
                )
                track_let = track_let[track_let[:, 1] != -1]
                generate_flow(
                    track_let,
                    save_CMP_path.joinpath(f"{frame:05d}"),
                    height=512,
                    width=512,
                )
    print("finished")
442061271 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: Peter. Wong
# @Time: 2018/12/29 10:17
# Train a Gradient Boosting regressor on load components to predict a
# hot-spot response, with cross-validation and regression metrics.
import numpy as np  # numpy
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor  # ensemble algorithm
from sklearn.model_selection import cross_val_score  # cross validation
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score  # batch import of metric functions
import pandas as pd  # pandas
from sklearn import preprocessing  # normalization utilities
# Data preparation
num_load = 6  # number of load components
num_result = 3  # index of the hot-spot location to predict
raw_data = np.loadtxt('base.txt')  # read the data file
X0 = raw_data[:,:num_load]  # split off the independent variables
y0 = raw_data[:,num_load:]
y1 = raw_data[:, num_load+num_result].reshape(-1, 1)  # split off the dependent variable
# y1 = np.zeros([X0.shape[0],1])
# for i, maxv in enumerate(y0):
#     y1[i] = max(y0[i])
# Normalize the data
X = preprocessing.scale(X0)
scaler_in = preprocessing.StandardScaler().fit(X0)
y = preprocessing.scale(y1)
scaler_out = preprocessing.StandardScaler().fit(y1)
# Train the regression model
n_folds = 6  # number of cross-validation folds
model_gbr = GradientBoostingRegressor(n_estimators=500,max_depth=3)  # build the gradient boosting regressor
model_names = ['GBR']  # names of the models
model_dic = [model_gbr]  # collection of regression model objects
cv_score_list = []  # cross-validation results
pre_y_list = []  # predicted y values for each regression model
for model in model_dic:  # iterate over the regression models
    scores = cross_val_score(model, X,y.ravel(), cv=n_folds)  # run cross-validation for each model
    cv_score_list.append(scores)  # store the cross-validation scores
    pre_y_list.append(scaler_out.inverse_transform(model.fit(X, y.ravel()).predict(X)))  # store predictions (de-normalized)
# Evaluate the model with regression metrics
n_samples, n_features = X.shape  # total samples and features
model_metrics_name = [explained_variance_score, mean_absolute_error, mean_squared_error, r2_score]  # metric functions
model_metrics_list = []  # metric results
for i in range(1):  # loop over model indices
    tmp_list = []  # temporary per-model results
    for m in model_metrics_name:  # loop over metric functions
        tmp_score = m(y, pre_y_list[i])  # compute each regression metric
        tmp_list.append(tmp_score)  # store the metric value
    model_metrics_list.append(tmp_list)  # store the per-model metric list
df1 = pd.DataFrame(cv_score_list, index=model_names)  # cross-validation results frame
df2 = pd.DataFrame(model_metrics_list, index=model_names, columns=['ev', 'mae', 'mse', 'r2'])  # metrics frame
print ('samples: %d \t features: %d' % (n_samples, n_features))  # print sample and feature counts
print (70 * '-')  # separator
print ('cross validation result:')  # header
print (df1)  # print the cross-validation frame
# print (70 * '-')  # separator
# print ('regression metrics:')  # header
# print (df2)  # print the metrics frame
# print (70 * '-')  # separator
# print ('short name \t full name')  # abbreviation legend header
# print ('ev \t explained_variance')
# print ('mae \t mean_absolute_error')
# print ('mse \t mean_squared_error')
# print ('r2 \t r2')
# print (70 * '-')  # separator
581015693 | import re
from plugins.uptime import Uptime
from plugins.nsfw_image_detector import NSFWImageDetectorPlugin
from plugins.read_links import ReadLinks
from plugins.psywerx_history import PsywerxHistory
from plugins.psywerx_groups import PsywerxGroups
from plugins.psywerx_karma import PsywerxKarma
import settings
class BotLogic(object):
def __init__(self, bot):
self.bot = bot # reference back to asynchat handler
self.joined_channel = False
self.usertrim = re.compile('[!+@]')
self.bot.known_users = {} # dict of known users present in the channel
self.init_actions()
self.plugins = [
PsywerxHistory(bot=bot),
PsywerxKarma(bot=bot),
PsywerxGroups(bot=bot),
NSFWImageDetectorPlugin(bot=bot),
ReadLinks(bot=bot),
Uptime(bot=bot),
]
@staticmethod
def _get_action_code(line):
if line.startswith('ERROR'):
raise Exception('Unknown IRC error in line: ' + line)
if line.startswith('PING'):
return 'PING'
action = line.split(' ', 2)[1]
if action == '376':
return 'END_MOTD'
if action == '353':
return 'NAMES_LIST'
if action == '366':
return 'END_NAMES'
if action == '433':
return 'NICK_IN_USE'
return action.upper()
@staticmethod
def parse_msg(line):
sline = line.split(' ', 1)
nick = line[1:sline[0].find('!')]
msg_start = sline[1].find(':', 1)
msg_chan = sline[1].find('#', 1)
msg = sline[1][msg_start + 1:].strip() if msg_start > 0 else ''
end = msg_start if msg_start > 0 else len(sline[1])
channel = sline[1][msg_chan:end].strip()
return nick, msg, channel
def self_input(self, channel, msg, line):
for plugin in self.plugins:
try:
plugin.handle_say(channel, msg, line)
except Exception:
return self.bot.log_error('Parsing self line error: ' + line)
def handle_end_motd(self, line):
# after server MOTD, join desired channel
for channel in settings.CHANNELS:
self.bot.known_users[channel] = {}
self.bot.write('JOIN ' + channel)
def handle_names_list(self, line):
# after NAMES list, the bot is in the channel
_, _, channel = self.parse_msg(line)
for nick in self.usertrim.sub('', line.split(':')[2]).split(' '):
self.bot.known_users[channel][nick.lower()] = nick
def handle_end_names(self, line):
self.joined_channel = True
def handle_nick_in_use(self, line):
self.bot.next_nick()
def handle_channel_input(self, action_code, line):
try:
nick, msg, channel = self.parse_msg(line)
except Exception:
return self.bot.log_error('Parsing msg line error: ' + line)
action = self._channel_actions.get(action_code)
if action is not None:
action(channel, nick, msg)
# Run plugins
for plugin in self.plugins:
plugin.handle_message(channel, nick, msg, line)
def new_input(self, line):
try:
action_code = self._get_action_code(line)
except Exception:
return self.bot.log_error('IRC error: ' + line)
action = self._actions.get(action_code)
if action is not None:
return action(line)
elif self.joined_channel: # respond to some messages
self.handle_channel_input(action_code, line)
    def init_actions(self):
        """Build the dispatch tables used by new_input/handle_channel_input."""
        # Server-level replies: code -> handler(line).
        self._actions = {
            'PING': lambda line: self.bot.write('PONG'), # ping-pong
            'END_MOTD': self.handle_end_motd,
            'NAMES_LIST': self.handle_names_list,
            'NOTICE': lambda line: None,
            'MODE': lambda line: None,
            'END_NAMES': self.handle_end_names,
            'NICK_IN_USE': self.handle_nick_in_use,
        }
        # Channel-level events: code -> handler(channel, nick, msg).
        self._channel_actions = {
            'JOIN': self.bot.add_user,
            'QUIT': self.bot.remove_user,
            'PART': self.bot.part_user,
            'NICK': self.bot.change_user,
        }
| null | src/logic.py | logic.py | py | 4,222 | python | en | code | null | code-starcoder2 | 51 |
28002487 | from __future__ import print_function
import numpy as np
import time
import math
from ..box import centered_box
from ..tensor import WritableTensorData as WTD, \
WritableTensorDataWithMask as WTDM
from ..emio import imsave
def prepare_outputs(spec, locs, blend=False, blend_mode='', stride=None):
    """Instantiate the output-blending object for an inference run.

    Args:
        spec: dict mapping output key -> shape tuple (last 3 dims = FOV).
        locs: ordered list of patch locations.
        blend: whether overlapping patches are accumulated.
        blend_mode: '', 'bump', or 'aligned-bump' (case-insensitive).
        stride: patch stride; only used by the aligned-bump mode.

    Returns:
        A Blend, BumpBlend, or AlignedBumpBlend instance.

    Raises:
        RuntimeError: if blend_mode is not one of the known modes.
    """
    mode = blend_mode.lower()
    # Map the mode name to its class directly instead of eval()-ing a
    # string-assembled constructor call (same mapping, no eval).
    blend_classes = {
        '': Blend,
        'bump': BumpBlend,
        'aligned-bump': AlignedBumpBlend,
    }
    if mode not in blend_classes:
        raise RuntimeError('unknown output blend type [%s]' % mode)
    # print('blending mode: {}'.format(mode))
    return blend_classes[mode](spec, locs, blend, stride)
class Blend(object):
    """
    Blend interface.

    Accumulates per-patch inference outputs into one writable tensor per
    output key.  With ``blend=True`` overlapping patches are summed
    (op='np.add') into mask-aware tensors; otherwise patches overwrite.
    """
    def __init__(self, spec, locs, blend=False, stride=None):
        """Initialize Blend.

        Args:
            spec: dict mapping output key -> shape; last 3 dims are the FOV.
            locs: patch locations; assumed ordered so locs[0]/locs[-1]
                span the full output range -- confirm at call sites.
            blend: accumulate overlapping patches when True.
            stride: unused here; consumed by subclasses.
        """
        self.spec = spec
        self.locs = locs
        self.blend = blend
        self._prepare_data()
    def push(self, loc, sample):
        """Write each output patch in ``sample`` at location ``loc``."""
        for k, v in sample.items():
            assert k in self.data
            self.data[k].set_patch(loc, v, op=self.op)
    def get_data(self, key):
        """Get inference output data."""
        assert key in self.data
        return self.data[key].get_data()
    def voxels(self):
        # Smallest output volume (in voxels) across all output keys.
        voxels = list()
        for k, v in self.data.items():
            voxels.append(np.prod(v.dim()))
        return min(voxels)
    ####################################################################
    ## Private Methods.
    ####################################################################
    def _prepare_data(self):
        """
        Allocate one writable tensor per output key, sized to cover the
        bounding box spanned by the first and last patch locations.
        """
        assert len(self.locs) > 0
        rmin = self.locs[0]
        rmax = self.locs[-1]
        self.data = dict()
        self.op = None
        for k, v in self.spec.items():
            fov = v[-3:]
            a = centered_box(rmin, fov)
            b = centered_box(rmax, fov)
            c = a.merge(b)
            shape = v[:-3] + tuple(c.size())
            # Inference with overlapping window.
            if self.blend:
                self.data[k] = WTDM(shape, fov, c.min())
                self.op = 'np.add'
            else:
                self.data[k] = WTD(shape, fov, c.min())
class BumpBlend(Blend):
    """
    Blending with bump function.

    Each patch is weighted by a smooth bump profile.  Overlap weights are
    normalized with precomputed per-voxel max logits for numerical
    stability (softmax-style weighting across overlapping patches).
    """
    def __init__(self, spec, locs, blend=False, stride=None):
        """Initialize BumpBlend."""
        super().__init__(spec, locs, blend=blend, stride=stride)
        # Cache of bump-logit windows, keyed by window shape.
        self.logit_maps = dict()
        # Inference with overlapping window.
        self.max_logits = None
        if blend:
            max_logits = dict()
            # Compute max_logit for numerical stability.
            for k, v in self.data.items():
                fov = tuple(v.fov())
                data = np.full(v.dim(), -np.inf, dtype='float32')
                max_logit = WTD(data, fov, v.offset())
                max_logit_window = self._bump_logit_map(fov)
                for loc in self.locs:
                    max_logit.set_patch(loc, max_logit_window, op='np.maximum')
                max_logits[k] = max_logit
            self.max_logits = max_logits
    def push(self, loc, sample):
        """Blend each patch in ``sample`` into the output with a bump mask."""
        for k, v in sample.items():
            assert k in self.data
            t0 = time.time()
            mask = self.get_mask(k, loc)
            t1 = time.time() - t0
            self.data[k].set_patch(loc, v, op=self.op, mask=mask)
            t2 = time.time() - t0
            print('get_mask: %.3f, set_patch: %.3f' % (t1, t2-t1))
    def get_mask(self, key, loc):
        # Per-patch bump weights normalized by the precomputed max logits;
        # None when blending is disabled.
        mask = None
        if self.blend:
            assert key in self.max_logits
            max_logit = self.max_logits[key].get_patch(loc)
            mask = self._bump_map(max_logit.shape[-3:], max_logit[0, ...])
        return mask
    ####################################################################
    ## Private methods.
    ####################################################################
    def _bump_logit(self, z, y, x, t=1.5):
        # Log of the bump profile; tends to -inf at the patch borders.
        return -(x*(1-x))**(-t)-(y*(1-y))**(-t)-(z*(1-z))**(-t)
    def _bump_logit_map(self, dim):
        # Memoized bump-logit window over coordinates normalized to (0,1).
        ret = self.logit_maps.get(dim)
        if ret is None:
            x = range(dim[-1])
            y = range(dim[-2])
            z = range(dim[-3])
            zv, yv, xv = np.meshgrid(z, y, x, indexing='ij')
            xv = (xv+1.0)/(dim[-1]+1.0)
            yv = (yv+1.0)/(dim[-2]+1.0)
            zv = (zv+1.0)/(dim[-3]+1.0)
            ret = self._bump_logit(zv, yv, xv)
            self.logit_maps[dim] = ret
        return ret
    def _bump_map(self, dim, max_logit):
        # exp(logit - max_logit): stabilized bump weights for one patch.
        return np.exp(self._bump_logit_map(dim) - max_logit)
class AlignedBumpBlend(Blend):
    """
    Blending with bump function with aligned patches.

    Unlike BumpBlend, all patches share one precomputed bump mask,
    normalized by simulating a 3x3x3 grid of stride-shifted neighbors,
    so push() is a single multiply-by-mask plus accumulation.
    """
    def __init__(self, spec, locs, blend=True, stride=None):
        """Initialize AlignedBumpBlend.

        Args:
            spec, locs: as in Blend.
            blend: ignored; the parent is constructed with blend=False on
                purpose so Blend's chunk-wise mask is bypassed in favor of
                the per-patch mask built here.
            stride: per-axis patch stride, either in voxels (values >= 1)
                or as fractions of the FOV (all values <= 1.0).
        """
        super().__init__(spec, locs, False)
        self.patch_masks = dict()
        # Precompute one normalized blending mask per output key.
        for k, v in self.data.items():
            fov = v.fov()
            assert stride
            if all(np.less_equal(stride, 1.0)):
                # Fractional stride: convert to voxels relative to the FOV.
                # Bug fixes vs. the original: use the builtin round() (there
                # is no math.round), drop the bogus reassignment of fov, and
                # materialize a tuple -- the original built a generator that
                # the assert below exhausted before _make_mask ever saw it.
                stride_by_voxel = tuple(f - int(round(f * s))
                                        for f, s in zip(fov, stride))
            else:
                stride_by_voxel = tuple(stride)
            print('stride: {}'.format(stride))
            assert all(np.greater_equal(stride_by_voxel, 1))
            mask = self._make_mask(fov, stride_by_voxel)
            assert np.less_equal(mask, 1.0).all()
            self.patch_masks[k] = mask
        self._save_mask()
    def push(self, loc, sample):
        """Mask each patch in-place, then accumulate it into the output."""
        for k, v in sample.items():
            t0 = time.time()
            np.multiply(v, self.patch_masks[k], v)
            self.data[k].set_patch(loc, v, op='np.add')
            t1 = time.time() - t0
            print('blending: %.3f sec' % t1)
    ####################################################################
    ## Private methods.
    ####################################################################
    def _save_mask(self):
        # Debug dump of each blending mask.
        for k, v in self.patch_masks.items():
            imsave(v, '/tmp/patch_mask_{}.tif'.format(k))
    def _make_mask(self, fov, stride_by_voxel):
        """
        Build the normalized per-patch blending mask.

        A bump map over the FOV is divided by the accumulated weight of a
        simulated 3x3x3 grid of stride-shifted copies, so that overlapping
        patches sum to a weight of at most 1 everywhere.
        https://en.wikipedia.org/wiki/Bump_function

        Returns:
            float32 array of shape ``fov``.
        """
        stride = stride_by_voxel
        bump_map = self._make_bump_map(fov)
        # Simulate blending of 3x3x3 shifted copies to get normalization.
        base_mask = np.zeros(tuple(f + 2 * s for (f, s) in zip(fov, stride)),
                             dtype='float64')
        print('fov: {}, stride: {}'.format(fov, stride))
        print('shape of base mask: {}'.format(base_mask.shape))
        for nz in range(3):
            for ny in range(3):
                for nx in range(3):
                    base_mask[nz * stride[0]:nz * stride[0] + fov[0],
                              ny * stride[1]:ny * stride[1] + fov[1],
                              nx * stride[2]:nx * stride[2] + fov[2]] += bump_map
        # Normalize against the accumulated weight seen by the center patch.
        bump_map /= base_mask[stride[0]:stride[0] + fov[0],
                              stride[1]:stride[1] + fov[1],
                              stride[2]:stride[2] + fov[2]]
        return np.asarray(bump_map, dtype='float32')
    def _make_bump_map(self, dim):
        # Smooth bump over (-1, 1)^3 sampled at the voxel centers of ``dim``.
        x = range(dim[-1])
        y = range(dim[-2])
        z = range(dim[-3])
        zv, yv, xv = np.meshgrid(z, y, x, indexing='ij')
        xv = (xv + 1.0) / (dim[-1] + 1.0) * 2.0 - 1.0
        yv = (yv + 1.0) / (dim[-2] + 1.0) * 2.0 - 1.0
        zv = (zv + 1.0) / (dim[-3] + 1.0) * 2.0 - 1.0
        bump_map = np.exp(-1.0 / (1.0 - xv * xv) +
                          -1.0 / (1.0 - yv * yv) +
                          -1.0 / (1.0 - zv * zv))
        return np.asarray(bump_map, dtype='float64')
| null | python/dataprovider/inference/blend.py | blend.py | py | 8,556 | python | en | code | null | code-starcoder2 | 50 |
6401508 | #coding=utf8
#__author__chry
#__date:2018/4/23
from multiprocessing import Process,Manager
def f(d, l, n):
    """Worker: mutate the shared dict/list proxies, then echo the list."""
    d.update({n: '1', '2': 2, 0.25: None})
    l.append(n)
    print(l)
if __name__=='__main__':
    # Manager provides process-shared proxy objects (dict/list).
    with Manager() as manger:
        d = manger.dict()
        l = manger.list(range(5))
        p_list=[]
        # Spawn 10 workers, each mutating the shared containers.
        for i in range(10):
            p=Process(target=f,args=(d,l,i))
            p.start()
            p_list.append(p)
        # Wait for all workers before the manager context shuts down.
        for res in p_list:
            res.join()
524586540 | import sys
# Data files used by the task manager.
TODO_FILE = 'todo.txt'
ARCHIVE_FILE = 'done.txt'
# ANSI terminal color escape codes.
RED = "\033[1;31m"
BLUE = "\033[0;34m"
CYAN = "\033[1;36m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"
REVERSE = "\033[;7m"
YELLOW = "\033[0;33m"
# Single-letter CLI commands.
ADICIONAR = 'a'
REMOVER = 'r'
FAZER = 'f'
PRIORIZAR = 'p'
LISTAR = 'l'
def printCores(texto, cor):
    """Print *texto* in the given ANSI color, then restore the default."""
    print(f"{cor}{texto}{RESET}")
def adicionar(descricao, extras):
    """Append a new activity line to TODO_FILE.

    Args:
        descricao: activity description; an empty string aborts the call.
        extras: 5-element sequence [data, hora, prioridade, contexto,
            projeto]; empty entries are skipped.

    Returns:
        True when the line was written, False otherwise.

    Fixes: the original compared strings with ``is not ''`` (identity,
    not equality) and never closed the file on a write error.
    """
    if descricao == '':
        return False
    # Fields before the description: date, time, priority.
    partes = [extras[i] for i in range(3) if extras[i] != '']
    partes.append(descricao)
    # Fields after the description: context, project.
    partes.extend(extras[i] for i in range(3, 5) if extras[i] != '')
    novaAtividade = ''.join(p + ' ' for p in partes)
    print('Atividade Salva: {}'.format(novaAtividade))
    # Escreve no TODO_FILE.
    try:
        with open(TODO_FILE, 'a') as fp:
            fp.write(novaAtividade + "\n")
    except IOError as err:
        print("Não foi possível escrever para o arquivo " + TODO_FILE)
        print(err)
        return False
    return True
def horaValida(hora):
    """True when *hora* is a 4-digit 'HHMM' string naming a valid time."""
    if len(hora) != 4 or not soDigitos(hora):
        return False
    hh, mm = int(hora[:2]), int(hora[2:])
    return 0 <= hh < 24 and 0 <= mm < 60
def dataValida(data):
    """True when *data* is a 'DDMMYYYY' digit string naming a real date.

    Note: February always accepts day 29 (leap years are not checked),
    matching the original behavior.

    Fix: the original tested 31-day months with a chained comparison
    (``mes ==1==3==5...``), which is always False, so day 31 was rejected
    for January, March, May, July, August, October and December.
    """
    if len(data) != 8 or not soDigitos(data):
        return False
    dia = int(data[0:2])
    mes = int(data[2:4])
    if mes not in range(1, 13):
        return False
    if mes == 2:
        # February: 1-29 (no leap-year distinction).
        return dia in range(1, 30)
    if mes in (1, 3, 5, 7, 8, 10, 12):
        return dia in range(1, 32)
    return dia in range(1, 31)
def projetoValido(proj):
    """A project tag has at least 2 characters and starts with '+'."""
    return len(proj) >= 2 and proj.startswith('+')
def contextoValido(contex):
    """A context tag has at least 2 characters and starts with '@'."""
    return len(contex) >= 2 and contex.startswith('@')
def prioridadeValida(pri):
    """True for a priority token of the form '(A)'..'(Z)' (either case).

    Fixes two defects of the original check: the letter tables were
    missing 'i', 's' and 'S', and '(' / ')' were interchangeable, so
    malformed tokens like ')A(' were accepted.
    """
    letras = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return (len(pri) == 3 and pri[0] == '(' and pri[2] == ')'
            and pri[1] in letras)
def organizar(linhas):
    """Parse one raw activity line into a single-item list
    [(descricao, (data, hora, prioridade, contexto, projeto))].

    Expected field layout:
        [DDMMYYYY] [HHMM] [(P)] descricao... [@contexto] [+projeto]

    Fix: the original iterated over the *characters* of the input while
    re-parsing the whole string on each pass, returning one duplicated
    tuple per character.  Every caller only uses element [0], so a
    single-item list preserves observable behavior.  A blank line now
    yields [] instead of crashing.
    """
    tokens = linhas.strip().split()
    if not tokens:
        return []
    data = hora = pri = contex = proj = ''
    # Leading optional fields, in fixed order: date, time, priority.
    if dataValida(tokens[0]):
        data = tokens.pop(0)
    if horaValida(tokens[0]):
        hora = tokens.pop(0)
    if prioridadeValida(tokens[0]):
        pri = tokens.pop(0)
    # Trailing optional fields: context (second to last), project (last).
    if len(tokens) > 1 and contextoValido(tokens[-2]):
        contex = tokens.pop(-2)
    if projetoValido(tokens[-1]):
        proj = tokens.pop(-1)
    # Remaining tokens form the description (with a trailing space,
    # matching the original format).
    desc = ''.join(t + ' ' for t in tokens)
    return [(desc, (data, hora, pri, contex, proj))]
def organizadoBonito(bagunca):
    """Format a parsed activity (first element of *bagunca*) for display.

    Non-empty fields are printed as: data hora prioridade descricao
    contexto projeto, each followed by one space.
    """
    desc, (data, hora, pri, contex, proj) = bagunca[0]
    partes = []
    if data != '':
        partes.append(dataBonita(data))
    if hora != '':
        partes.append(horaBonita(hora))
    if pri != '':
        partes.append(pri)
    if desc != '':
        partes.append(desc)
    if contex != '':
        partes.append(contex)
    if proj != '':
        partes.append(proj)
    return ''.join(p + ' ' for p in partes)
def dataBonita(data):
    """Turn 'DDMMYYYY' into 'DD/MM/YYYY'."""
    return '/'.join((data[0:2], data[2:4], data[4:9]))
def horaBonita(hora):
    """Turn 'HHMM' into 'HHhMMm'."""
    return '{}h{}m'.format(hora[0:2], hora[2:4])
def soDigitos(numero):
    """True when *numero* is a str of ASCII digits only (empty counts)."""
    if type(numero) is not str:
        return False
    return all('0' <= ch <= '9' for ch in numero)
def listar():
    """Read TODO_FILE and return every activity parsed by organizar(),
    sorted by priority first and then by date/time."""
    organizacao= []
    arquivo= open(TODO_FILE,'r')
    todasAsLinhas = arquivo.readlines()
    arquivo.close()
    tamanho = len(todasAsLinhas)
    for i in range(tamanho):
        tarefa = organizar(todasAsLinhas[i])
        organizacao.append(tarefa)
    # Priority order first, then date/time within equal priorities.
    porPrioridade = ordenarPorPrioridade(organizacao)
    tudoOrdenado = ordenarPorDataHora(porPrioridade)
    return tudoOrdenado
def ordenarPorDataHora(itens):
    """Order neighboring activities by date, then time, but only within
    runs of equal priority; undated items sink below dated ones.

    Implementation is a bubble-style pass repeated a fixed 26 times.
    NOTE(review): the ``is ''`` / ``is not ''`` identity comparisons are
    kept byte-for-byte; they rely on CPython interning the empty string.
    """
    qte = len(itens)-1
    cont = 26
    while cont != 0 :
        for x in range(qte):
            # Only reorder neighbors sharing the same priority tag.
            if itens[x][0][1][2] == itens[x+1][0][1][2]:
                # Dated items sort ascending by date.
                if itens[x][0][1][0] > itens[x+1][0][1][0] and itens[x][0][1][0] is not '':
                    itens[x],itens[x+1] = itens[x+1] , itens[x]
                # Same date: order by time; empty times go last.
                if itens[x][0][1][0] == itens[x+1][0][1][0]:
                    if itens[x][0][1][1] > itens[x+1][0][1][1] and itens[x+1][0][1][1] is not '' or itens[x][0][1][1] < itens[x+1][0][1][1] and itens[x][0][1][1] is '':
                        itens[x],itens[x+1] = itens[x+1],itens[x]
                # Undated items sink below dated ones.
                if itens[x][0][1][0] < itens[x+1][0][1][0] and itens[x][0][1][0] is '':
                    itens[x],itens[x+1] = itens[x+1] , itens[x]
        cont -=1
    return itens
def ordenarPorPrioridade(itens):
    """Return the activities grouped by priority '(A)'..'(Z)' in order,
    preserving relative order inside each group; items without a priority
    go last.  Items with any other tag are dropped (original behavior).
    """
    alfa = ('(A)','(B)','(C)','(D)','(E)','(F)','(G)','(H)','(I)','(J)','(K)','(L)','(M)','(N)','(O)','(P)','(Q)','(R)','(S)','(T)','(U)','(V)','(W)','(X)','(Y)','(Z)')
    resultado = []
    for letra in alfa:
        resultado.extend(it for it in itens if it[0][1][2] == letra)
    resultado.extend(it for it in itens if it[0][1][2] == '')
    return resultado
def abrir():
    """Return every line of TODO_FILE as a list.

    Fix: the original referenced ``agenda.close`` without calling it, so
    the file handle was never closed; a ``with`` block guarantees it.
    """
    with open(TODO_FILE, 'r') as agenda:
        return agenda.readlines()
def reescrever(linhasParaEscrever):
    """Overwrite TODO_FILE with the given lines."""
    with open(TODO_FILE, 'w') as arquivo:
        arquivo.writelines(linhasParaEscrever)
def adicionaFeito(atividade):
    """Append a completed activity to ARCHIVE_FILE."""
    with open(ARCHIVE_FILE, 'a') as arquivoFeito:
        arquivoFeito.write(atividade + ' \n')
def fazer(num):
    """Mark the *num*-th (0-based, in sorted order) activity as done:
    move every matching line from TODO_FILE to ARCHIVE_FILE."""
    linhasDoArquivo = abrir()
    listagem = listar()
    atividadeRealizada = organizadoBonito(listagem[num])
    qte = len(linhasDoArquivo)
    cont = 0
    while cont < qte:
        pivo = linhasDoArquivo[cont]
        pivo = organizadoBonito(organizar(pivo))
        # [:-2] drops the trailing "space + last char" so minor tail
        # differences do not break the match.
        if pivo[:-2] == atividadeRealizada[:-2]:
            adicionaFeito(pivo)
            linhasDoArquivo.pop(cont)
            qte-=1
        cont +=1
    print('{}foi feito'.format(atividadeRealizada))
    return reescrever(linhasDoArquivo)
def remover(x):
    """Delete the *x*-th (0-based, in sorted order) activity from
    TODO_FILE (without archiving it)."""
    linhasDoArquivo = abrir()
    listagem = listar()
    item = organizadoBonito(listagem[x])
    qte = len(linhasDoArquivo)
    cont = 0
    while cont < qte:
        pivo = linhasDoArquivo[cont]
        pivo = organizadoBonito(organizar(pivo))
        # [:-2] tolerates trailing-character differences, as in fazer().
        if pivo[:-2] == item[:-2]:
            linhasDoArquivo.pop(cont)
            qte-=1
        cont +=1
    print('{}foi removido'.format(item))
    return reescrever(linhasDoArquivo)
def frases(elem, elem2):
    """Rebuild one todo.txt line from (descricao, campos), newline-terminated.

    Args:
        elem: the description string.
        elem2: 5-tuple (data, hora, prioridade, contexto, projeto).

    Field order: data hora prioridade descricao contexto projeto; every
    non-empty field except the project gets a trailing space.

    Fix: the original compared with ``is not ''`` (identity, not
    equality), which only works because CPython interns ''.
    """
    desc = elem
    data, hora, pri, ctx, proj = elem2
    linha = ''.join(c + ' ' for c in (data, hora, pri, desc, ctx) if c != '')
    if proj != '':
        linha += proj
    return linha + '\n'
def priorizar(num, prioridade):
    """Set the priority letter of the *num*-th (0-based, sorted) activity
    and rewrite TODO_FILE with the updated list."""
    alterado = []
    atualizado =[]
    # Wrap the letter into the stored '(X) ' token format.
    prioridade = '('+prioridade+') '
    # NOTE(review): this result is never used; abrir() call looks vestigial.
    linhasDoArquivo = abrir()
    a = listar()
    for x in range(len(a)):
        alterado.append(a[x][0])
    # Rebuild the selected activity with the new priority field.
    novadesc =alterado[num][0]
    data = alterado[num][1][0]
    hora = alterado[num][1][1]
    contexto = alterado[num][1][3]
    projeto = alterado[num][1][4]
    novaAtiv = (novadesc, (data, hora, prioridade, contexto, projeto))
    alterado.pop(num)
    alterado.insert(num,novaAtiv)
    # Serialize every activity back into file lines.
    for x in range(len(alterado)):
        novosItens = frases(alterado[x][0],alterado[x][1])
        atualizado.append(novosItens)
    print('Prioridade Alterada')
    return reescrever(atualizado)
def processarComandos(comandos):
    """CLI dispatcher; *comandos* is sys.argv (script name, command, args).

    Fixes vs. the original:
    - REMOVER validated with ``range(len(listar))`` (len of the function
      object -> TypeError) and called remover() even after reporting the
      index as invalid.
    - The 1-based user indices were validated against ``range(len(...))``
      (0-based), rejecting the last item and accepting 0.
    """
    if comandos[1] == ADICIONAR:
        # Drop the script name and the command letter; the rest is the task.
        comandos.pop(0)
        comandos.pop(0)
        itemParaAdicionar = organizar(' '.join(comandos))[0]
        adicionar(itemParaAdicionar[0], itemParaAdicionar[1])
    elif comandos[1] == LISTAR:
        a = listar()
        if a == []:
            return False
        for x in range(len(a)):
            tarefa = str(x + 1) + ' ' + organizadoBonito(a[x])
            pri = a[x][0][1][2]
            # Color the top four priorities; everything else plain.
            if pri == '(A)':
                printCores(tarefa, CYAN)
            elif pri == '(B)':
                printCores(tarefa, GREEN)
            elif pri == '(C)':
                printCores(tarefa, YELLOW)
            elif pri == '(D)':
                printCores(tarefa, BLUE)
            else:
                print('{}'.format(tarefa))
    elif comandos[1] == REMOVER:
        atividade = int(comandos[2])
        if atividade not in range(1, len(listar()) + 1):
            print('A atividade indicada não existe')
            return False
        return remover(atividade - 1)
    elif comandos[1] == FAZER:
        colocacao = int(comandos[2])
        if colocacao not in range(1, len(listar()) + 1):
            print('A atividade indica não existe')
            return False
        return fazer(colocacao - 1)
    elif comandos[1] == PRIORIZAR:
        alfa = 'QWERTYUIOPASDFGHJKLÇZXCVBNM'
        ordem = int(comandos[2])
        prioridade = comandos[3].upper()
        if prioridade not in alfa or ordem not in range(1, len(listar()) + 1):
            print('A atividade indica não existe')
            return False
        return priorizar(ordem - 1, prioridade)
    else:
        print("Comando inválido.")
# Entry point: dispatch the command-line arguments (e.g. `python projeto.py l`).
processarComandos(sys.argv)
| null | projeto.py | projeto.py | py | 10,348 | python | en | code | null | code-starcoder2 | 50 |
306174066 |
import argparse
import os
import torch
import posenet
def valid_tensor(s):
    """argparse type callback: parse a 'CxHxW' string into [C, H, W] ints.

    Raises argparse.ArgumentTypeError on a wrong field count or on any
    non-integer field.
    """
    msg = "Not a valid resolution: '{0}' [CxHxW].".format(s)
    parts = s.split('x')
    if len(parts) != 3:
        raise argparse.ArgumentTypeError(msg)
    try:
        return [int(p) for p in parts]
    except ValueError:
        raise argparse.ArgumentTypeError(msg)
def parse_args():
    """Build and parse the exporter's command-line arguments."""
    parser = argparse.ArgumentParser(description='Posenet exporter')
    # Integer depth multiplier (50, 75, 100, 101).
    parser.add_argument('-m', '--model', type=int, default=101)
    parser.add_argument('-s', '--output_stride', type=int, default=16)
    parser.add_argument('-r', '--ONNX_resolution', default="3x480x640",
                        type=valid_tensor, help='ONNX input resolution')
    parser.add_argument('-o', '--outfile', default='./out.onnx',
                        help='output file path')
    return parser.parse_args()
def main():
    """Load the requested posenet model and export it as an ONNX file."""
    args = parse_args()
    model = posenet.load_model(args.model, output_stride=args.output_stride)
    # Export ONNX file
    input_names = [ "input:0" ] # standardized in/out naming (required for runtime)
    output_names = [ "output:0" ]
    # Batch dimension of 1 prepended to the CxHxW resolution.
    dummy_input = torch.randn([1]+args.ONNX_resolution)
    ONNX_path = args.outfile
    # Exporting -- CAFFE2 compatible
    # requires operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
    # https://github.com/pytorch/pytorch/issues/41848
    # for CAFFE2 backend (old exports mode...)
    #torch.onnx.export(model, dummy_input, ONNX_path, input_names=input_names, output_names=output_names,
    #    keep_initializers_as_inputs=True, operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
    # Exporting -- ONNX runtime compatible
    # keep_initializers_as_inputs=True -> is required for onnx optimizer...
    torch.onnx.export(model, dummy_input, ONNX_path, input_names=input_names, output_names=output_names,
                      keep_initializers_as_inputs=True, opset_version=11)
# Run the exporter only when invoked as a script.
if __name__ == '__main__':
    main()
| null | export.py | export.py | py | 2,056 | python | en | code | null | code-starcoder2 | 50 |
63967556 | from socket import *
import sys
from time import ctime
# Collect command-line arguments (strings): the IP address and port to bind.
HOST = sys.argv[1]
PORT = int(sys.argv[2])
ADDR = (HOST, PORT)
BUFFERSIZE = 1024  # max datagram size accepted per recvfrom
# 1. Create a datagram (UDP) socket.
sockfd = socket(AF_INET, SOCK_DGRAM)
# Set the socket option so the port can be reused immediately on restart.
sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
# 2. Bind the local IP address and port.
sockfd.bind(ADDR)
# 3. Receive and answer messages forever.
while True:
    data, addr = sockfd.recvfrom(BUFFERSIZE)
    print("接收", addr, "消息内容:", data.decode())
    sockfd.sendto(("在 %s 接收到消息" % ctime()).encode(), addr)
# 4. Close the socket.  NOTE(review): unreachable -- the loop never exits.
sockfd.close()
| null | aid1805/PythonNet/day02/udp_server.py | udp_server.py | py | 707 | python | en | code | null | code-starcoder2 | 50 |
108698527 | import FWCore.ParameterSet.Config as cms
# CMSSW job configuration: run the gen-level B/C-hadron matching sequence
# over a ttbar AODSIM sample and keep only the matcher products.
process = cms.Process("Analyzer")
## configure message logger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.threshold = 'INFO'
process.MessageLogger.cerr.FwkReport.reportEvery = 10
## define input
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
    ## add your favourite file here
    '/store/mc/Spring14dr/TTJets_MSDecaysCKM_central_Tune4C_13TeV-madgraph-tauola/AODSIM/PU_S14_POSTLS170_V6-v1/00000/00120F7A-84F5-E311-9FBE-002618943910.root',
    ),
    skipEvents = cms.untracked.uint32(0)
)
## define maximal number of events to loop over
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100)
)
# get sequence for B-hadron matching
process.load("PhysicsTools.JetMCAlgos.sequences.GenHFHadronMatching_cff")
process.p1 = cms.Path(
    process.genBCHadronMatchingSequence
)
# Keep only the matcher output collections in the output file.
process.out = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('genHFHadronMatching_out.root'),
    outputCommands = cms.untracked.vstring('drop *', 'keep *_matchGen*_*_*')
)
process.outpath = cms.EndPath(process.out)
| null | PhysicsTools/JetMCAlgos/test/genHFHadronMatcher.py | genHFHadronMatcher.py | py | 1,166 | python | en | code | null | code-starcoder2 | 50 |
222406497 | from tkinter import *
class WidgetsDemo:
    """Tkinter demo: a Bold checkbutton plus Red/Yellow radiobuttons.

    NOTE(review): the handlers ``processCheckbutton`` and
    ``processRadiobutton`` referenced below are not defined in this file
    as shown (the file appears truncated) -- confirm they exist.
    """
    def __init__(self):
        window = Tk()
        window.title("Widgets Demo")
        frame1 = Frame(window)
        frame1.pack()
        # v1 tracks the checkbutton state (0/1).
        self.v1 = IntVar()
        cbtBold = Checkbutton(frame1,text = "Bold",
                              variable = self.v1,
                              command = self.processCheckbutton)
        # v2 is the shared variable of the radiobutton group.
        self.v2 = IntVar()
        # NOTE(review): 'variable' is bound to the handler method instead
        # of self.v2, and no 'value'/'command' is set -- likely a bug;
        # compare with rbYellow below.
        rbRed = Radiobutton(frame1,text = "Red",
                            bg = "red",
                            variable = self.processRadiobutton)
        rbYellow = Radiobutton(frame1,text = "Yellow",
                               bg = "yellow",
                               variable = self.v2,
                               value = 2,
                               command = self.processRadiobutton)
| null | zz/python二级/tkinter Demo.py | tkinter Demo.py | py | 840 | python | en | code | null | code-starcoder2 | 51 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.